Added projects files directly to main repo
djpapzin committed Oct 30, 2023
1 parent a0e8ec7 commit 27444f6
Showing 107 changed files with 1,410 additions and 681 deletions.
14 changes: 14 additions & 0 deletions Projects/2. Large Language Models and LangChain/chat_models.py
@@ -0,0 +1,14 @@
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
    HumanMessage,
    SystemMessage
)

# initialize the chat model
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

# a system message sets the assistant's role; a human message carries the request
messages = [
    SystemMessage(content="You are a helpful assistant that translates English to French."),
    HumanMessage(content="Translate the following sentence: I love programming.")
]

chat(messages)
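
For reference, chat(messages) returns an AIMessage; a minimal way to inspect the reply (assuming a valid OPENAI_API_KEY is set in the environment) would be:

response = chat(messages)
print(response.content)  # something along the lines of "J'adore la programmation."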
58 changes: 58 additions & 0 deletions Projects/2. Large Language Models and LangChain/few_short.py
@@ -0,0 +1,58 @@
from langchain import PromptTemplate
from langchain import FewShotPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain

# create our examples (a list of query/answer pairs)
examples = [
    {
        "query": "What's the weather like?",
        "answer": "It's raining cats and dogs, better bring an umbrella!"
    }, {
        "query": "How old are you?",
        "answer": "Age is just a number, but I'm timeless."
    }
]

# create an example template
example_template = """
User: {query}
AI: {answer}
"""

# create a prompt example from the above template
example_prompt = PromptTemplate(
    input_variables=["query", "answer"],
    template=example_template
)

# now break our previous prompt into a prefix and suffix
# the prefix is our instructions
prefix = """The following are excerpts from conversations with an AI
assistant. The assistant is known for its humor and wit, providing
entertaining and amusing responses to users' questions. Here are some
examples:
"""
# and the suffix is our user input and output indicator
suffix = """
User: {query}
AI: """

# now create the few-shot prompt template
few_shot_prompt_template = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    prefix=prefix,
    suffix=suffix,
    input_variables=["query"],
    example_separator="\n\n"
)

# load the model
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)

chain = LLMChain(llm=chat, prompt=few_shot_prompt_template, verbose=True)

answer = chain.run(input("Ask your question: "))

print(answer)
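
To sanity-check what the chain actually sends to the model, the assembled few-shot prompt can be rendered directly; a quick sketch, not part of the original script:

print(few_shot_prompt_template.format(query="What's the meaning of life?"))

This prints the prefix, both worked examples, and the suffix with the query filled in.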
18 changes: 18 additions & 0 deletions Projects/2. Large Language Models and LangChain/gpt4all.py
@@ -0,0 +1,18 @@
# Import modules
from langchain.llms import GPT4All
from langchain import PromptTemplate, LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

# stream tokens to stdout as the local model generates them
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = GPT4All(model="E:/Backup/Documents/Softwares/GPT4All/Models/ggml-wizardLM-7B.q4_2.bin", callback_manager=callback_manager, verbose=True)
llm_chain = LLMChain(prompt=prompt, llm=llm)

question = "What happens when it rains somewhere?"
llm_chain.run(question)
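
Note that the model argument points to a local GGML weights file, so running this script requires a GPT4All-compatible model downloaded to that path (adjust it for your machine). With StreamingStdOutCallbackHandler attached, the answer is printed token by token as the model generates it.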
@@ -0,0 +1,31 @@
from langchain import PromptTemplate
from langchain import HuggingFaceHub, LLMChain
from dotenv import load_dotenv

load_dotenv()

template = """Question: {question}
Answer: """
prompt = PromptTemplate(
    template=template,
    input_variables=['question']
)

# user question
question = input("Ask your question: ")

# initialize Hub LLM
hub_llm = HuggingFaceHub(
    repo_id='google/flan-t5-large',
    model_kwargs={'temperature': 0}
)

# create prompt template > LLM chain
llm_chain = LLMChain(
    prompt=prompt,
    llm=hub_llm
)

# print the model's answer to the user's question
print(llm_chain.run(question))
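
One assumption worth making explicit: HuggingFaceHub reads its token from the HUGGINGFACEHUB_API_TOKEN environment variable, so the .env file loaded by load_dotenv() should define it, e.g.:

HUGGINGFACEHUB_API_TOKEN=hf_...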
@@ -0,0 +1,14 @@
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

summarization_template = "Summarize the following text to one sentence: {text}"
summarization_prompt = PromptTemplate(input_variables=["text"], template=summarization_template)
summarization_chain = LLMChain(llm=llm, prompt=summarization_prompt, verbose=True)

text = "LangChain provides many modules that can be used to build language model applications. Modules can be combined to create more complex applications, or be used individually for simple applications. The most basic building block of LangChain is calling an LLM on some input. Let’s walk through a simple example of how to do this. For this purpose, let’s pretend we are building a service that generates a company name based on what the company makes."
summarized_text = summarization_chain.predict(text=text)

print(summarized_text)
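
For a single-input chain like this one, summarization_chain.run(text) would behave the same as predict(text=text); both return the model's completion as a string.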
@@ -0,0 +1,9 @@
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback

llm = OpenAI(model_name="text-davinci-003", n=2, best_of=2)

with get_openai_callback() as cb:
    result = llm("Tell me a joke")
    cost = cb.total_cost
    print("$", round(cost, 5))
Submodule AI-Powered News Summarizer added at ec9b8b
40 changes: 40 additions & 0 deletions Projects/3. Learning How to Prompt/chain_prompting.py
@@ -0,0 +1,40 @@
from langchain import PromptTemplate, LLMChain
from langchain.llms import OpenAI

# Initialize LLM
llm = OpenAI(model_name="text-davinci-003", temperature=0)

# Prompt 1
template_question = """What is the name of the famous scientist who developed the theory of general relativity?
Answer: """
prompt_question = PromptTemplate(
    template=template_question,
    input_variables=[])

# Prompt 2
template_fact = """Provide a brief description of {scientist}'s theory of general relativity.
Answer: """
prompt_fact = PromptTemplate(
    input_variables=["scientist"],
    template=template_fact)

# Create the LLMChain for the first prompt
chain_question = LLMChain(llm=llm, prompt=prompt_question)

# Run the LLMChain for the first prompt with an empty dictionary
response_question = chain_question.run({})

# Extract the scientist's name from the response
scientist = response_question.strip()

# Create the LLMChain for the second prompt
chain_fact = LLMChain(llm=llm, prompt=prompt_fact)

# Input data for the second prompt
input_data = {"scientist": scientist}

# Run the LLMChain for the second prompt
response_fact = chain_fact.run(input_data)

print("Scientist:", scientist)
print("Fact:", response_fact)
@@ -0,0 +1,58 @@
from langchain.output_parsers import PydanticOutputParser
from pydantic import validator
from pydantic import BaseModel, Field
from typing import List
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI



# create output parser class
class ArticleSummary(BaseModel):
    title: str = Field(description="Title of the article")
    summary: List[str] = Field(description="Bulleted list summary of the article")

    # validating whether the generated summary has at least three lines
    @validator('summary', allow_reuse=True)
    def has_three_or_more_lines(cls, list_of_lines):
        if len(list_of_lines) < 3:
            raise ValueError("Generated summary has less than three bullet points!")
        return list_of_lines

# set up output parser
parser = PydanticOutputParser(pydantic_object=ArticleSummary)

# create prompt template
# notice that we are specifying the "partial_variables" parameter
template = """
You are a very good assistant that summarizes online articles.
Here's the article you want to summarize.
==================
Title: {article_title}
{article_text}
==================
{format_instructions}
"""

prompt = PromptTemplate(
    template=template,
    input_variables=["article_title", "article_text"],
    partial_variables={"format_instructions": parser.get_format_instructions()}
)

# Format the prompt using the article title and text obtained from scraping
formatted_prompt = prompt.format_prompt(article_title=article_title, article_text=article_text)

# instantiate model class
model = OpenAI(model_name="text-davinci-003", temperature=0.0)

# Use the model to generate a summary
output = model(formatted_prompt.to_string())

# Parse the output into the Pydantic model
parsed_output = parser.parse(output)
print(parsed_output)
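
Note that article_title and article_text are not defined in this file; they come from the scraping script shown next. To run this parser example standalone, placeholder values would be needed, for instance:

article_title = "The Effects of Climate Change"
article_text = "Climate change is causing a rise in global temperatures..."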
@@ -0,0 +1,79 @@
import os
import json
from dotenv import load_dotenv
import requests
from newspaper import Article
from langchain.schema import (
HumanMessage
)
from langchain.chat_models import ChatOpenAI

load_dotenv()

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
}

article_url = "https://www.artificialintelligence-news.com/2022/01/25/meta-claims-new-ai-supercomputer-will-set-records/"

session = requests.Session()

try:
    response = session.get(article_url, headers=headers, timeout=10)

    if response.status_code == 200:
        article = Article(article_url)
        article.download()
        article.parse()

        # print(f"Title: {article.title}")
        # print(f"Text: {article.text}")
    else:
        print(f"Failed to fetch article at {article_url}")
except Exception as e:
    print(f"Error occurred while fetching article at {article_url}: {e}")

# we get the article data from the scraping part
article_title = article.title
article_text = article.text

# prepare template for prompt
template = """
As an advanced AI, you've been tasked to summarize online articles into bulleted points. Here are a few examples of how you've done this in the past:
Example 1:
Original Article: 'The Effects of Climate Change'
Summary:
- Climate change is causing a rise in global temperatures.
- This leads to melting ice caps and rising sea levels.
- Resulting in more frequent and severe weather conditions.
Example 2:
Original Article: 'The Evolution of Artificial Intelligence'
Summary:
- Artificial Intelligence (AI) has developed significantly over the past decade.
- AI is now used in multiple fields such as healthcare, finance, and transportation.
- The future of AI is promising but requires careful regulation.
Now, here's the article you need to summarize:
==================
Title: {article_title}
{article_text}
==================
Please provide a summarized version of the article in a bulleted list format.
"""

# Format the Prompt
prompt = template.format(article_title=article.title, article_text=article.text)

messages = [HumanMessage(content=prompt)]

# load the model
chat = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.0)

# generate summary
summary = chat(messages)
print(summary.content)
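
One caveat with the error handling above: if the download fails, article is never assigned and the later article.title access raises a NameError. A stricter sketch would stop early on failure, for example:

try:
    response = session.get(article_url, headers=headers, timeout=10)
    response.raise_for_status()
except Exception as e:
    raise SystemExit(f"Could not fetch {article_url}: {e}")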
@@ -0,0 +1,14 @@
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import CharacterTextSplitter

loader = PyPDFLoader("The One Page Linux Manual.pdf")
pages = loader.load_and_split()

text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=20)
texts = text_splitter.split_documents(pages)

print(texts[0])

print (f"You have {len(texts)} documents")
print ("Preview:")
print (texts[0].page_content)
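
Here chunk_size=1000 and chunk_overlap=20 mean each chunk targets about 1,000 characters, with consecutive chunks sharing a 20-character overlap so text cut at a boundary keeps some context.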
@@ -0,0 +1,29 @@
from langchain.embeddings import CohereEmbeddings

# Initialize the CohereEmbeddings object
cohere = CohereEmbeddings(
    model="embed-multilingual-v2.0",
    cohere_api_key="95itqFSDUc4PMznsGaIqDUaGBRhwWG6sLE4XBnTD"
)

# Define a list of texts
texts = [
    "Hello from Cohere!",
    "مرحبًا من كوهير!",
    "Hallo von Cohere!",
    "Bonjour de Cohere!",
    "¡Hola desde Cohere!",
    "Olá do Cohere!",
    "Ciao da Cohere!",
    "您好,来自 Cohere!",
    "कोहेरे से नमस्ते!"
]

# Generate embeddings for the texts
document_embeddings = cohere.embed_documents(texts)

# Print the embeddings
for text, embedding in zip(texts, document_embeddings):
    print(f"Text: {text}")
    print(f"Embedding: {embedding[:5]}")  # print first 5 dimensions of each embedding
Submodule Customer Support Question Answering Chatbot added at cf412e
@@ -0,0 +1,3 @@
OPENAI_API_KEY=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
ACTIVELOOP_TOKEN=eyJhbGciOiJIUzUxMiIsImlhdCI6MTY4NzM1NDkyMSwiZXhwIjoxNzE4OTc3MjYwfQ.eyJpZCI6ImRqcGFwemluIn0.ezQ-ChtNr76KXlEr_lSJuxsa9im6I2X9BeDpGzMWzdz2kkC7OI9zAD2iQ8LO98gdKC0gYRcE3FAAX0FnYgXeTg
ACTIVELOOP_ID=djpapzin
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.