Initial commit: Starting the LangChain & Vector Databases in Production course with Activeloop
djpapzin committed Aug 15, 2023
0 parents commit 5ec3b90
Showing 15 changed files with 538 additions and 0 deletions.
13 changes: 13 additions & 0 deletions 01_basic.py
@@ -0,0 +1,13 @@
from langchain.llms import OpenAI
from dotenv import load_dotenv

# Load the OpenAI API key (OPENAI_API_KEY) from a local .env file
load_dotenv()

# Initialize the LLM
llm = OpenAI(model="text-davinci-003", temperature=0.9)

# The Prompt
prompt = "Suggest a personalized workout routine for someone looking to improve cardiovascular endurance and prefers outdoor activities."

# Pass the prompt to the LLM and print the response
print(llm(prompt))
19 changes: 19 additions & 0 deletions 02_chain.py
@@ -0,0 +1,19 @@
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from dotenv import load_dotenv

load_dotenv()

llm = OpenAI(model="text-davinci-003", temperature=0.9)
prompt = PromptTemplate(
    input_variables=["product"],
    template="What is a good name for a company that makes {product}?",
)

chain = LLMChain(llm=llm, prompt=prompt, verbose=True)

# Run the chain, specifying only the input variable
product = chain.run(input("Enter your product name: "))

print(product)
9 changes: 9 additions & 0 deletions 03_track_usage.py
@@ -0,0 +1,9 @@
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
from dotenv import load_dotenv

# Load the OpenAI API key from a local .env file, as in the other scripts
load_dotenv()

llm = OpenAI(model_name="text-davinci-003", n=2, best_of=2)

with get_openai_callback() as cb:
    result = llm("Tell me a joke")
    cost = cb.total_cost
    print("$", round(cost, 5))
58 changes: 58 additions & 0 deletions 04_few_short.py
@@ -0,0 +1,58 @@
from langchain import PromptTemplate
from langchain import FewShotPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain

# create our list of examples
examples = [
    {
        "query": "What's the weather like?",
        "answer": "It's raining cats and dogs, better bring an umbrella!"
    },
    {
        "query": "How old are you?",
        "answer": "Age is just a number, but I'm timeless."
    }
]

# create an example template
example_template = """
User: {query}
AI: {answer}
"""

# create a prompt example from above template
example_prompt = PromptTemplate(
    input_variables=["query", "answer"],
    template=example_template
)

# now break our previous prompt into a prefix and suffix
# the prefix is our instructions
prefix = """The following are excerpts from conversations with an AI
assistant. The assistant is known for its humor and wit, providing
entertaining and amusing responses to users' questions. Here are some
examples:
"""
# and the suffix is our user input and output indicator
suffix = """
User: {query}
AI: """

# now create the few-shot prompt template
few_shot_prompt_template = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    prefix=prefix,
    suffix=suffix,
    input_variables=["query"],
    example_separator="\n\n"
)

# load the model
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)

chain = LLMChain(llm=chat, prompt=few_shot_prompt_template, verbose=True)

answer = chain.run(input("Ask your question: "))

print(answer)
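
# A debugging sketch (not part of the original commit): FewShotPromptTemplate
# can render the assembled prompt without calling the model, which makes it
# easy to check how the prefix, examples, and suffix are stitched together.
print(few_shot_prompt_template.format(query="What's the meaning of life?"))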
31 changes: 31 additions & 0 deletions 05_question_answering.py
@@ -0,0 +1,31 @@
from langchain import PromptTemplate
from langchain import HuggingFaceHub, LLMChain
from dotenv import load_dotenv

load_dotenv()

template = """Question: {question}
Answer: """
prompt = PromptTemplate(
    template=template,
    input_variables=['question']
)

# user question
question = input("Enter your question: ")

# initialize Hub LLM
hub_llm = HuggingFaceHub(
    repo_id='google/flan-t5-large',
    model_kwargs={'temperature': 0}
)

# create prompt template > LLM chain
llm_chain = LLMChain(
    prompt=prompt,
    llm=hub_llm
)

# run the chain with the user's question and print the answer
print(llm_chain.run(question))
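
# A batching sketch (not part of the original commit): LLMChain.apply takes a
# list of input dicts and returns one output dict per question, so several
# questions can be answered in a single pass. The questions are illustrative.
qa_batch = [
    {"question": "What is the capital of France?"},
    {"question": "What is the largest mammal on Earth?"},
]
print(llm_chain.apply(qa_batch))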
14 changes: 14 additions & 0 deletions 06_text_summarization.py
@@ -0,0 +1,14 @@
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

summarization_template = "Summarize the following text to one sentence: {text}"
summarization_prompt = PromptTemplate(input_variables=["text"], template=summarization_template)
summarization_chain = LLMChain(llm=llm, prompt=summarization_prompt, verbose=True)

text = "LangChain provides many modules that can be used to build language model applications. Modules can be combined to create more complex applications, or be used individually for simple applications. The most basic building block of LangChain is calling an LLM on some input. Let’s walk through a simple example of how to do this. For this purpose, let’s pretend we are building a service that generates a company name based on what the company makes."
summarized_text = summarization_chain.predict(text=text)

print(summarized_text)
21 changes: 21 additions & 0 deletions 07_movies_finder.py
@@ -0,0 +1,21 @@
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate,
)

# Before executing the following code, make sure to have
# your OpenAI key saved in the "OPENAI_API_KEY" environment variable.
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

template = "You are an assistant that helps users find information about movies."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "Find information about the movie {movie_title}."
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

response = chat(chat_prompt.format_prompt(movie_title="50 shades of grey").to_messages())

print(response.content)
14 changes: 14 additions & 0 deletions 08_chat_models.py
@@ -0,0 +1,14 @@
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
    HumanMessage,
    SystemMessage
)

chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)

messages = [
    SystemMessage(content="You are a helpful assistant that translates English to French."),
    HumanMessage(content="Translate the following sentence: I love programming.")
]

# invoke the chat model and print the translated sentence
response = chat(messages)
print(response.content)
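
# A follow-up sketch (not part of the original commit): chat models can also
# process several conversations in one call with .generate(), which takes a
# list of message lists and returns an LLMResult. The second pair is illustrative.
batch_messages = [
    [
        SystemMessage(content="You are a helpful assistant that translates English to French."),
        HumanMessage(content="Translate the following sentence: I love programming.")
    ],
    [
        SystemMessage(content="You are a helpful assistant that translates French to English."),
        HumanMessage(content="Translate the following sentence: J'aime la programmation.")
    ],
]
result = chat.generate(batch_messages)
for generations in result.generations:
    print(generations[0].text)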
64 changes: 64 additions & 0 deletions 09_news_summarizer.py
@@ -0,0 +1,64 @@
# Import necessary libraries
import json
from dotenv import load_dotenv
import requests
from newspaper import Article
from langchain.schema import HumanMessage
from langchain.chat_models import ChatOpenAI

# Load environment variables
load_dotenv()

# Set headers for requests
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
}

# Specify the URL of the article to summarize
article_url = "https://www.artificialintelligence-news.com/2022/01/25/meta-claims-new-ai-supercomputer-will-set-records/"

# Create a session
session = requests.Session()

# Fetch the article, exiting early on failure so `article` is always defined below
try:
    response = session.get(article_url, headers=headers, timeout=10)

    if response.status_code == 200:
        article = Article(article_url)
        article.download()
        article.parse()

        # print(f"Title: {article.title}")
        # print(f"Text: {article.text}")

    else:
        print(f"Failed to fetch article at {article_url}")
        raise SystemExit(1)
except Exception as e:
    print(f"Error occurred while fetching article at {article_url}: {e}")
    raise SystemExit(1)

# Load the model
chat = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0)

# Prepare the prompt
template = """You are an advanced AI assistant that summarizes online articles into bulleted lists.
Here's the article you need to summarize.
==================
Title: {article_title}
{article_text}
==================
Now, provide a summarized version of the article in a bulleted list format.
"""

# Format the prompt with the article's title and text
prompt = template.format(article_title=article.title, article_text=article.text)


# Generate summary
messages = [HumanMessage(content=prompt)]
summary = chat(messages)
print(summary.content)
18 changes: 18 additions & 0 deletions 10_gpt4all.py
@@ -0,0 +1,18 @@
# Import modules
from langchain.llms import GPT4All
from langchain import PromptTemplate, LLMChain
# CallbackManager moved here from langchain.callbacks.base in newer releases
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = GPT4All(
    model="E:/Backup/Documents/Softwares/GPT4All/Models/ggml-wizardLM-7B.q4_2.bin",
    callback_manager=callback_manager,
    verbose=True,
)
llm_chain = LLMChain(prompt=prompt, llm=llm)

question = "What happens when it rains somewhere?"
llm_chain.run(question)
40 changes: 40 additions & 0 deletions 11_chain_prompting.py
@@ -0,0 +1,40 @@
from langchain import PromptTemplate, LLMChain
from langchain.llms import OpenAI

# Initialize LLM
llm = OpenAI(model_name="text-davinci-003", temperature=0)

# Prompt 1
template_question = """What is the name of the famous scientist who developed the theory of general relativity?
Answer: """
prompt_question = PromptTemplate(
    template=template_question,
    input_variables=[]
)

# Prompt 2
template_fact = """Provide a brief description of {scientist}'s theory of general relativity.
Answer: """
prompt_fact = PromptTemplate(
    input_variables=["scientist"],
    template=template_fact
)

# Create the LLMChain for the first prompt
chain_question = LLMChain(llm=llm, prompt=prompt_question)

# Run the LLMChain for the first prompt with an empty dictionary
response_question = chain_question.run({})

# Extract the scientist's name from the response
scientist = response_question.strip()

# Create the LLMChain for the second prompt
chain_fact = LLMChain(llm=llm, prompt=prompt_fact)

# Input data for the second prompt
input_data = {"scientist": scientist}

# Run the LLMChain for the second prompt
response_fact = chain_fact.run(input_data)

print("Scientist:", scientist)
print("Fact:", response_fact)