Skip to content

Commit

Permalink
using gpt-4o-mini
Browse files Browse the repository at this point in the history
  • Loading branch information
shashankdeshpande committed Jul 19, 2024
1 parent a3f6fef commit 04608ac
Show file tree
Hide file tree
Showing 8 changed files with 62 additions and 45 deletions.
7 changes: 3 additions & 4 deletions pages/1_💬_basic_chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
import streamlit as st
from streaming import StreamHandler

from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain

st.set_page_config(page_title="Chatbot", page_icon="💬")
Expand All @@ -13,11 +12,11 @@
class BasicChatbot:

def __init__(self):
self.openai_model = utils.configure_openai()
utils.sync_st_session()
self.llm = utils.configure_llm()

def setup_chain(self):
llm = ChatOpenAI(model_name=self.openai_model, temperature=0, streaming=True)
chain = ConversationChain(llm=llm, verbose=True)
chain = ConversationChain(llm=self.llm, verbose=True)
return chain

@utils.enable_chat_history
Expand Down
7 changes: 3 additions & 4 deletions pages/2_⭐_context_aware_chatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
import streamlit as st
from streaming import StreamHandler

from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

Expand All @@ -14,13 +13,13 @@
class ContextChatbot:

def __init__(self):
self.openai_model = utils.configure_openai()
utils.sync_st_session()
self.llm = utils.configure_llm()

@st.cache_resource
def setup_chain(_self):
memory = ConversationBufferMemory()
llm = ChatOpenAI(model_name=_self.openai_model, temperature=0, streaming=True)
chain = ConversationChain(llm=llm, memory=memory, verbose=True)
chain = ConversationChain(llm=_self.llm, memory=memory, verbose=True)
return chain

@utils.enable_chat_history
Expand Down
11 changes: 6 additions & 5 deletions pages/3_🌐_chatbot_with_internet_access.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,12 @@
import streamlit as st

from langchain import hub
from langchain_openai import OpenAI
from langchain_openai import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_community.callbacks import StreamlitCallbackHandler
from langchain.agents import AgentExecutor, Tool, create_react_agent
from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.tools import Tool

st.set_page_config(page_title="ChatWeb", page_icon="🌐")
st.header('Chatbot with Internet Access')
Expand All @@ -16,7 +17,8 @@
class InternetChatbot:

def __init__(self):
self.openai_model = utils.configure_openai()
utils.sync_st_session()
self.llm = utils.configure_llm()

@st.cache_resource(show_spinner='Connecting..')
def setup_agent(_self):
Expand All @@ -34,9 +36,8 @@ def setup_agent(_self):
prompt = hub.pull("hwchase17/react-chat")

# Setup LLM and Agent
llm = OpenAI(temperature=0, streaming=True)
memory = ConversationBufferMemory(memory_key="chat_history")
agent = create_react_agent(llm, tools, prompt)
agent = create_react_agent(_self.llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, memory=memory, verbose=True)
return agent_executor, memory

Expand Down
8 changes: 4 additions & 4 deletions pages/4_📄_chat_with_your_documents.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@
from langchain.chains import ConversationalRetrievalChain
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import DocArrayInMemorySearch
from langchain_text_splitters import RecursiveCharacterTextSplitter

st.set_page_config(page_title="ChatPDF", page_icon="📄")
st.header('Chat with your documents (Basic RAG)')
Expand All @@ -19,7 +19,8 @@
class CustomDataChatbot:

def __init__(self):
self.openai_model = utils.configure_openai()
utils.sync_st_session()
self.llm = utils.configure_llm()

def save_file(self, file):
folder = 'tmp'
Expand Down Expand Up @@ -65,9 +66,8 @@ def setup_qa_chain(self, uploaded_files):
)

# Setup LLM and QA chain
llm = ChatOpenAI(model_name=self.openai_model, temperature=0, streaming=True)
qa_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
llm=self.llm,
retriever=retriever,
memory=memory,
return_source_documents=True,
Expand Down
7 changes: 3 additions & 4 deletions pages/5_🛢_chat_with_sql_db.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,8 @@
class SqlChatbot:

def __init__(self):
self.openai_model = utils.configure_openai()
utils.sync_st_session()
self.llm = utils.configure_llm()

def setup_db(_self, db_uri):
if db_uri == 'USE_SAMPLE_DB':
Expand All @@ -33,10 +34,8 @@ def setup_db(_self, db_uri):
return db

def setup_sql_agent(_self, db):
llm = ChatOpenAI(model_name=_self.openai_model, temperature=0, streaming=True)

agent = create_sql_agent(
llm=llm,
llm=_self.llm,
db=db,
top_k=10,
verbose=True,
Expand Down
21 changes: 11 additions & 10 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
langchain==0.1.17
langchain_community==0.0.36
langchain_openai==0.1.4
langchainhub==0.1.15
streamlit==1.33.0
openai==1.25.0
duckduckgo-search==5.3.0
pypdf==4.2.0
sentence-transformers==2.7.0
langchain==0.2.9
langchain_community==0.2.7
langchain_core==0.2.21
langchain_openai==0.1.17
langchain_text_splitters==0.2.2
openai==1.35.15
SQLAlchemy==2.0.31
streamlit==1.36.0
duckduckgo-search==6.2.1
pypdf==4.3.0
sentence-transformers==3.0.1
docarray==0.40.0
SQLAlchemy==2.0.29
2 changes: 1 addition & 1 deletion streaming.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from langchain.callbacks.base import BaseCallbackHandler
from langchain_core.callbacks import BaseCallbackHandler

class StreamHandler(BaseCallbackHandler):

Expand Down
44 changes: 31 additions & 13 deletions utils.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import os
import openai
import random
import streamlit as st
from datetime import datetime
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatOllama

#decorator
def enable_chat_history(func):
Expand Down Expand Up @@ -40,39 +41,56 @@ def display_msg(msg, author):
st.session_state.messages.append({"role": author, "content": msg})
st.chat_message(author).write(msg)

def configure_openai():
def choose_custom_openai_key():
openai_api_key = st.sidebar.text_input(
label="OpenAI API Key",
type="password",
value=st.session_state['OPENAI_API_KEY'] if 'OPENAI_API_KEY' in st.session_state else '',
placeholder="sk-..."
placeholder="sk-...",
key="SELECTED_OPENAI_API_KEY"
)
if openai_api_key:
st.session_state['OPENAI_API_KEY'] = openai_api_key
os.environ['OPENAI_API_KEY'] = openai_api_key
else:
if not openai_api_key:
st.error("Please add your OpenAI API key to continue.")
st.info("Obtain your key from this link: https://platform.openai.com/account/api-keys")
st.stop()

model = "gpt-3.5-turbo"
model = "gpt-4o-mini"
try:
client = openai.OpenAI()
client = openai.OpenAI(api_key=openai_api_key)
available_models = [{"id": i.id, "created":datetime.fromtimestamp(i.created)} for i in client.models.list() if str(i.id).startswith("gpt")]
available_models = sorted(available_models, key=lambda x: x["created"])
available_models = [i["id"] for i in available_models]

model = st.sidebar.selectbox(
label="Model",
options=available_models,
index=available_models.index(st.session_state['OPENAI_MODEL']) if 'OPENAI_MODEL' in st.session_state else 0
key="SELECTED_OPENAI_MODEL"
)
st.session_state['OPENAI_MODEL'] = model
except openai.AuthenticationError as e:
st.error(e.body["message"])
st.stop()
except Exception as e:
print(e)
st.error("Something went wrong. Please try again later.")
st.stop()
return model
return model, openai_api_key

def configure_llm():
    """Render the sidebar LLM picker and return the configured chat model.

    Three choices are offered: the app-hosted gpt-4o-mini (key taken from
    st.secrets), a llama3 model served from an Ollama endpoint, or any gpt
    model used with the visitor's own OpenAI API key.

    Returns:
        A LangChain chat-model instance (ChatOpenAI or ChatOllama),
        configured for streaming with temperature 0 where applicable.
    """
    selection = st.sidebar.radio(
        label="LLM",
        options=["gpt-4o-mini","llama3:8b","use your openai api key"],
        key="SELECTED_LLM"
    )

    if selection == "gpt-4o-mini":
        # App-owned key. NOTE(review): assumes OPENAI_API_KEY is present in
        # st.secrets — confirm deployment config.
        return ChatOpenAI(model_name="gpt-4o-mini", temperature=0, streaming=True, api_key=st.secrets["OPENAI_API_KEY"])
    if selection == "llama3:8b":
        # NOTE(review): assumes OLLAMA_ENDPOINT is present in st.secrets.
        return ChatOllama(model="llama3", base_url=st.secrets["OLLAMA_ENDPOINT"])
    # Remaining option: visitor supplies their own key and picks a gpt model.
    model, openai_api_key = choose_custom_openai_key()
    return ChatOpenAI(model_name=model, temperature=0, streaming=True, api_key=openai_api_key)

def sync_st_session():
    """Write every st.session_state entry back to itself.

    NOTE(review): this self-assignment looks like the common Streamlit
    trick for keeping widget-backed values alive across page switches in
    a multipage app — confirm against the pages that rely on it.
    """
    # Snapshot the keys first so we never mutate the mapping mid-iteration.
    for key in list(st.session_state.keys()):
        st.session_state[key] = st.session_state[key]

0 comments on commit 04608ac

Please sign in to comment.