From 27444f6e42dbfd140827ed537413dcee598434dc Mon Sep 17 00:00:00 2001 From: DJ Papzin Date: Mon, 30 Oct 2023 15:10:58 +0200 Subject: [PATCH] Added projects files directly to main repo --- .../screenshot.jpeg | Bin 0 -> 48606 bytes .../chat_models.py | 14 ++ .../few_short.py | 58 ++++++ .../gpt4all.py | 18 ++ .../question_answering.py | 31 +++ .../text_summarization.py | 14 ++ .../track_usage.py | 9 + .../AI-Powered News Summarizer | 1 + .../chain_prompting.py | 40 ++++ .../news_summarizer_ output_parsers.py | 58 ++++++ .../news_summarizer_extended copy.py | 79 ++++++++ .../Character_Text_Splitter.py | 14 ++ .../Cohere_embeddings.py | 29 +++ ...ustomer Support Question Answering Chatbot | 1 + .../DataChad/.streamlit/secrets.toml | 3 + .../__pycache__/constants.cpython-310.pyc | Bin .../__pycache__/utils.cpython-310.pyc | Bin .../DataChad/app.py | 0 .../DataChad/constants.py | 0 .../DataChad/requirements.txt | 0 .../DataChad/utils.py | 0 .../Deep_Lake_Vector_Store_embeddings.py | 41 ++++ .../Embedding_Models.py | 9 + .../Google Drive loader.py | 8 + .../MarkdownTextSplitter.py | 41 ++++ .../NLTK_Text_Splitter.py | 9 + .../PyPDFLoader.py | 6 + .../RAG_Chatbot/app.py | 181 ++++++++++++++++++ .../RAG_Chatbot/chatbot.py | 108 +++++++++++ .../RAG_Chatbot/requirements.txt | 9 + .../Recursive_Character_Text_Splitter.py | 16 ++ .../SeleniumURLLoader.py | 11 ++ ...Similarity_search_and_vector_embeddings.py | 32 ++++ .../SpacyTextSplitter.py | 14 ++ .../TextLoader.py | 10 + .../TokenTextSplitter.py | 12 ++ .../indexes_retrievers.py | 87 +++++++++ .../Adding_Transcripts_to_Deep_Lake.py | 106 ++++++++++ .../Jarvis Voice Assistant | 1 + .../LLMChain.py | 128 +++++++++++++ .../Twitter_Algorithm/app.py | 50 ++--- .../Twitter_Algorithm/requirements.txt | 4 + .../Twitter_Algorithm/the-algorithm-new | 0 .../Youtube_Summarizer.py | 98 ++++++++++ .../self-critique-chain/app.py | 0 .../self-critique-chain/real_world_example.py | 0 .../yt-video-summarizer | 1 + .../AI_News_Chatbot/app.py 
| 0 .../AI_News_Chatbot/chatbot_app.py | 0 .../AI_News_Chatbot/requirements.txt} | 0 .../Adding_Memory/ConversationBufferMemory.py | 0 .../Adding_Memory/ConversationChain.py | 0 .../Chat-with-Github-Repo/.env.example | 0 .../Chat-with-Github-Repo/.flake8 | 0 .../.vscode/extensions.json | 0 .../.vscode/settings.json | 0 .../Chat-with-Github-Repo/LICENSE | 0 .../Chat-with-Github-Repo/README.md | 0 .../dev-requirements.txt | 0 .../Chat-with-Github-Repo/pyproject.toml | 0 .../Chat-with-Github-Repo/requirements.txt | 0 .../Chat-with-Github-Repo/src/__init__.py | 0 .../Chat-with-Github-Repo/src/main.py | 0 .../src/utils/__init__.py | 0 .../Chat-with-Github-Repo/src/utils/chat.py | 0 .../src/utils/process.py | 0 .../Financial_Data_Chatbot/app.py | 4 +- .../Financial_Data_Chatbot/requirements.txt | 0 .../ConversationBufferMemory.py | 0 .../ConversationBufferWindowMemory.py | 0 .../ConversationSummaryMemory.py | 0 .../Token_count.py | 0 .../Custom_Document_Retrieval_Tool/app.py | 0 .../GoogleSearchAPIWrapper.py | 0 .../Python_REPL.py | 0 .../Wikipedia_WolframAlpha.py | 0 .../__pycache__/requests.cpython-310.pyc | Bin .../__pycache__/wikipedia.cpython-310.pyc | Bin .../google_search.py | 0 .../requirements.txt | 0 .../LangChain_Agents_and_Toolkits/serpapi.py | 0 .../using_WolframAlpha.py | 0 .../using_requests.py | 0 .../using_wikipedia.py | 0 .../Recreating_the_Bing_Chatbot/app.py | 0 .../Recreating_the_Bing_Chatbot/google_api.py | 0 .../requirements.txt | 0 .../Web-Based-Question-Answering/app.py | 0 .../What-Are-Agents/app.py | 0 .../What-Are-Agents/requirements.txt | 0 .../What-Are-Agents/scifi_story_generator.py | 0 .../Generate_Search_Queries.py | 0 .../supercharge_blog_posts/requirements.txt | 0 .../autonomous_agent.py | 0 .../Agent-Simulation-Projects/camel.py | 0 .../requirements.txt | 0 .../Autonomous-Agents/AutoGPT.py | 0 .../Autonomous-Agents/babyAGI.py | 0 .../Autonomous-Agents/requirements.txt | 0 Projects/FableForge/Sales_Copilot.py | 87 +++++++++ 
Projects/Movie Finder AI | 1 + Projects/Voice_Assistant/chat.py | 136 ------------- Projects/Voice_Assistant/scrape.py | 113 ----------- .../yt-video-summarizer/Youtube_Summarizer.py | 133 ------------- Projects/yt-video-summarizer/app.py | 125 ------------ Projects/yt-video-summarizer/app_aider.py | 135 ------------- Projects/yt-video-summarizer/requirements.txt | 6 - 107 files changed, 1410 insertions(+), 681 deletions(-) create mode 100644 Projects/1. LangChain 101 - from Zero to Hero/screenshot.jpeg create mode 100644 Projects/2. Large Language Models and LangChain/chat_models.py create mode 100644 Projects/2. Large Language Models and LangChain/few_short.py create mode 100644 Projects/2. Large Language Models and LangChain/gpt4all.py create mode 100644 Projects/2. Large Language Models and LangChain/question_answering.py create mode 100644 Projects/2. Large Language Models and LangChain/text_summarization.py create mode 100644 Projects/2. Large Language Models and LangChain/track_usage.py create mode 160000 Projects/3. Learning How to Prompt/AI-Powered News Summarizer create mode 100644 Projects/3. Learning How to Prompt/chain_prompting.py create mode 100644 Projects/3. Learning How to Prompt/news_summarizer_ output_parsers.py create mode 100644 Projects/3. Learning How to Prompt/news_summarizer_extended copy.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/Character_Text_Splitter.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/Cohere_embeddings.py create mode 160000 Projects/4. Keeping Knowledge Organized with Indexes/Customer Support Question Answering Chatbot create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/DataChad/.streamlit/secrets.toml rename Projects/{ => 4. Keeping Knowledge Organized with Indexes}/DataChad/__pycache__/constants.cpython-310.pyc (100%) rename Projects/{ => 4. 
Keeping Knowledge Organized with Indexes}/DataChad/__pycache__/utils.cpython-310.pyc (100%) rename Projects/{ => 4. Keeping Knowledge Organized with Indexes}/DataChad/app.py (100%) rename Projects/{ => 4. Keeping Knowledge Organized with Indexes}/DataChad/constants.py (100%) rename Projects/{ => 4. Keeping Knowledge Organized with Indexes}/DataChad/requirements.txt (100%) rename Projects/{ => 4. Keeping Knowledge Organized with Indexes}/DataChad/utils.py (100%) create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/Deep_Lake_Vector_Store_embeddings.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/Embedding_Models.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/Google Drive loader.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/MarkdownTextSplitter.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/NLTK_Text_Splitter.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/PyPDFLoader.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/RAG_Chatbot/app.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/RAG_Chatbot/chatbot.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/RAG_Chatbot/requirements.txt create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/Recursive_Character_Text_Splitter.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/SeleniumURLLoader.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/Similarity_search_and_vector_embeddings.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/SpacyTextSplitter.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/TextLoader.py create mode 100644 Projects/4. Keeping Knowledge Organized with Indexes/TokenTextSplitter.py create mode 100644 Projects/4. 
Keeping Knowledge Organized with Indexes/indexes_retrievers.py create mode 100644 Projects/5. Combining Components Together with Chains/Adding_Transcripts_to_Deep_Lake.py create mode 160000 Projects/5. Combining Components Together with Chains/Jarvis Voice Assistant create mode 100644 Projects/5. Combining Components Together with Chains/LLMChain.py rename Projects/{ => 5. Combining Components Together with Chains}/Twitter_Algorithm/app.py (51%) create mode 100644 Projects/5. Combining Components Together with Chains/Twitter_Algorithm/requirements.txt rename Projects/{ => 5. Combining Components Together with Chains}/Twitter_Algorithm/the-algorithm-new (100%) create mode 100644 Projects/5. Combining Components Together with Chains/Youtube_Summarizer.py rename Projects/{ => 5. Combining Components Together with Chains}/self-critique-chain/app.py (100%) rename Projects/{ => 5. Combining Components Together with Chains}/self-critique-chain/real_world_example.py (100%) create mode 160000 Projects/5. Combining Components Together with Chains/yt-video-summarizer rename Projects/{ => 6. Giving Memory to LLMs}/AI_News_Chatbot/app.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/AI_News_Chatbot/chatbot_app.py (100%) rename Projects/{AI_News_Chatbot/Requirements.txt => 6. Giving Memory to LLMs/AI_News_Chatbot/requirements.txt} (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Adding_Memory/ConversationBufferMemory.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Adding_Memory/ConversationChain.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/.env.example (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/.flake8 (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/.vscode/extensions.json (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/.vscode/settings.json (100%) rename Projects/{ => 6. 
Giving Memory to LLMs}/Chat-with-Github-Repo/LICENSE (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/README.md (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/dev-requirements.txt (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/pyproject.toml (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/requirements.txt (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/src/__init__.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/src/main.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/src/utils/__init__.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/src/utils/chat.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Chat-with-Github-Repo/src/utils/process.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Financial_Data_Chatbot/app.py (97%) rename Projects/{ => 6. Giving Memory to LLMs}/Financial_Data_Chatbot/requirements.txt (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Types_of_Conversational_Memory/ConversationBufferMemory.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Types_of_Conversational_Memory/ConversationBufferWindowMemory.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Types_of_Conversational_Memory/ConversationSummaryMemory.py (100%) rename Projects/{ => 6. Giving Memory to LLMs}/Types_of_Conversational_Memory/Token_count.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/Custom_Document_Retrieval_Tool/app.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/GoogleSearchAPIWrapper.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/Python_REPL.py (100%) rename Projects/{ => 7. 
Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/Wikipedia_WolframAlpha.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/__pycache__/requests.cpython-310.pyc (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/__pycache__/wikipedia.cpython-310.pyc (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/google_search.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/requirements.txt (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/serpapi.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/using_WolframAlpha.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/using_requests.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/LangChain_Agents_and_Toolkits/using_wikipedia.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/Recreating_the_Bing_Chatbot/app.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/Recreating_the_Bing_Chatbot/google_api.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/Recreating_the_Bing_Chatbot/requirements.txt (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/Web-Based-Question-Answering/app.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/What-Are-Agents/app.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/What-Are-Agents/requirements.txt (100%) rename Projects/{ => 7. 
Making LLMs Interact with the World Using Tools}/What-Are-Agents/scifi_story_generator.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/supercharge_blog_posts/Generate_Search_Queries.py (100%) rename Projects/{ => 7. Making LLMs Interact with the World Using Tools}/supercharge_blog_posts/requirements.txt (100%) rename Projects/{ => 8. Using Language Model as Reasoning Engines with Agents}/Agent-Simulation-Projects/autonomous_agent.py (100%) rename Projects/{ => 8. Using Language Model as Reasoning Engines with Agents}/Agent-Simulation-Projects/camel.py (100%) rename Projects/{ => 8. Using Language Model as Reasoning Engines with Agents}/Agent-Simulation-Projects/requirements.txt (100%) rename Projects/{ => 8. Using Language Model as Reasoning Engines with Agents}/Autonomous-Agents/AutoGPT.py (100%) rename Projects/{ => 8. Using Language Model as Reasoning Engines with Agents}/Autonomous-Agents/babyAGI.py (100%) rename Projects/{ => 8. Using Language Model as Reasoning Engines with Agents}/Autonomous-Agents/requirements.txt (100%) create mode 100644 Projects/FableForge/Sales_Copilot.py create mode 160000 Projects/Movie Finder AI delete mode 100644 Projects/Voice_Assistant/chat.py delete mode 100644 Projects/Voice_Assistant/scrape.py delete mode 100644 Projects/yt-video-summarizer/Youtube_Summarizer.py delete mode 100644 Projects/yt-video-summarizer/app.py delete mode 100644 Projects/yt-video-summarizer/app_aider.py delete mode 100644 Projects/yt-video-summarizer/requirements.txt diff --git a/Projects/1. LangChain 101 - from Zero to Hero/screenshot.jpeg b/Projects/1. 
LangChain 101 - from Zero to Hero/screenshot.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..31bdd9f3afa986d5ad7b929f3c06abbb4d6782dd GIT binary patch literal 48606 zcmeFZ2T)tfvM4HV=K$UqFeYz<4Yo)oo4jo@7%&Er5!;9?f)QDy?L-a+447=9Kq8nR zk`Qc@WU@&DiJX%VSzv_V$>-kl?>VpPyn46lhxe;auhLpQ)4gVTS~I<7x@WDk@v~XL zuR0pq8h~@>0DyD!18_EfZd_Yc)#mvN0}btG>i`z~ch|;QWXG%Ck)a0P5ZY0C$N0m3Qwm z0C4ji08l&dUwQwflUFZcFaK8D1^W3H2L}LP7Xbh;m;(UJ-vNMYmVcwsPya%;-{>SR zx?CRg!x7*Lum}7G&<3~zAOKN1CINT^5C_PdO#oB@=YRPLKhNjsaN*L0pK#^UrHhxY zUb%Md>XoZkuidzLsPNbFflOR{PkC+U$5P`#eC~mW;*`s&qB`qyz~5pEA$(G zy?*sNJ@>x}XMX{huKa>JPdR^%5AX}qx${it&e{Pybanr7{@hRadvh;czIf&8g=^=3 z`AN6?6>#ptx%1~PT)z0r1xCip7q0@&{c`@o#Y>l&Ze3x%JIKO%n@`**@#+&p<2;F% z{L-F2Y%;QspL+Ygev?pHRb9h=Pg3fIiK#6tDLHp|gK~!tDBd1aEjFX z9~R<|^Do-nB>M}Z6qDUl?@2yAh|^khLHEMMu8peisH<9DPU#J_H@Z?=NX_MmKU-&)sP|Hh>=Zr%ht$oT^u1;GaBrJ`K4BF zP+4g*vWFKJt^{oInzxTI1JEB2{|YX=Rdu7(Pfk6#ojkGmFlmI{IlaQ|S+L);eOn+O zc2bbR1(w+`$>%j+1o5g>^`+P@!7cOaeq=qSI`{cazJ9@8f0TdULl}mbj#x6@UU@70 zBUV0{>Wp0jr$M#I6#)T<^|TIsLnc>*%IguVH09}<%4%Cm zh6-{|<@*HB2Y78YshyOqqYkka3=sB2C#a3VW-j?)aB>T8 z;(b4=gf*Nq2$5m2omF+(@q((2xPA1N(!Xv}9G+I>++Auwk~ z;l-Qsghsg+y7|1i3%%6Ts~V;Dxw}vldtb2%ZL~cxL&hEoH|cE*zn3T%ItP^St(nn*&t%hk(hC41iNAG0Y|@pVHb%YF*N2ntVC}IXbxqO*-VN-YlFD zdL>Ks^fGf5Eoo8p*rD9ZkL@u2vN~JG%xY09s8PWzx1NbGeSk;u@%baaQlPbLP7INq zc8@IBtSziS#~#HW#TxL?5mVW)KI?LuSvwEqYtu|Y@(GiI>|H4oFV&fPOS!mlefseD z(y?J7pLfF~7w0`$IttBS&x)Sxoq22T@+wXPiHP#g+a1|@*Rmyh2Kb?Ss=C7LE<$W+ zJO&?ZNQ_rQo)4us7s9c-N_ew!ugqq;;Y1>vfr!D;Fx~rC`QOGJd`C(q&&f9iscdoS z$9C*N%Uck^swW#BG|3otLU1`%Sca2Tzuz+8b3+-L@rAR9g~m*!PL?CKd&%zM`Gvp) zxrGw)&O&y|$(0pngQ@`s35CFEpjuEzz$!XUPRYmP1VhR=OKh#TDd+UQez}8#G^ORP>p6 zFj5KjMTl0&>hN2Ua{?+{_~hia^%Y@~F*U9Wj~bP`-A)Nv$W(J@Zrq3&Ry?U+7d)z1 zu5V&mA!UgE_C?-r8xIsp{zh-G7{ae@0?R*UD zl~1_*%xVo}XTy}W(gU(TXnS_dpn9uHV8XZj8x)=qnK4qo>|E@*YiuQwzKz2B>Bny= ztlF={tu=Y#Is#V6DZW2Q;x*V5nHBwYTJwkn=B}4m#(^uTccM&KFS@)^M^? z5d?a?@>g2KihIrA2M^gl+}k*Hi8<~*w<{*O)lrp);_NLZfAvZ?4yvtUA{LjrL;_f? 
zd&W9gnd20%NAXr&*HsCyi*4*tU?jXifQ>C>eoK7_C!#Rl5=fCTQ8Iaw(7|2HpG92qV{0LZ_I>#Fh^dlc*$#E|nVc}uEs%D-PVyO942g9lk#%mw=N{4q zLZ=$I2dBL;h-_sgA>6DH=U@#l1mD;-5k)y9Gb&t-SJzbGeZ4%|#fghHapp)+Yoe_Z zj1Jk|M?)R5J6s*w)?>d9SyY29yXW|#bhZ~gh7&Q#LzU6^@%74{*`*^nMcuk81X1%0 zP%ynYMx0rdB+Z0sbj;{eqKD-J3)I)hwA;!7D+{A@M-L&vc{cdaiZcKjbp|Na@LcXm zFZ5xVWAgLWCsm@T#$@K2a>fbIPp)=u)#*Ljyp}Fck;Qm)GdE1h(6DTY`D-_-l_hdQ zrRvZX>7?J|U-opp%t6=OGDploU(uS^0{0871*XvsR_1C5O@=$B3JdTptqt?quRwZl z^W5--JXu6@*N;aEsNT)Ounb|gKyJ>qu__51&uOK~-F^I;+IG@24rc%}<8P3=K9Mz3 zDA8)x;`FHU{eXMR8eO<3X);V9j=3;I%8%&TOzbiAO~vGiTO{4rWX|QZxc0&1%j&0z z^4O~8R&QkBDb*tG2%#CyRiVU3QfedwFOzWj=#>0AktWO^shlD0-O=~>Eu66>|gA-f3yk z9pX&za-A~(WWdqlXc^x2N|qzgv!l04)i=fvyP6Hei)acNySSE zj%zt1Ht}bGZ@V);@@U_Ahj=nhoe-z*!&wzHC~2F_`GHy+M7U!Rdg_{gpE^wbV_HrP z`XvNh9HYf`5MRhVJDsiKxq{0OMWu&rleY*DzYz186)}1VJx--OmoK8r@~K{P6k&4*IknIl8T?7c$t<1{z6e)MP53p}uj2@|i z>UFx2!UQ;xopw%#+Cp=h+YvQC)|NuJ_Hfhw7+`QRHGn4;g$mRhMrZWd^K0jS{R(qI z7q>TyXf_KL2TiCKm-?%PckMl z^8F0Oc&mTFO*9v85eVBG$IALW*97fL{9u1^hiAO;cs8hiuJUw9PCl6_$Y?&Y4@1fE zO&wQqtdgdTX+T{WD@J5@lgVENI3`W=^0{=7KX|2Fmc^~2zsJ%aYK7Yi$4BK=0ylw4V*a}V6E@V2+_qjOrDN967$Y#bpq6)XpKrGM?}|1HMBN=DY}3){=S z>P^FZ9}VQ>vr{Avo*~Gioc{_BZ5`XYrL=x}d^Nx(poQ@%r;*aLO#_Mq3F|{>4 zx15fyit1;D%2{?X)%RG&zEb!q`GoS-z#Nf$TIHtxuOf!i`jpLX?;0&cXft_`ujavE5CuHlGmqQ$O(fl~2`)ZxS22gu)KNQ!o+B^b8P1VG zQGy!rUO!;EV0Ba2(h*RQEcY^t^gdK&f>Vz0*16m=rAX!s;GK2@Md05say4u=Up6DQ z`x)3=iFJV?N48gzJo^RMAK_$b@)faV z9<$LSEm0+rQ#n;h)f$Mu^fB->rE^JdWR-;sXVMSAhHRZqBy7J4VrGz^t!-ZU9#_@E za|vSgWy@#lLFL(nIk_2cIo&NP7oCLg=mJQx!ne^)7nEuI#RVLB=`pB1>&|~;)%{Pb zbW8iMR?B%djvfa&t2Z~Hyc@STR4Ey*DSI2!!!XBv(ZLNp%^0hTS6+|>&$%yhFPP{T z`6Xy99|%6|_UeX(-%ycQarl#`6l`?&UDt5$yjz#a!PNz{h5uN%x)4_!WcV;YfmP*0 zrOLkbruZ3Pd$VEY3{bqNvK{*;RZ`~ixdx?NSQT00szCe`V@rc}oAQ#=nXSOudxMxI z6c~vU+~f~9;&!m*8#GuB=A=YZCc|#@Yn10_=vhH6*$T}a_Zsy+rdGRjYH=n)#1@c-a?rL z!%~s`@hY|lwfc&Ih|V#j*Ac8;_zX}5ImUC-2Ek{5d-kD}?lXXq8}>A)`n24P*A913 zszA-do@%w9K2?!uq+0szfmci2(+HdQ6Ej%jN6Tqs;JT1Wl 
zo^(NJ3cAsxSEsqh6IA+qaduV)vZ(T+it1SMSDzj_mDsLbl%EnwKC$Q$5;8jR@H^@o zAn{D<2J&y_y~N*-eYI(hX@0zQJT<*XJw9%rEoTJRsNgIOXsEO^fO4)~aDspA6sjg5 zs60^6X8d|;!2yGuS$X9egP&d`1f~$CR<5q@l83}{mEht{3>M!WNORWl%zJLGMHB{RQ?UFf&wV<@U zMCS};X_b(h@I2!)E{th-qM#5ObI#!2YV~vFY*j<6NT~N1>=nf%KD*f}s<;9!Z74T~ zM0EpU#rY1E6q`vUw+8Dn-=gVNC{`88Yb3|wv(#s|(sX1PU<|!&DrXV3@3NiEmG#%8 zeN9GiNlLtFfdJ2n&+yI$*&4^&lC)yrfp#l9Um-=VFT_{>A(0Gw2IX#TX-KYEJ3W?- z-^$V8P%&Pk;RgM&*01YWCpALe`F>7_?y+HsS&65PvZ}Bc`6n3Nvi(tU=^Le;6kH#i zUB@Qro}G&IdOB4t(qsZIbX2Z+BsZmfqu<4onJ{3$ILFHTaRnD6=1A_qY!ZRJiEDb| zqLSBH7}LDWoo2w9acdwuPdUrpa@cnEJWx=v!SL}4LJE|Z9D+Xs7*Fw^&{SzJ!-sBR zA*EG(UPJw%?cx39nL`{&zi$=Y)Y!P86a#i-Dscuiyzt#VDsT()54jeV2?WBT=uljY z7*zYeoC-N?c{{K80|WIuZr9HAVA2vs$Nu;>3)_X~U8CHyhcmSS^Z6qqwk{aE_ym)e z0*V{2E&oqdfD`Bp<39(pqv*XizRsG~* zWPw>r!up8Ii6U2%nBdF0K$}?wIoE6ttHGhoRQ7_(DUhcz$A?P8$F81r@8)G-CEeRi zfDa|UP$D||mfEyM75`|( zQt(Xtg^bPT?L#dDW=VLji1&DJtqh!OyaUNnfPhtQ`fA?2N|K$pTw~)>w$R&C;!WY_ ztUC&F?N6vD>fklqwjA?2e=K@s9`5@^6;FTMUNU5U(@Lc)uXMrq4Di^tWsHv>>;@Vv z3$;Dm!IMH==2`}7&CUR>p$9hyRi|}mX$PA7=4@*AhgOez=(?48Nes> zgloxQ#c@p;cXOS$tfjH#l=1v9rJV9^EYLhps-L(TT{Tau(~bXT2kkD+LZeExzK9!j zV4)H|a%|ZgC5PD_m2Jo28y;l(3ij=v<;q0fg)+qHXzv*y=eb8ls7?SyVZBc^bwbY* zA&~}^**S_S^BW9v=Gw2AZpXSg3X##Eh$g{sAnNc_OD%*=?^w)TQa@5jRJgFaEc9x( z#!>o;;L-TZ4li!JkM@9Uu$RocZ=f{eT5%vVF|*R6vc7PTQ`r6J9D(6071&Zf!Fz&B zI0Hx{G?fR%HdMyW0L0NV0G?kCM*)Re*Q)%8=Rbw&$Nf)h?w-BY9YQPrDraTX@6+9u zCdo%@6x+lKnrM9B0W9bwtAi@#cdMw0lv}ha;VVy0P6@hSPb8ZtZ?3O4oXAD)R2=rr zlY0GNZFRbZe$d!{^M>_Q8DnCLWb3^-7RoPob3zRQ-)5==@?CjfGUtVe94*0R$5d@VE;J2LV z2iWU0g|)2}i=4G|Z`^K`U+}#%z>(i+{uy8>XyTs-u;3Y>t>u9CG--LlI#mBS{0DyK z*pchV+A`@9K;iy=&S#YZCE1ff1KM>4&dHoAN8ScZb+IhJP;)KNctlQnS7HIR)_Is9 zqX|58%ff@?^h2E?r#nsQ=Z{r~L;G0H0N=kX`ZKy7QkUAJJbtVbjZ}7YSv1%kJ z-Ue>Ik`#t!^`|CqMEnHL>bg{?Ztna#;&Pzgw?pSlmaCC%*lF$)R~Tw#A z2309ZNj$a7KYbl8tPte8I!*A)cn+7E?63BpL-&N{q6LI=h(I7Pcw`W@-5fm=c-LYEcM(frQaQHG9vi zmm8C>ySs#{K%nGaFA!@vnYgIQw@Wwr&f()bW#ayBGk6P8(i%5bJuD0!(L!55X)Spf 
zhVrC={7;xd&%1?;=v__6XC+tpJxaE(7RdO9?-^im?mkvv?M}lU^<$_$e^RlEUGMpDq9|C=FAl)pR|};#48H9 zuX26xhHK=eBQO#tn$SykG3W69wSj=@dBjch_fg-MRJ~9eakw_>#Lj>G!hG4bgT<*O zI(uK$>x3xFoqnAnaRxBRNIe-;rAfVSp>&2m=jNr2tDhD+6)ShztnI2)Gf9$98t&K7j%TrAt^O&kBM zCC~KDK0+W`%sFQ1hRh^cG^N!^-h1$VC%9{>;(m|2SL2N2%W?`fV)~tnP-=XD|Bgz0 zm7h68aw;{Wa6>2B>zPBjX*KV3q$HK1@PqZ9@yd=1Vlu?KNx_`*k%TU;Waqw`yB}w& zW{=Ddh9u&w(sd3Kdd67dquO0vyL#ikzd4t}+uIVHt1~{} zvM;NJp$`|j+hYwdln|9L-s_YaO7%$EogEH8C_S?JFvJdqGf80g;L+C zgSLg21DZ39-I zc((w7K1hS71=ZK=+YW>~=>)Ar-by@uh>*QoXQXIXgt%n$csZAf;ubstZ6 zbL1?u>g02reU=?Pd_t66(iEi;V+r?S&8N|<@cSMrx0RaY*tU%x8&BpGh5Gs_7(=!E zc$F%=YSDUjk-znd5IEg=*0AD-FlobHV*@NQ8;IHg8C9PF9PnR8i5pd&yw5j+jZ#PC z%wcJFW&(xamcCs0{$hM-Vne^N+P2!tU-LXR1}k5#8%oye9*wq`r@Fwc9oyDeXS!v~ zjuYg^o5UFP4gGDHYgFuAw|fZ8Oe`?|97f1J0+UK5DC48S4)CFsURvBHj0%#C_LJFz zdru4f?w(ySEQJ`}Ot%;hT0$CX3x@LfG$be$m(MZD8L#~HCoL7zV4^(I0Od?B5uWbf zP*nT4u>`B*mZFHDcN+M;tAGvtf#$8SW~D1RuT!YD4=tkuy@P)UVbLV#(YS{orLl&j zmJ+5=mUR!6?y#iIEl>2`ZmKTaEG)9JD}&~MXLPNyI>OL~CerIGZP{G-3dV)^%cAQ} zIW_02e5Y5WWIhi#3cFEkFCZK!GV_a0svJldUTbm7IX`I%Y{?X2nd%Piu{0pGXs&XHHYy_rZ0FKa z*5OI~D!%1uWP=bO+4cx1_Vy!2g&;&SJOhZRP~}q20Nt4)m*wW#>(Z@<&H#25m6|6t zbU)p)CH>Li1*zVG-Vkl~Z5p*K7&H5}iUob@>InF*wwN%2s9Cf9GCmzaE~1vNf!W@} zJO^GQ{~T}qEv+upm6AsCY_L#=j3~6#vOg2hUY-sQ^7c#L>&!TR1y12#hNHAaCdhEhY$N1}W1A-E-n-YwB@Dr(YdEO9tzRPcDI`~GL z!jH3s*lf`aO23SP$(lNGQ2$C!)*$Bjh+=aU2LzP+PrA1e4M*samzxh5HTjE`Aj3H2aOCZJ%A&=H~J` z(+2PI@=65N_Q7!jd4{UEgNx_NtvdtQgCVaRNK-bKku`QlleguU!e?Pa-H{tt?k5ka zM2t63y!8Xd$w0{czjXS;SPOh()+p}!UEa-lj+!!RQ{Ci zC6aiSKJ@WAn5i^2)+GRWqLm&o1mY2W5&U^7*?Y!;yT9XWZt1wSB+^9}7HcU{T4j_d zn{OXo7G%}FVT-ozX#%mD#+j&(M{i!}^>u%b38-fgb@XN#7EZYD$6q#|y|mQ-o~t-M z*0cix(q{!325eh75><&dy1rpk$1F8mYhL6-aPi7K5cp0O@88n9%x z7*!%@7TsCMW|Y5rUCVcbgedbgwBO0t$zRjt3$0(;^;{6NrLC@(qPTMA7q&s-83_|@ zRYkgMSv#Ap0 zPw^7jS)dxveE`0Sw6lNIz21X{!4Z;)g<=8V>8I`6ErmuUaS7d{b3sNQjgOA9syl)U zvZ94-*Tymej_KYuS9qX83YaTa&3GI6YLYWWJY`W&)JpV7){Sa9F%!(Q_-(iZRj~x+ zLk}KXKSZRKJcjY7SA=PLjzknAGZEpJA^WCoJY7^o0e^Q7=2(Rjd;;t-!)hl~JoSZT 
zDnGpYh3;wWkkc=!m_&yE@-^)xJCoF=ejfGrKrYuXFNQjWP_G*&*3nINrE8gR0Wr2~ zd8nc@0JO72GTC4Mli>^x$SCf1!RFG0Wwq$Ougk#ZDbd}gT=LNUdCIlyqHm@fuD)i6 z6;|KHBik%c^IJTMjAn27!$3VrfV<&PWT$#UQYS)-=Dt`1eBknO@Q z)P9#&QK*G&IXuH+bk#mWUAa#muW*iV25>ph&zR~9UgwI_d=B?<3I_R(1zgTo>n}%c zV@tNAyIPTA;eWjkMkwV|AT=^Z9B*RqDa8T#(2|Y%4Wwp z1|3+Ra#@;t)b=(XwL7vESV4Xe(#~DsK>>~7?R$)?694KC7 zPlH$$mmk8KV(av|bzL%V1E1wwE|8VC z;T+ykQ>QJ76UR1z#Ioigc7}(Q#PsY^JX@}hzbIr3n;WMo$i3T}y;!ccQdnv#sOSR} z#KnF7L>E7&3}xa@NZ;&Q%nn%euIk}W?hl-7YUoDI|J(|Lc(r5WENI(3-srPD9g@aZ z!O4@|zmyuzZ^)_$%#^)oA7hWImtj_AZ&V^dkn{B(^ZW8ja;_~_{aHH#;ZP>uKO z^q?6%P@veg==rE(m`zP*<#5K^mc}iMwHxt>IWM`A&B5x)F1A4C)UQ*wq$}tFASRt& zrZ}dUNg0PBk)|8naq?nNY3L0-P4v6Xf^6(l>?W1P73>rR>+-w z7<%ra$k!1TZ=M2jLaMWaoz|KTnTcz)6mukPyGIo>!7R)cXI3yjX@Jn}#TLvrj-*;$ zZxv>0;O>sMMd=@!*D-#o)ORlJZU!!er5Y{5PnCcO$v|J<^*W;OK`?V3(6YlxSnqvS zY$bUhDN9bHPsk9&(XI{-gSGG!?#yu(+s6{dTp`oeJA;9a_I5fAlcmom$YbGf$$&(6 zHZpiKelBDyQv4mH#4q3bwA0we{ZPbz`_4vg#;5za6J?UBCfRG)5_!DbY|f=JMhya=!o6t-*a1nYZn|w=E|g9&)hC4|b>X|Ti#EHGdi%g? 
zfxp$1nk~HjxNXXB&DS7z7@&5v*RHnb_=@iK2~G?&S}*UOh-%DdHO2c2=d)YNMVaoy zjUOP(0=a21r67W`SQg{>mrH!B0e!ql}A-}t=wB7f?Kvttp+1l0u znCBsii*ma6_V=|Erj(LMgs+?LM-uy&@j!D1$ugN9ZkEZ?Lw=R5C|Y6JK6%`-K~x zO9F1gf?nzQx?9IR#?sZ# zVsT`@7B=iY{H<27n|OP)7*`PMA*wYU>lkear_Uq4f{Zl2(&vTX+GD^-#@aXrynvE5 zCwKNcuA3&*qS{h=7laRA2u6d)mtZJxPW+W(r_-=aZGvU>mTMVqv=Z{U|BnN!5AZsp z+3_UJC`i6z==qm(a<<>1%7b)JtYIQxEtfYx*c?7M-v9O@<6klgSVEgHCDjeG_O)%`W$&d(*4xH~_LeM@K?>heQ?w$F_^=b>r!{dcEJ7w-VZQcsn9?g4|P_o0CW3>;fVUIRNAw4K>H+pr4-*)A2 zf7j%*@e>%}KXzgNOBw!7I6w1tz|p(YJ(pH2HBvcdXdw~Wn&9bjG_%FZ@Fy$OhT zLtFXtobAjeFOgSB$I9I(#P9Z!c6w>zb&jI_1T1PtQ+fyq3|SKTd;@Uq^WV$;|L@g) zAMT8BndJA$Ajuk(YgE}=+hiX+JS^Kv&h$U<3$_Y5z40VrxMH}{VXX`0vgkTT5CqkG z4&yGS-HB)>!L)GbC{(MD5Le{Rn$2q~xzhO%Dm*`uYy26sPLHN&=}45-*|>2@(0H8z zOVmS+T5<=Jb*|A=i(Y*swJ>qMV0~j=fnWQk?%7mX1$T{uEGBGvfulx#n>oH`_|~YX ze`XT*1k?Xj)Lv@h71n=QZ&IWHr^1(XAVb9KH{WV(jW>1-q$P%=>-zZ$+KW%biw@Zx zaw;j;599ayo|)uz3>Ch}f&w%99GpaS*I=C^a}gg}qsn2_;-*_ujuRZ*y_Gy)x;?U~ zPOj}o-en2HaPU~b`=q-p*IB&A62|5%w!bZR-1~5>Fv@DL20!3W8p!N@saWUJwB@64 z24Izfqh)XDk)LF7HdtG!*W~k^-JMgvEuoAmPWO8;mkD zaSkmS!w@Q8>)xqXd3VnF`Vg)0ZDh)r^&}@{n^;t9Qp73puJA?Eo%TSyOOK+TfOplt zTtt`rosm^=k(h(zl*q#EV~5Ei3s*GiXP?Ny}8X@isXtIhuWnEc@Z9Y*jyV)2uWgu)pDO=%7<3R{GO= z<)*JL&b~GpL~fphDjDVE7L;_rb?a&rk9uV+cn%w@NWp7@u--2%*_QVnVGWVk2gSu7 zXrIPU7?+)_+a{F^>m{6HOs2+g#F`ItV|S%%Ez)+p)kadJwB>C14ZCB!vLjYHM@$Q6 z$^tD&WRLqz#PnuOue}Dhq2ZKApH}x+AGTSZyVc!wx47Lt?Y5R-U_a0FMlMI>lvb78 zW~{Gqw2mI3r6xw6NVdr4KAL(=AlV}6b3M`h9}0Cc?Dt|5`BV@or7=rQ+lydEtCMFA z4iJ5_>~#A8wFS5vQ+(-`@md&Dw%ME#3>CY=n`(hCT88S6ar4}`41vp!f!qqMnHx*I zbjWd%;vVd2n72MqmQBbX2^1l7B%we9sTAvIc^?jxvx*>p)YM!f-5w_n5~heDWS1+o zdzXEMxdK3gJIB685~nt+2M>?^bXv!mvF3<$RyC74jH%dF0kogLi^4@{BF8RI}}&eW?(IDT*@UXmZNer^%NLB z_PtEXZ_X77ytaq}HehcFSsgg)yeilHHHO#znQE21*i&fm5XJA6EHf{U?z!-1Ktkbl zVe#OLY1gy%crA2qs=LTM7F^X7A6P&YinVBFvagD+8^Xd@1f9}Y+thY*|Ih?r(Zw&ps# zT*G4<-OPibb!h?^Pg*P7PYEBE9}cM#;?Es3x5=*M<<`_IH|c>s0e0AK7L`RPe6ke0 z646i5L;&%?yFxPzSc0`U!OyYUUzpazPRgm#<$0Q&5_V_LBkIJkWymo&Wq`|2@Wt;d 
zYKL;DEzMt$XmNJEWunsTMDW#u!|qc|ZsvDMOJr{+Gp`XJ2`|&WA(Ps(@CvK#(&_f8?v7`1bXv3)+ zSuOC8tJQL_r9O`K{fSiY@#N&Q4i9!`re|~Q&Y|?Al}Q$#cYO&tc@z96AtoH6*E+`0 zN`BGoz@QgduyNqs2gN)2KP!#GEY{sBh2_=;pX2?_>nN#BppTe=VL zljq%eWQ61RZ=C$n_Y_fp4*o@GERfQCxqne(hdTh%T(Ev}WqH)S_6?4d-8jC9t1R{$ z@sZJGPs1&4fBGiY1?~gEl@h;7N3AX^#p|>s2Gm7ueywj5^X4?dN!vFcOY z$qo}3EiNghEZGsmC*SUPGMqKXW0iz$S6d5wiXd`__mCY<4abE$K~l>vI;i6lCcHL7 z!M8iiioA%Wnsun@_m;C;i~&0f@B7Wi-L57~aCc6+-E1^}*5*q%Di&4s(3AOSTGZe+ z`lCXy@Z0GcmfLBK4>B1QQCS(VKqH8LXQ>HwK`Q0R-Y(qmR1r(;S+aZ#O9dGYOd_EGl(90*=APO8aHq;ji=#r%^Ig;)#D6&ic8gD`EFK%(5aN6V~p$O z+#`9yk<(G#5B3xS;VAI?|q5$&huW0k2OQ22hAjG zd-I7-$quw(EbXcPZ)m=2LB1My4GqAE)PGk0ft0>Rwe?a2!k+aB|iwhss zR^GKHx1A(}TGOoklLw?N_a}x9J_9U%|9@*9?6Brs*WI4D7|pA!#BhYz&2Djc5VXN- z6!e}Y=8Yby<7O6Q-QxqrHoU*`K=vE)A^U|4LKLq&Ze-@}--&q%N%!MNi1Ex(U9+AR zZ}01}r@KDu8me&Dnr$4<8spI81ab07Pw&AK{B2m~o*}APlzrk$npOX^9R7!uo~8W1 zB?XGBu9zKXEGm7<-75{&+m%T(P~r|P-+{lNyz7Ko9q36XB@1H1-rkq;cGJpIR5s&S zj3u8kWQ)2~)X#jqaaG{&LjJuV;POP7shZs_kN<2@_uPKgvnlv7^YC#gNSNcEZDuYyX zr|=|?LNtMSIs_w1W&NxY#S2}#Oeze0u+}vr|C-C`!k95Er ztwIQ~X}=-f1tCPO$7I?D_?1xE)%r~RDcFz<8B60$-ys)Q#=kaVL4jIk(a&9?IiN;% z`*AW>`U-;9DMlDrYx2gSfs0UnP|W)*Ei$|w3X+(}i0j#;{5k!@kJ;Y$44_fSg|%{@ zA^JvjR9Oo9gv)55l(o1v^!AOlxg`#i%0b}TinlIGSBVDfB?tOrt5ZGgb5bGGtFdZn z5!^-2gfN$=Salb;lJ%_Le@@09>aKCkT@Cw+gYbaa7P)tksk(5`NM3B<`*5 z&@cs-ruYsE1zQY3Ap7;qDF?!}yu93Qn!8J2#UNMzgtpAapkeF%xjtvSV>+Mwvx7*4 zKHF)LB+>H>U;;mC*A!k-7|}?O7Ed4LFmkL_Fm_UE9$2YO;TB)lVe>JT!%BvnIzCES zSnl1PjCxmI(FO_(`)aEkF3ma(EV$ja&-f>)Ep03?w4kQhcVOSFR#upUfR@1I2U&>>?so1iyB_^SX-(n4p@`avb=XjDFcP5I{M4a$)yPY;5Yu7Asnd zWW$^b#-_X3Vn!35R$5t(LM7YT0&;3_2s6`=bi~k0E>7GH@t#5(YY2N}j`+_Gym= zNo#12ij*%YB+XTVaiGIGXg$gsJJUM4WJrJQt-S;%ysgjwpj^NxCa7A@^qsyb>x7uog6EwELG)gIrgw{c56@Nt?+ip8BWt2M9FI8ggvf2oHrX} z_r2Y^^!}Ee$BRR)P9LAZ7_dch!=ovY!I6xHlC%;Zt~nRB?H|0u%ilKfnqP8t@I8BtH`gPP~gTP_^<_mg{}pHu?K*Q|2H_5|JhRfV@iN3 z)Ogz$>h&cs-twZVU(3mx5?c^SxtU9V{ods(v;|&zu)y%#Dgz?%5nqrP$aEX zw%VKM=hSmk$2v}oVQ|d~xs~tK@`^*{VDbt@rD~lV#2_9PZE!-Rv14GrVcru_cNg7QKA3 
z2sD2X0r>dmKZ*>vTzymMW!GPaLGsBh3+e{6m6&r%7kuLiybTLX`MtB0uTUzY5L-$i z!{2|%D5v~LcB!?x6Ysnll)kTweT^v8QTo(Q} zX77=-BbT#y@@>B3G zn2B>1kN8?_*CPOa{Vnu25U6fw*VrBX^hUGwJ<{=ct zW=(Hiop-zgY3k#T%zX7FbC<|R`?T$buP6N=#9t;F2@znzVROzW>656a_H_)w#T@94 zefgnTU_77uG0*tabZM~L7vz|CgQ4YU$8Y`pmJP~HqnH3AeYKOY@D!&#{0jD>aPV!7 z1CKxvj6vL;@l!clY~O~uK9lKR3-N4Rc>*$@(&8Wd59ZD*D$R6n``J5_9+Pd1vFjc+ znpk5&#JV%F#2y=VO~ewx62V?4MPtRWE4GPUumlx5npi+Zbi{%cqu5YUv6tw4?DzOR z`qp>wojwPw^*oDp-~V!5zYBBJ(gn05W=WhG>sK>(s`b+vuch;N%B~mr`?p)#=ooog ztX9^VaXaG&Dc@78=G3AtYs&j^TY6aR1`??+im|RDrI!wM)l0rrfjH&(#(aQ2vr8-d zZHR9u0_!S)lsnRMa~zmZ7$k&rQQB)$KDN00bMG`7xMP3bOU$?MH2M2VWYkR6UnYlH zgWFLb$4`8ODg#P~Nl?Ryn#DIn+s~py`6@i+Hzu+$%gBJR6Um@8c&7-LbZt9u=h58j zdj4!n1`3wkvtaB2j_~k}wVcSB2zlw#R7z>XOhk?wm=_+mvv#=@c=t`I&R7NRH3X;A z>aPt+z2n5~I%ZuKdWh$`K)oqj5u|VyE^VVbR$~P20ef@gBclcMWQwP3#1(7;6Ym;$ zI>%i8qhoL!jLC^xMgRD!S=0DB;6%%b|2eWG4z&6?T2|`r;IN@Rip%I zRiD*yf%zn{%F;B`Q}^`)Pn&Es=$xQ0mdTkCR)Vszk8VJ2ZzQXS+b-^gR50e)uPr+? zca|vNB5C{+|xZF%{Oi&kK211TiM&SD_QlN zMM05T-i@RFG{p>+a-Y^P-dvV{F$e!{Un`rsj>6}%BNh1D$bM(5&LGR*Z z9*4-IL6KbeE*Y7C^(1(k1R7tpRGD9)e%2aREP8Ztxe=LpBA#o~J~1T}3h@ElQ@gA6 zr7F<>NP4;UyTua^UHSr6diIWO=j}A<(S!;1H|Ksh;}Dsm9Q%=(S)x)R50JV1Oj>3& zZdvD9S!6$!BAYZArad>+vy0WjMH?pya5{|kWLI&|_SA~gc}G@J@eE86kLr)bA};yS zq!rB#?Yi^EDIO2z(d#)TAHoTW3e&HE&mK{Dr0ZaYIIA|+Bp5(F< z-qo1!S&m0Q7a-6BLDG5~n#|AfiY>v=n3l1xpL6SC39XGhXz zTqRk2wkxolBM_(kGF;CB=hK zO3QudHA$;bd&tmTnUjpCc!&vm%^qv}*l{Xl%lMtgSlyAgJA%(&w0DO;uMJ?{6|(y^ zmh9m)Os8YRp4xHEoZ%*;j>bYzT4gaQdQz=Ilp44svr=7g$2a@CTf=FA&_n-4sqf+P zdV)A%;r z`_H7;|2(JKXvQY0GA)IraN%KHoTe!A1DltJnUa|pp+Xi{InT>!?!rC2%F4xFwu+i- zq|9%zCUR#coL9ehP=OfQRq8UkBQ*2#P72~cCV8}759RkpmQjtY>g#Ilw;5@dm#fWn zLrRc^^!9K$9&M2S7w#u!} z-AeD89*Qa)oEQz*w3zrhk$2TotcWn0_@s$@;I8$&YtI8a>X+!>{SHJ*{+C0J~Sfs|R`(Q9x;DV$=a#N`TBm3fnwbm|6Nc!KoxT+EnC(KhwB^ z!$xK`&nmsIC?c!}4bEjfJ)Kcq8KijF(?IBLpBKpi1902yGV5A5DI_!ym|o@QoH z$XmFo;4xr{vPU(!{S~#BF0~=#HsoVy?Qpl=Oe~jUgChGG6K3~IihgS)6^n%KZ7sPA zJ{g2x0Q@KAU^`J`Q$2i3?!0t?g)w2jW;`*d_MbL?Hjx~Bv-B>6xIQHiF;Ig)x-=8N 
zT2$yYL@Ani3?;=r3Mm%tKUlHm1X(NbUHm>@0H_M6%^38GD?m7^NiYVgmNy*BGVC6| zMEx=M!G;>#S)(6)Yx@x3)GhVYoC~C<>G^gixViD%%xxcnuw?J<%yxuX^UnXAAs<;T z)PUm0?IMOhBk#L?3Cv%4IAk8BFXAM=zO!09okemw$N<$09Tg1PV~r(tMe=GE^lEHO8s?} zSAOUGhH-hgh(~zN-Jg@ovEcDx%XPbANHNSj32v~BooiKZ-dj2UqFEdiY|IpLsxLC3 z4NM0${uM$tkCZnOph$z&&Zc=@2#oOuNZ#D8hzNzJB16_b_Y!>N#Ap%_imRMtwXvBs z@%~f1N@7t60Wz$JD&j3140#mvd26=AAwIsBP0T1DhG`XJ0glp&_8*0ki)6(OlT)95 z_YGO8Pe!CBtfQ7URyqzvi(LbJ2Yito18Xc*j;G>q#>)kXFvqeSg-q5HDu!Fu%!j3W zwQZEl2uH1eTRC~kLOMDPa!7`VmZF6K!ADsQTYY%c4ej?7)Mtb7D1=q8QCuG#5%RNu zoBslFmUm6}q|3JJzU&+riUi-LPQ}$? zG%{8*znXOKr4H7>i`73lXD*7Um?!xuIk$POReRBeW>ZE0$TpYqwo@;)6EpX)J9Uqz zh>cmItCrlUvn@XcU7wnzVG|u!LGTBbF&zW0x8t>=J;YDM|G+YBL~-1X3GrEDk=a63 z>hdG_K*ZGB2IPdo62_((!+OXl6VVMwv9k!Z$4>8Xs=WzFPPN++hbACJLvjd8nGEhj zRQ$zJA(_ZG@}wJ>ccwcs_CH5SQN2eG{rvg%iY8aVpdwOav}VMAm(93p!E<}0DZkI+ z2Kz;+93K03l_k^${}w**Iw*SUyNv^bW20xaaZ<@*Q&#?(7_r??X9*(NDaO(kp zz`a_wIa+f$budZC8V>9;p}PwThSp#5k%q+#<#}>6|MF&brvI6Th3`vU<0( zKKbTl11f&SUB?Eb{*YOO2P*x`ZP~4gtK@^d#UQaiO$zvh2a~DuV*ol#Rv#$-8d{*6 zH`X%k<<+iWH5}IPiVnAQbx$3J3BdUq24sC5iAuTlY+I=zTK=ZD33FvL(D^JB{cO;q zm^(zCe6iIdF|uB82jk`WZZaEGF74jHSzFyZohnOmeL4E{`Q0knIL>$xMV)Ee#7X=b zhqH#dQ?r_qd5|lO*)z0=7klM(>tO5|RZ|okCgspB9>moSOHJ8;m}?%jBSC(5maOzu zKr138(W^-ThdD*EmI?~ILzk8D2RU6~3ytfg&)@z~SM-Z2|3=5%6VX$Axet)+iZ4?E zXYgs1h1k|bG_D&c`PlmzIaYq{y{zS_NCTjcFwSoaq{{=FO1l+1@7wBXw7^mwKlFHu z%jpeGyewSPIRf=l6?I^#p}J<*mONn5G`4*^vXr=*NVGD3lu?k=+{t0*r!zD9*W@7G zUtvwr03xp^buwt7RFgJWf0}kXp~lKNzK|GB-aSq)Qdd{q(P)o}o0*wW@ZOMpVKiwQ zf?1X#IXt#(4VXWKd1u1NA}>UC6m@mMm}oCmAs8EbdQ^2(NYK_WrdR7-;+&6#Sv2lS zSv~eQ)1PmC{;oxJnE-aMvoS(r75d_!n%Dz3@xTmsKIrQ1h0(Nncw=lPY18NV=P~Qu7J)Q+y^3AUv-Dsd2(JOMWQ8CgqZ=Dgsw3ykuJTXH)! 
zTV+gLVjU`PVW6chjed7}H}Wm`0D9C4gGkh_8QgJ$Igqv$oC{1%ivl0DkLJnO9t#)x zsTceIL8|zwsL-qgN=nwyR1GJVxO%70=Ki5#vquv(5Zn>DXAWXJ;*jFb{DkANO?+a&>tosG#mk;|F+=!)-yluhIELxQM>+GYmixTrLh3&6JE%y=?y^FGSbZ@71| z^i?JB8uP&vB}=xo-FAG?GRnD0(=nc5L3favLq6PqV&-oHYMjfg{NhiKyr+xSFFJCB z>Z0D9^eZ~2Y9=b^O?$k2{eE8)t5<8$Z}PX=Qd z2y3pw#!HrFPB0PpzKmTyH5twoDx9Ku*WA7Iv)Z2*I8J}$XQVBUvhBgtdLA)Ufys)7 zsee9v|A2O8?J~=h&i9aLV;G^OK3%xzJ%+_|#Pk%W<~ zw8wh=89&&6>S?{!{=NVccp#h>mTIn{Ch$7=BU~2B&#(Q$u^DDy@X&5>-+j44 ze|M;0hy zF-i1&R6kh#lJ1yS{rGVzNsLp>AakRmLU^;KrK!2z!$r1fmi}SFA@p$pkz(#e{9&Py zCKEUf4^}fsds%ki^=n|1aD(P>a*}Ser8#$Z00GYAS>xfXXgdiR;Zp5ZBS*r(=ntMP z=jtutrgO?O^gGTtCtY(Y7U<=MUiPBBuU$tJH-HU9qwnon>BIAzGuTR*u`fZ|a3Ieu zx$?1`4sX?vI%1-6EClp|(;ij_#Y^|dP8WMq4k|3_0SHvWgq02=bj}sgO0-{~b)JQ; z$LGYg?jK;zVaxA7^nICL3zgrjdEUe*maHnI_?tjLkA53}h@rnu1_g{w7i@h{&E!T; z3yCVMuVSqGm)EY=HO<;Gv-Glnwvf&L{LGYadplzGtNn#TVBs2|zQ?sO?S)ua&gveu zj!o`fe3i#WHlBy<{I|;VAT%fl5H1D${##}8Uc;2N%lie@fBB4mPiYjAD6V?ErJ^+% zEoAvPt$G+|2Qx1WvMWeP#oMpyEe9JO?EU8*%0W7B{4Xz+$A+L=-f9hb8TjjJs&0Oo zCRs3fH-D*jRXeR35n57lHutn7AE?@nkhOPBbM{S#{v*FVdXWX6wd({QwflBml+-{E z9zMf~lK<-WkjOlwrw#z@hYh|Q7j9>!@s8TZb!u~e+@y_!H#fBy;3O)?GKCT_f24E4 zx22WzrLPGLF*a)44^}g!jdmpOW|xuZ-ST&cPnIBA3Iz;pW4rL6J@)+n{+m1^v9W2WeX50F?Ld!nW8wtK?TbE`S;GWx51K>~ipW9cHW}uQd z!{kZZif(N$%KFxNR%Z(J)>Db0+U6!Y^JQAtPKv|zR$ThX)@H4*XcGS2-H5`>OYf4K zXTzPAR2_sPo%5{RRkA#1C3}|d)$bkZ@<(A6T`D-VyW%0kX3XA3EW3sf6vO8dCL#JQ z#XUPP3@C+P?f@j2s+D>^9n$~D*StKmqWx!)55zY$&&xy;yoap;5`dNjpG^CywtCaA z857QH?`;Qn-TNHZ5#+z_Kc9eEI`>k4fF zyWy^@3uxKD%oGr{{4mENx%01yagPE9CP<+hx;l`3eHFEC4_82@>zx~V-q&|GkD&St z9}YMXJNau(rGN(3I!Jc!<;keGN-)+HA|zNqpNs295!G}d=2n+DZO?fc#jZyF_W`eK zI6r$rnitho9z6Iw1KQPo`OmO4gNF&oMQ+ZyiYMr}goJhykaIGK)zVU@O6|{Gi60C~ z{wW;|O9>3Q6XiV6zA=Qq%N+^$j1T@b*GXMK;jDto|LgLZ^KzB?V*M&O(*n3Votc21 z&{?tkpq@BYFhzbvwKMkkBMUnFwgZ~0DRrGa+F5ojE-6eDJ=7JAzCmRZa_)2Gv!2D} zmj@(!4oa<2=rr{nR|$`j<7y&JLSqS1{L3w}X3D%HQd@T!i&C3dKfdSiicz^?^l*P! 
z$k}u|D#vyuHI4{RSi@CyOBL@XR4{9s!~#^-xb8(;{9qU)pnm3L_PZjK;S+E0{Gx4* zV-_gzbKpJQm&@Q*bHU1lWM?(_(8_@Qyp2O!oqA5fjySBjsJ!h$awh!WSL%V14TP<& zpJ1U@TMggqeV#as-f*;Fb1AHVGVNM9JsJAOMl2nlj7-UDryV;HKkImB(YNmvwqqJE zB}J}Zk7-#q$r|#gTzV>7fpeVMD%={?7so%TwVhf6ak8#!do?6@g zMAq}jcA-D`cI^1{?DXcwP@w?zkcFUO9%A&r?AQs{WeNzSV!Lu^HDS^r;Qb#jp3R0< z1Fk2HJA91m4L;$Je5~5==t5NDOsPh5=)IcYa`;(3QL=^VPfWS#7s|Xijh?6|%qxH$ z<1Wf22L!KG#BcR4E`hrB;AK_{`4v%J+E4>Gb9ZBPh?i=dEfH^y1mpi`6&&C2 zd%6@FVB_gDH+T1uk_BV#x|&85e4o`4ub8_PB|B{-1IDo|-3OK;%k(vCoEaiJ@@C8s zzQAmGm*ct)QTENZ^Y=nc?Z(Cf+IL6Fw2Aen1j8fP#tjdNiTeIkg)B%l z4UEaZEPcrda&FWyW0TYo#Cw3;oMi1iB1*cmK+J(*u5>dI>6@v<1YAVmml%K8p*9<$ zSU!Y1zg7im<(9Y1X&w;VJqaG`?wWQ!HlJc=p(fI3yv@8$CLeo9CvsIjZYySnOQkJR z>TSXw;dX|&pEoKzS&ev~hg&vSso*2Nh5lq6n?4b%)RNrE$;0N1h*2d0U7gb*$8ZAn zhtJ`eACp?2|I4wVH<$qvA6HyDSXSQjGRSiPA^_=CaaAb=lwhA&2OjqXiNU&+DiIrd zwuSdY>V@BzL({npSeUk*D^(=3G_}pv;_a$yy`IWT+JG3MBozrkt>TejGPR3>p*#e(gEq3p=Yb7-2uPm6v z&uLI+Zsp8XuNJkIH=T1;DuAkE>&V?3s&Q3RWQBeGc;!@S>aZy@6+5`58AAUAuh_9c z1RTG0;+wgSq^%pvwT!n$h6`7hl>4mpPTk-GPTa!t;S`IR|DBAWuhxR(&HdPwKq)sCci^-UFV7qu^jSrY2TR z8lhreJ_~6)x^K~LN-x3uk>F{KD#N8dKG@_U4;l$%R>N`j)Z*d*P(tJOXe)W^PFEb# zO5gKfQ2q|f%wX;BEB=_@5B(KSa!fUsx_&nFwzBf7Sk&)?ckv{_61L>|=dkwVWuaWW zPiHGd?T+l!ruk-dS%lAhcfd8?`k-ZSPYPBet>>4K;P2J?_!=AaSgg=EI*w-1G!xlM zb;@sAh^{aA(;s*+DXm^s%`gts_!#C?NH!3a4SkAfJ&4t%99NGXD)3*{vULDAWUwv6 z&7>lt@gC`E!VYJld6cES)y@r(m{{KE`;wiUoWkh}TuuOVZquq?Z`zBbtx#n}P@0kv zM34#y0izvLVsy_l8k&91``GTA*!v>WlO{iAA|@Lu=V-U{)_kV+IIR-&Oq|B&8PBAO z8_?GAEGfy~c55y3|6CKixtdf_%93O|*EJA-Uy+w91gKe?LSozzKUE<=b5@ei_F;?V zO$%WH@@2~PNBL|DMSW{xTjFDw!J?dbkBLlBy@4w}%Pc4@Of6sv#l3<~ZO(v~aC4hm zLz9l=0To&!DX%;gsQ$r>9mm63O7oo`C_cN-IstIKWqknIiUmv0umVH~eeHNQ!k4!i zm*P#=W{_$|9C8=BdUKG@V@f-p-T=q!@uFfO6*?+cT^nhW+xG}iCn4NqV3Hd0uGXI< zUlki!4c;WNEB8cBA|wdF)Oj=!sY{<^o|Ys zx2~QQK000}^k*wwiMCeg8J1iuu#a&aZ`;)K_Wj|V;^{5Skjtl>;DH__9)RrDb>GAZ z%5&G-z%Onbn+SLBwBESwGC1CzO|TM7@#6TF4Yr&&9l-iN`aIFL(YHn5`epic^XFk) zN{uY4o|GV9+S_zYO{HrWVa*2Pu1`Hi8_bV|ThPTE0bpqH-c 
za~m6r9V}w}@PEwysrX~${?j~mI}-tmI*dX%Kc&-IEfLNSz4+*=r;fe72Qy8ZKjvk# zkN2JGMS~N044^z1n?7!G+X|F*QvCClT$Ra-x&za`Z8<_v*tcy=z-4Bi*EF+tYn=KF zgf_J?;D~!di&f>DTpgSikLT>aEDOWgzx@wrR~7N?$8L+ccG|PVoq>_MnEm0g(q7Yc z;FDESVm52o(vr^WML!jbT!Pk?1!?=s*Lkbu<>GCnY>ff|>`J1d#IT_qH4;mIG^heV zOw0vn+R%b6_B!gs+Zz`=$$KNdmuuU}9FpndYQGuT0Ic|H>&0{0ZF8N|dc+@{ZkIC> z^dxnB5vja8!LcAqyXWSgH);IyPD_~ZD!hcJcw&z^-*_YCE&fGIWX7?+ z4zsopSE&?YvJwO)@Z5KrlzZ`f!YwBKamc94{9$o(hTkp9PAi6h`kr&uGE+tg+DcIq{*hD`U zCk<|3h;=QPn8rW0E7^eAqqogLR+L>A@yW=&lhkHY2|IT!ET6H9lZ0z={t2hxUDXi1 zj-vb2#XjBBA^j{YchDU9B<-btL5f7IzM`I&H~XGtw|hpZyOmw=%r0LbrD7K7?}0Or z#0UpHm3Yrng7-r_U;_|(Fht_<9&Ab}rl*ic;GVK+%Svz20r4d<9cfp5uZ_tMDyc$S2k&8cFyHdSf%ZCo<-*zF=?bhf^26IA67 zB3lRREd5HN2Y|1S!4}5Nc1G)xMgBTf&jMbIinBEQFUJ z*+>MqGn4{)KQJH=7n%QQ!Z*a#8f{?<;J&k?nVEtSX)N zO{ufeCK3d>%q3`mA_9_B8;~G9`#IxKXWNzI_###aFwsNhj+vJbs6H>RG^lfU*$wBu z4sXR9g-dF(X|g^JZ|jo5jenE|+0!yNaB5g59wX7Pu1{Y9zVc?!dCQq|9{~9IvmEKR zqWht_w~wA$N7%GeTk)n{t~EgHAQc3Oj9Z59RqLarn)AwPn%gI0zA=M>-GgtuFsVGq zf=FHu;wG#JBn_Ak=!_>nM)v$!2-I5-n_6GE)8!>N;aZWzQgz{anp+?R)!6TmrJq1J zl-nmg4*k1Xam1&WLQo(__~$ZC&tc|JRf=SIt~ zRXW#s9K3o-JDaHX$$e9MtYs&$8b#12QTI)J9wkxkQf7DzO=M>R z+&h|g?Aw*>)9r9xb^>l;X!SC&1PMSby(~mmD!pzIG4h^(RNhojxO|dydWcL1nL2o# zdS*$v2EVN;kkZt$C5Wzt5fm65W@H5gxfI>Muk0jzEkcwcthFCHspfw%cv~P;1r)6ofds#2`4k4v8|J7dGotQQxwv=;wBCr|N@YqX*?~ESB1M6&HBuGpZh5G=Rb-aP+7ZVr)P)%uIQqbVV!7r%fS;_=MDCo)Knj5;h(h(k#tU#^OK5cJ{qx z8dXBN>F`->FdyWxVugv+K7ezZea;pKN}cS|BCO3p7E|ARk~u0VPNf0P(h#|~Ln_+O zSKq!GhRT%gtsDYNOz1vX5Q`caCBl_@xEX+T;+H|!={Wm-Kr9kmxcUTAl!u`WAIg- z_#vzL-#9V1grOu|Z|Mmo{Ov+DC7QC)lLmO_1PjEA@gFh&H`gv zcGfkzRuEn)X!rzB$&TWSt^Zi7q)P84Xge-skMLbAbrh^UB?*ejvf@&V{gWV!dr7_p zOawaE!7rHD5bS~1ZaFzu?NIvxxOH8fkDTkVY@^AmzTX)3Me*4ApNJekVkTs%p` zwmS%g$#OVRyHg?3LoB4gv4SsOjIW?r0k4wec|Xby#oz*3mqY|eY)&$-eF=Xt(Yt;Z z+(5Oju(XTzL>*XC17?5q84oJi?buOKhZik~v#u ze>L;==Z`8c%`=63!Tj;Trq31s*|h#FFuq*Z0AeHNSYu_t83JxiKrG)A)W0sxfyRF= z6g0yHZ*rO|*klYx%*}gvNSBQ$UwDLjIh7yDH)IVlILLLaVMO)v!|N?WZ$FiV^L_aW 
zK2OYDB_Y}ihQUPf&0kCyJL*{_C#fBpiQRenhLO)p1G8NTQ=_~-fQ+he%Te5?GU$B2 z0;`h*$&$lmjc=u<;x|?eM2{5%RZJPxa3z%ZKBO5Tjcw13pGz_ojhVdA zHcGXxHI1$9+VX9qoL`;6m~Gy-S5hc%>@3S_uU8f!f6($6kDWSg>nCZnVqa+xdhXc#|6dS^Dd^`mRuo7X)1R6 zccv>0IKVXv+Zz=fEtKQ%np-mu z?x@wKS=rR^ik5LbHb9LpCflI(@^>c?sPVN7>8%l{Z2LrBd;*OL!8XAKJ|};`ETcLr z4;&wF9zyiM&6*F<4dM8IVM>r={D<*e^ZKAiX>%NiN}!U%8t|(1l-qRV^DxJlW<6RK zJ7Ce;p0J0PHTwnnUz$$9Wu|du?TQF-cHN*n4muMpf|BctaLfJjML}g-eb`k>1EfpV04Ks zLEZwr|6?4HcKz1kJ9><{fSv}HCbijP)K;0;x@jI^T#<+%+U5)i_+RN@cS_0e4G77K48xxLGQw(EDOd3uS|HSp7) z4>Y)H(eX#A5|&%T$F$l;C)v%wzRM3hBs-WAaTz%jgCdHu&bS;PD-+tGj~{o_x7^ ztihiprQl?2FJmi<6~yyxU9+g$VR$3?*P2@QK)I`Y56u23e7f}VN9W)unxt%^U(b!O zM^CI}=oPmd`>f8xd@x(Yna}#Z_X=>IhQ1rU9C>UcB(?08?R-=xYpuKM%*`1JOC>uC zPUwtZ39Fw}%{*I@rzj+AXK{Ta{aA~2ASgwzu3k1A98MQ3TSvbc9=R8GC=Ey9Dltoy z3&trur}v{qf}*~;hii_r@s`wOV6csQ;4ddPUQY*wDAF$$TV%B&RlGS`_8qbXS^%c);*hBQDB1f!)pRi0hp*-zsGW;ZL5=$ zK0+LXF;KyciYl0i`qg>J7=z3YK-PXWVpzN!hoz?fRxi!_02j!9(L; zcQa01d{`rTbtMMB7NC`XBG%KW-l_`gYs}59_KAUyd2RzQK4xgd|lHoW(Dq0l8 z9VjRyAqVfoa{g>Z=$vmeVk5Aeni8I;*d$K|f5=X_N;Gcns~5R;id-~qcioP9CTTYB zTO3mdTS!KdV@VcCgS3#WC!^-J7?QbV#&P2P|EtDw^e-!8f@P82Ae_lRG}IT6ScqbD zHbf|Pd*1Apd`ZmqyP0^-XzJv2x-(z-=0l1v;W?KWbXA!?!8@+A5qJzDEgrLJm31_9 zsgbEk+p8^kH`2KhSRiaNt9T;a^;fKn3{qc_mtezh<})2bW2N=^8B* z)2wj7qh5ZIzv{P3VX286bgi{jU_0Vv{)y!GszJe`YkItUKS4k7C1zO=VQ&a-SV2L% z!ZxmIY$VGvV#*Z1=7N%xb8Lepxhv2+NF^sV4y7uaxbhpWh*NLJTO~nxXCmeeVZqem z`RA@^6R~+|ambQ^h;*@yie7u>qVU9`MJ$!MP|!Rhh-F>;%qBdMcWrr3hZt;U$^Y0Lx?21mRtsx-A|J?n36-UmwNBM z$LN4amdcKZ%$u$(1&Z21PTbVrR~k(X#F;5AUgF4Lot(+$%(YL4K7PMk^i}|h1AUK! zl%|K|h9JJs#5Q?ZR2BN2Z3wupEy&j{AXC=t4hZ2IV2-a+O64znI{2;wstQ_6f_z8b zJyA>SrZ|dMydVGlNJx7*}mzLk{N!# zp5kqan5Ygb25brzXdF}-9(|zgtZ!B-PX>Za+pofaA9rN z)M5zzCl;)e$pq&XJ&ju$d7#YNjMVA~5ma5=#S(4DgyieQAqr}+Mjv~kSHQ=B zJ%{Wx=B*g!t!Yey9%@c7%C$fSnLo)*1-`+ZqrGEa%9es|Zh#{Ui3F^{L$B_4H8z2W zYcsM>ThH6e13cz->O4qlAAJx!@JGA+n5&{=xRU9hAR|{#vOsC9+SbZmDGx*+BuEv@ ztbv&D-CzSM$3mFeOoiG7WviXqw!qv$3}20?W^u`mnopg_f}6_5DthZmIPZ_}Eyu8j6DWQT=8b`&@wsn)ouGNCNZ(r0? 
z7jZZ>5Ey0>eeY8MhHJ^nG-lDU{OeKKtn95u0dn?V;lV}C>3yTI4Q1{Fi-mo6Oye<{ zAorLDAn^oUgtdKq_c>+|XAOP*t|w-fMGy^&i)U5FQD=vXMK>hQSS?emmR6QjYq>|8 z?uybZAzWu@Xvpc+m7sV3s}oH-OxbsNn@XU2jrh*D+Fc9V)vGsrthtLKkL}UBUL9hIP!+?eok1!eEKMBh|uH4(;in2+Js{!Vm43`ff z9H80w+nCn6_Qx~BTSr7FzLcW6G0ou;8|LBb7AO?PxvU7c1>3|v?s}8j<_V}>;?-Qy zS*zFqt@IN4HxaxHiihH$e9uaeU971iJNkzL^_=mKKRhkjn7Cs4ZdRn-?t*}8Y?;56 z9#r$^sQ^MiN#H74MR{e<_>{XkL0MPVPI%e#p6EZHjb_jg5N`7J5eN zuiUxK@%ubUJvDDeRXH16J@(bUA)R;@>9#FaCladEK9SxQY@`ta%e48CU!{qihI<4# z_WI@9ZArQB+0DLcYn)-{lm`&>&C_uF#6fD?nv1<`5S~&JX6~vh5z?Z6FC@DRqLu4T z;u7YeHX0vf^%}43(!+BO<62YHYBV4nPp-UF7GBsi85ut~@c4Z!7!jH`%t)vK!`bE{ z`S9#mqCn^hMbn;8A+@5TYXQ6A&fLToQO;nT<=EWtuwjQ+uUGn@8t!M=kE@4U=|-lQNJ~TiQgo{+Y{C3B09@JgMpKNh*frLB>oWb0 zCTALXj)0d7Q!{ltj_eH<;a=1*kb^bE?l5Ye8ac2MVQV?C-veMxH)LnW*h7lVrFJ)6yVlTf*LB)*lL$yH6)T8*Wpb4TfC_G+6A zDhJHcL`w&TeNM;6^4OaSDZ`NE{<(l!7$gwtT`(zw9fkwhi~WL^2Wwd%uDwa8yyNu$ z{Mb|bY~f|ktooF7Q|WG5PDvEu*ZtyZgE0289aUswh*LjOqAhkcR>mT9U3QME!^;7M zw)1u$)_Q}~nOu2?w=Ms|xkjPO?!CAwM1G!J`Lpps-vR)t0=w&Q@8yKxksdN4(qvN) zT`cG6c`j!=7g;(l>zCO-*0&m9Z~jrhbi0&5w&+v6$4k3d6c5wyoQw_StfJz_g}Pa>Oh5*Wc&U3X2Jj9*PGe>qvp& z4?UDL%mvLm_$_kRT2}>7g=`O**J01)Cb`s{*g^od9(J(w(L;xSv^V%3wBMT-KA^rL zi7UDm1vRJ%_CE@i8m);bOvZ5Ed*6djt(?r+#f5txkbgd=%PMdy~>TA}nay zpuoAwBVrPPn@$HLtx)6{0Bt9YgZY;qHYVn?g0DAiY$RX%f?jHP_|?+Q`^+KGRO;tu`z}6B*jbrm8Q-tO$@iyH#{Hv(g}>p>nl?3u zVeYYgUgEKOhYIACI%jTlV)qx4ump&mWOdeb_V*R9qr$Iz|O*Vs9+BUtaswXgr{ zLHz&xp}0VJ#qL?1GlMK&sr1(mR%isy5C}(rYLFQZ1}6=aG6$1&Ada=xY&1Pf>__J# z+r@nM3`ZU2!gr9qiqgNI6OD(2n31xn{l1M%3sVVHWOwsDhdUpiy7Z#Z=nwwHCDRZFdwHh3sK6ZH zteJ0nfjPeVVw=Va`eT+{JKpFEt$9=8Z!2@%powp+s9Cq#2#4uk(Q6&UJy0pJiB~T} zgql9A4k&){wE%Tco3wD(eeQY%$#)nCUi{j6^E|Q9f1Ot#xx@RK{e76Mg7s z!M4r}l>LqrxxYBs)f2rqQbbuRALZA9`Q{sUin}96Yl0KIRl~?I+upE~w~TTn7=V0l z&f$BT-Q9$p1SVF&8774{yan7WAD1(G`F`W`g0Z6^iw9wqL&^%<~-~Pnimc{N9 zb2U9)h_J|gt&rTEaO1Z{B^@!bO#sk@R1BQk@ND|v!zCwe zRK2fzznnxXCjAByM_A$u)HrKS)W3vUErJG*3}%>wso}9%-5Ow`uKR(9h*Ea*Z>6Vg zn%{HXiJy{7&}pY%(OM=Dl}mW 
zoFWvh8p@b}dP}?6*MiF27L*6=x)F{wvECO3d@eirJ(~^LzDbK?NdJB17Qc9Z!9APZ z32S|S4rb+=tn&!jMylB7(bZ3Di}a_MA9X!LyC8XIM{FUL#HUZqV>GpeH~h3>iN*WZ zA~OEIqP)At7&zReMgw7X4B2Z(XxGCaJnk;9Mqr12sV(+$&CVquL3IUQw}ng7OtC&s zMR)mg&hh7+H?niqtJI(V@DGupNzDt=>MMtsZLN2ms_5X^I@e)6C<4rEJxj9T*cq^-HaOF!Yvd7ZTsU15xM0yu2UWUe>+0?!NzBKYr(c)FfTbD-X3l#|$TYg2|KKEfxzeWe9 z1{_A1oM*__f+eYkk-5?)S04og-%BjEX|8f2>fc6Jb)#+hH-%=ddIrcbypE2H#%A)ytuLA3eXvv(Zi7~9^YPXDk$7@%lIu}?ifV3`Rs*#< z$P}apatw~&tHDRW43gE_B+>h}sCIc2lj^)>GS0v0%Xq6N<}@bRXqGy@Gi`>i^|7x( zgq#$2u|#Kb<+t^Ey|c=uX(K0*RF8~rN@gpVZ>?4Jyn@wm(6-Gaf%duGZ7@;Pa6LOy zXXR1``nHn$?<*JIn*09#(@eq} zf1wz}=}-*RYJD~h7A$L+O1#UtvWafp3I1q1xz~Z-jUOnvve2y)#gmxwsJ5X;YIg4( z{j+)JVUz%@J>@&4uH66U>HdWZYr!P1{a!rNq$!Wu`RG|eAv~<-8>#oA-0Ht$zAyWA)avbR^xvw)>;8-MSS8COE3?vGmyq~EKSR9xMt(7z zB=vJF+A#{pl~QfoyZ7yi`k(*Luaf^ifAD~=Kvm+CYRM1KJZx_u+TT1&#ltoG*~0-V zofT`YOsf~Jl20q=LGDQ2VLKBYjc<*B3Xl2G*=G0Glcu9NvpITrKgfLDGYgk^`+STh zclM)T?UaxFeejF^GG{%uxS_Y!74q)?wD(<6O>N)1SdRteAiYb6P?V~afJ&1Vj3J>1 zB@`hP=@1Z(dXNqx(jh1aAqfE~p%{JxK4?T8IJyW=YO(rp$ddD&+IcmFt29y!%*CTNx#rQ?k;3gC+% zGWt`YGk)I^raNb4SJeCLMMrHAT_XQ&H~Pu&zBPiky@Z~~=Z0ssj&{UTyR?b~F(@r# zvj%OawQTcht>G6Pe-YEi0a3|p_H@jP=jY-F4(>Td>wN5w`GJs)jI2~p2ep{#GjI61 z!S=uEr3=pa@)-JFrD;oDrON!-)B=848Q$Xb;9UN>%pnv5%mt0B%p$f3yQky_Joiq; z%X{QIN*W}pqi6~H9Ud})Y_trX5G@TlcDf4>{=vRIuUl46=FNLHWX!6U|FP%7bQR!TQ2+%U)+OD4N!(ssme@!D@I4y0{GQ6*R z!%}fde>B+1=aP@FYroza@DixY1!6$97<4uaPxzrCB>^+vR_GzNE?eB9>pcrW&Q0A>!qX|lk%MIom_}+nke5@&K;B=ElHVwA+{8>)m-6s zgKYnG&Uhs-&@`XMD`G-Z#YK`M*dDbW*R53tGuC#l23RgEB|zcKhbL;6O@sJCzt#Vc z36f!Vh$7WAHTl7}7>u~YySF&pi~8mO)RZ3_;d?ZW+VI4d1&@L*ADi{Va=B{5{+3l2 z>(qXKm4g1{@?nCN6gfnMwz0LMU*U@DN{JZZYlKpKuVBjkvCh2j2e?Xz=CLwF_6{{R zHaDIx?(7p1@%EoTC*q>@3NnRP2bF_%%_+NfC6dTV0U`>_Om_C!p@`gj2p?m$bKl$P zJchu%x!-XnP-Z)d_B{;Eb!P$-Y+(o94U0-_%`9w9KkTb@UC+KWj5j!OPYQ~zaU*4D zCrCSkvX(iiQQCrnNw-(mTuyx5sY`maaP7eXe;|m}%sMKdTT=T}$zB9wMFDuBrt7D*E8t*C4J9c6ZCu`!`-p@I;u z-~5`~EE0`av-Z+UNH;~m>h~1Q5JP6|{&yN=Qj*4@0(U}8tL?$){zC#y=C?)Y%{ z^1)Qk4y-yO6&(? 
z0|iL%@=#f0$1EBZSwR-rf1wRc8Fb1LbvS^ z+7RNDpa4(r(whS&KC3{9T$K|^NVD>HL6y|P@b}7Wa@9J5i9UFEdNz5S8n}L7X%Nhi zSTB!IZrSDLF3Q_Ni%NqzEt?F-{6;jl?gz%@G#_?zac4O++^s8=98BOSR%>PqiXII%vZor0QWFtPGL+_Hmq;*GT_lpY@R*uebQWBRUHad=4 zC6ptr3X;E#bgi5^VEVaGdm_a0!DxF_oxE=HVm@d^Gteo5l=X7nmnh=h#m8bJXF*#55eeCLKHe(LCpw#$E0iyq8bkPF{Y~x4zC$aPJOxAb zCk~R$K4*$C41AAj1`%<6LZ>XBqPZl{yC$uZe4;mW@NvG0AnLUY3XyRty#5K=IKX zE2s~!viMni){(_$TTcsZd{wrtt%r>HB%1NQ!sXq>@oJkfz;|d#`{AuCj8E^!wc~r5 zJa9W0RZ#%GA<&rUrW=ltCZ#w!6hT!-idue00QW= z&waYSGz<%cSE^}mM1|zoblRJA3KT0A;g$e6>R^@vw*7>Wk%^+=?_?wRBBCBXsfKc{ zb4x5NL+@{(d@D2f!str>!Jh;=I=*qRvZDo)y5o#&%oyUe85Z+e{g19STw z&tKfRCjRg3FB?!yvr}qsm1EV=(mGG25NmL_@eMPCR4A;yw^@NRjUX8)tg30$P%E0F#zdqv~24|RH3TOQ#EiYH&1+|@} zkH_8Ybf&2Rpck*S)`R#1S_|x02mCoCM$cGc-HwwaeIe$N`+lNg!O95dXPt9Ig-AtF z+XJjHK`Y8bKws4*AUzh800kvt8e_u~C3;Lz78=toJ$)&XNnZ@5JtdF&9u#{u5l7nZ z@QB4|tXoW`Fh4=>P(UO5pk8hyOLpKO%OMB7OlE9>k{^d z(EW`IxZo0OY##ur z7i(%7Yr*}tWH_Ql)4#*?Q?5wt^BfJD-LAaxuGkm^jWI=2lHB+2T$6nMUGDob>!Q4( z1|+9~!|A$=hSdjKiDo{%!ke4QWMvbo7VPJ~K#D3&n$aolDz%)$m+E z)sg8WmN#x2KOHv3a$G6f>_OjwQh%zCPBSQB5zWW%`u5;c!#M>!XkdGk`6LUFMu!X` zHJx~Jy2-tE@%Mmo4!hhdxH^*(uR&vts+H9)u)(>{hShw|X1^MBbL9ug;PBn&>}gdVYTnGjPv5+u%O zVcj?J+ZW9;3x*C;<>~>qSkf2M*z*r{9kUaDX0@$!s_??)^iPMiPhj^acLILV zeb2xayO25FZ+lF7Vij#`VUx6Xz4zQnQ9i%h#?y5ph6g%F3*-~zDzw@2)nrD(&Uu!Xq1_WRKOw_YmepYLY z_Z7co^i;8v^Gm)lR7*#&3i2IS2S~rp{=40DAL?CqejmL@?aS`Rl&r^H2#-qhXw9Wb zs0ZNXq>;A%qtz%wq+;Qd5JGO0g`{1DZQ{k?_5HY_)jiMlpuXEhe_YnTfhj)REn02w z2mt6zc1xDXvS+!e3RLN`5ZolF3xT@4AA9x$hx{aY^IHUQw&NcZIFyx@W?{IjV$<|I zEOiSR681#cJoJGRz4M{POPjG}V-b(m)^(C3an>{Gvu!kYu?$OnK%Bb&eDe!KXbSey zD0|C3486Q2IcW2GkUPBni(y=+nfiCFlS@%_vI+8o9_t(`3R|1Yjpev)U?<`e2&!tn zpaorExvvQYF@Wwow%aUW^QP*s(C6gf>9@@yHmuhrOkBK2+-x|$-0dxsNNyE^)vd9Y3>$l*qjcdB3oDvX zr2ou)D(Ept!eB{G&iqX@%VH#SFH=71&3#6;*jL*U=H0xI5Mx@mNB&kX_vVM=u^~1o zcUdTaOUNk(IU>lSq$9HdMfUF#p;vv3Tvr;)j1|h%kWVe*TG!%$=0#<~o~bcTg8m>D zCNwlipy>)zN8%J>D-8iL?X>UCC+Ia(P-zp2Z1^g+7_`5!n>t@jqq>_yp!Y(?aIVCs 
z*OHwVTppDPhJ)F#J%YzA2Y^6}X-S=P8JH{@jL*Pq*zr+^SNo0~cB7F#kT?N|Sm6ze z?EcQy9OyTHYBzRjcEh_!T2Ta#lxhy`o7p^$O&Qs-hB<;9$s~3&AgoqA!M|UVyckNtQ526cnzwyzLXvmkwPN;;~vSZ=CB3 z^4ihPsSS@*xglm2lLC(-0xO==AniF|E5Tky{{w-8!dUJ|_S;4;M@_Vy0RFZu_4h6m zF#|;mLa_W49zdXJ5s}3_#u$zk^8MDhiW{}3S4@w;G=Z`lfF4SRlh>xa-8U7WD7HTj zMEP1Jwg0}_{%z^~KNRSL{2ARn0hkD{tzG{2svMiMSlYz&i%#j{hp<1cKK^^n|0!#L zzA*LrOyTXxou9@Juev+WWe>F%_D{|e8FmJszDds zXd#;;yN~l-hzw}?>?R)9O7bpgBuzl)X3^Ih4l)Pv1N*x^&krNFnwb)!BgC?_w6(Y( z?}$oU)(eosu2q$k#ixZ*C^8c9X$M7KBeu5OLqlIJcqTvtp-^M3Shef7*FjG2zy+`) z;pD{yr2~alJoW6+&Y%*i3S#P*`W`2##GO9EB}Nj+Dh?8GQ?CwH3a&veGqFmqpEx(U zz65(4i`a~NyBkoM(@ zulJkEg?4)dz7FMQs4B75#dT>K!-KAN^YUHdfAFLj_cT_#^nPl0T6ICEUR1fVs8l|w z1VdP(It8?C&CORpPrN!Lmh};N?Cn~;;wUBJe&E}7m)%Fq7w={bV)$9)jLg{t5MSz;fm-?Kftl=Y&C-U+HPM5n-Y>{&T+ntx!0042 zk1*qJ-F$7}8q;avndsnD2Mt8@zHQoh07LYv9vY1)gg~0*4ncl4FAHYW<3AA2hmi9` z$fg_5;|HoN9`@E>$`dMLwL&LIdYX8unqWmJRTWwwG-Dvsp9g%D3*u}$idaITkMM-j zqe*}oL~UIv zV*vrXESCyHCnFY&;gH7ZC(jH_E9B3l&*IzGq`-NfUl|ya2@ij>h~x0yuExoS`_CFN z)G8VA#WtPDRRk84#Ov6VMuAEm5gr3{$G7?)y%@rtu40ZeL$9|}(Y$=Oa?giMN$YT6 ze{#iI7^?Rz*2aeOJHvkx)TEzkMPp|-t7Ft?%VdL{nE5xCvlv2hY_EN&f>4Q$0CgD* z z4`2K6ei=&QL5*6t_(;aQW`VXAYX9s@_uH=H6*GD8q{4HRFd;>shl^ZY4{kh*@3y48 z+2wJ^6jeA?TBdor)~>GbNLZKLrRpVqB=_|+ zuV8DhCLcnEd?l4&bD4Zk6&6x^z+u zu}~XBF{$1>U68+Yy&kNecUo>=&|Ag%4lcncv{M{y@$_DH)ryCD_y-X{K<5e!v+bef zEx%b*a^&hOpkUW-i6Yirp~>MM1VEXA0)+JcIx{C6a_yqZ!B^ z6x&zhPNkkU-Tl9fs{Ahj4B*mR;0aXyKz-@<2k`Ib#Q**iz%cq{4c6g_QCmIOItXMw zdBBt`8q?&GcrQDyt^4)S?_pBI`8D4G@`TyA~*rl}mGe#lNtvCwHco6S3gm8y#uA4z36mk{`ISKP4L)!~HXkZ8cen0$! 
z-5DMIv%gPBbaV_>s-Q>5tmfnd8z*Y9QfrIA84t+V=(t;_r1DOR##wLSFS@SFcRAD@ zJgj9yPqVFQia!A=9}YjNZ*bD6iDsgn$p&@J*V!=~YuEpxyT9iv{8KD=lm;q1{MK?o zbbS#V$ezGk?}@0sFkaspwhL8dl`zH1;YUNZ9(Ru-#u`2$fBx^E^v{%mrtSHaC^2CZ zAt&4+OFc6JY)MfMzJmGKwlXy*8ti02ivho97~B|fZH@5|6tMAXW(u|!IqlT^MYon2 zfqr@I!C&7Qz_=Di`k@yDKkBV=>b7RFx!2tBi!O_>;CGy$jzY_g(L%t1MyEM$r|<_F z7wLqb{Z;87zRtz>*SCPq2FlM0`J-e}z6Whnqlv%h$n(Fezg5Vp$4D~#%&^`e+|AW6 zvgL1D+DfsI3dqXw;JT#{7O{ZjjiDW0GNGBafa>qs@6OF6T4<6c7EWMtnHN;uCvAed z&`8|zz~l(38nRkDOd1-lxsiP|k~x7{G)F>bNf z97W1TJFC*36{j6$&v%&Y)b>>7x|iC=p<`J^ds#aGi(dHS`2_-Wlch$K+jG{)Wv`;A z?AR1Z=�jChd{jS5$7~-Hq2_lJ0)ED;N&t9D&Ivd}xWa+0VTBY7w6l7KEPQNh~Sy zpX{!xTP(#yhnnZ&`wz#1I0@K!n<4Wqnn1%?cu^%nyHGM!Q%4ChJ!v&RJ!i=tm2KL{ z9?;|YFw)3 zEp$G=*pvqt@BvaTTgGdnz5n=Vecv*I!qPU>p7lj*Ot(aN$;SI^al?X0&DT0ogkr7H6I z!@G|qEp<6y^OBVDVwqwhP(T_^*)Rz$eB`I#1pGR2s*qJC;ak=yp(82{v2K26ujsU; zy-FqMgAzf9T_~j*Vr%pYveG)1kE#D4HZAIn`>X|f@uMLitVV<6gRb$^$6?m0( za<8&(D>upe+y+glpicCx@2>$g3#`c8rv&?{NJymN2f1%5Tcoq z^e=^(XGUgTAI;pyu{Rz?nDNJzXzn70=RGaSGhoPc-ftDW2+Y!I$uHX$-|JXf}zIP_`FF5{Sc&HQ z0q@UP>tw01F%TZ|4C2FD(1-JO8!7zZUq{0{^@Pf`5(t7sdA{t^fc4 literal 0 HcmV?d00001 diff --git a/Projects/2. Large Language Models and LangChain/chat_models.py b/Projects/2. Large Language Models and LangChain/chat_models.py new file mode 100644 index 0000000..6ef9c7a --- /dev/null +++ b/Projects/2. Large Language Models and LangChain/chat_models.py @@ -0,0 +1,14 @@ +from langchain.chat_models import ChatOpenAI +from langchain.schema import ( + HumanMessage, + SystemMessage +) + +chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0) + +messages = [ + SystemMessage(content="You are a helpful assistant that translates English to French."), + HumanMessage(content="Translate the following sentence: I love programming.") +] + +chat(messages) \ No newline at end of file diff --git a/Projects/2. Large Language Models and LangChain/few_short.py b/Projects/2. 
Large Language Models and LangChain/few_short.py new file mode 100644 index 0000000..a59110f --- /dev/null +++ b/Projects/2. Large Language Models and LangChain/few_short.py @@ -0,0 +1,58 @@ +from langchain import PromptTemplate +from langchain import FewShotPromptTemplate +from langchain.chat_models import ChatOpenAI +from langchain import LLMChain + +# create our examples dictionery +examples = [ + { + "query": "What's the weather like?", + "answer": "It's raining cats and dogs, better bring an umbrella!" + }, { + "query": "How old are you?", + "answer": "Age is just a number, but I'm timeless." + } +] + +# create an example template +example_template = """ +User: {query} +AI: {answer} +""" + +# create a prompt example from above template +example_prompt = PromptTemplate( + input_variables=["query", "answer"], + template=example_template +) + +# now break our previous prompt into a prefix and suffix +# the prefix is our instructions +prefix = """The following are excerpts from conversations with an AI +assistant. The assistant is known for its humor and wit, providing +entertaining and amusing responses to users' questions. Here are some +examples: +""" +# and the suffix our user input and output indicator +suffix = """ +User: {query} +AI: """ + +# now create the fe w-shot prompt template +few_shot_prompt_template = FewShotPromptTemplate( + examples=examples, + example_prompt=example_prompt, + prefix=prefix, + suffix=suffix, + input_variables=["query"], + example_separator="\n\n" +) + +# load the model +chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9) + +chain = LLMChain(llm=chat, prompt=few_shot_prompt_template, verbose=True) + +ask_question = chain.run(input("Ask your question: ")) + +print(ask_question) \ No newline at end of file diff --git a/Projects/2. Large Language Models and LangChain/gpt4all.py b/Projects/2. Large Language Models and LangChain/gpt4all.py new file mode 100644 index 0000000..b6a9437 --- /dev/null +++ b/Projects/2. 
Large Language Models and LangChain/gpt4all.py @@ -0,0 +1,18 @@ +# Import modules +from langchain.llms import GPT4All +from langchain import PromptTemplate, LLMChain +from langchain.callbacks.base import CallbackManager +from langchain.callbacks.manager import CallbackManager +from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler + +template = """Question: {question} + +Answer: Let's think step by step.""" +prompt = PromptTemplate(template=template, input_variables=["question"]) + +callback_manager = CallbackManager([StreamingStdOutCallbackHandler()]) +llm = GPT4All(model="E:/Backup/Documents/Softwares/GPT4All/Models/ggml-wizardLM-7B.q4_2.bin", callback_manager=callback_manager, verbose=True) +llm_chain = LLMChain(prompt=prompt, llm=llm) + +question = "What happens when it rains somewhere?" +llm_chain.run(question) \ No newline at end of file diff --git a/Projects/2. Large Language Models and LangChain/question_answering.py b/Projects/2. Large Language Models and LangChain/question_answering.py new file mode 100644 index 0000000..e3b03f0 --- /dev/null +++ b/Projects/2. Large Language Models and LangChain/question_answering.py @@ -0,0 +1,31 @@ +from langchain import PromptTemplate +from langchain import HuggingFaceHub, LLMChain +from dotenv import load_dotenv + +load_dotenv() + +template = """Question: {question} + +Answer: """ +prompt = PromptTemplate( + template=template, + input_variables=['question'] + ) + +# user question +question =input("") + +# initialize Hub LLM +hub_llm = HuggingFaceHub( + repo_id='google/flan-t5-large', + model_kwargs={'temperature':0} +) + +# create prompt template > LLM chain +llm_chain = LLMChain( + prompt=prompt, + llm=hub_llm +) + +# ask the user question about the capital of France +print(llm_chain.run(question)) \ No newline at end of file diff --git a/Projects/2. Large Language Models and LangChain/text_summarization.py b/Projects/2. 
Large Language Models and LangChain/text_summarization.py new file mode 100644 index 0000000..39b2a7f --- /dev/null +++ b/Projects/2. Large Language Models and LangChain/text_summarization.py @@ -0,0 +1,14 @@ +from langchain.chat_models import ChatOpenAI +from langchain.chains import LLMChain +from langchain.prompts import PromptTemplate + +llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0) + +summarization_template = "Summarize the following text to one sentence: {text}" +summarization_prompt = PromptTemplate(input_variables=["text"], template=summarization_template) +summarization_chain = LLMChain(llm=llm, prompt=summarization_prompt, verbose=True) + +text = "LangChain provides many modules that can be used to build language model applications. Modules can be combined to create more complex applications, or be used individually for simple applications. The most basic building block of LangChain is calling an LLM on some input. Let’s walk through a simple example of how to do this. For this purpose, let’s pretend we are building a service that generates a company name based on what the company makes." +summarized_text = summarization_chain.predict(text=text) + +print(summarized_text) \ No newline at end of file diff --git a/Projects/2. Large Language Models and LangChain/track_usage.py b/Projects/2. Large Language Models and LangChain/track_usage.py new file mode 100644 index 0000000..6a859df --- /dev/null +++ b/Projects/2. Large Language Models and LangChain/track_usage.py @@ -0,0 +1,9 @@ +from langchain.llms import OpenAI +from langchain.callbacks import get_openai_callback + +llm = OpenAI(model_name="text-davinci-003", n=2, best_of=2) + +with get_openai_callback() as cb: + result = llm("Tell me a joke") + cost = cb.total_cost + print("$",round(cost, 5)) \ No newline at end of file diff --git a/Projects/3. Learning How to Prompt/AI-Powered News Summarizer b/Projects/3. 
Learning How to Prompt/AI-Powered News Summarizer new file mode 160000 index 0000000..ec9b8bd --- /dev/null +++ b/Projects/3. Learning How to Prompt/AI-Powered News Summarizer @@ -0,0 +1 @@ +Subproject commit ec9b8bdfb269240368931e2f18322ca75fc5a20d diff --git a/Projects/3. Learning How to Prompt/chain_prompting.py b/Projects/3. Learning How to Prompt/chain_prompting.py new file mode 100644 index 0000000..209f604 --- /dev/null +++ b/Projects/3. Learning How to Prompt/chain_prompting.py @@ -0,0 +1,40 @@ +from langchain import PromptTemplate, LLMChain +from langchain.llms import OpenAI + +# Initialize LLM +llm = OpenAI(model_name="text-davinci-003", temperature=0) + +# Prompt 1 +template_question = """What is the name of the famous scientist who developed the theory of general relativity? +Answer: """ +prompt_question = PromptTemplate( + template=template_question, + input_variables=[]) + +# Prompt 2 +template_fact = """Provide a brief description of {scientist}'s theory of general relativity. +Answer: """ +prompt_fact = PromptTemplate( + input_variables=["scientist"], + template=template_fact) + +# Create the LLMChain for the first prompt +chain_question = LLMChain(llm=llm, prompt=prompt_question) + +# Run the LLMChain for the first prompt with an empty dictionary +response_question = chain_question.run({}) + +# Extract the scientist's name from the response +scientist = response_question.strip() + +# Create the LLMChain for the second prompt +chain_fact = LLMChain(llm=llm, prompt=prompt_fact) + +# Input data for the second prompt +input_data = {"scientist": scientist} + +# Run the LLMChain for the second prompt +response_fact = chain_fact.run(input_data) + +print("Scientist:", scientist) +print("Fact:", response_fact) diff --git a/Projects/3. Learning How to Prompt/news_summarizer_ output_parsers.py b/Projects/3. Learning How to Prompt/news_summarizer_ output_parsers.py new file mode 100644 index 0000000..88cbd69 --- /dev/null +++ b/Projects/3. 
Learning How to Prompt/news_summarizer_ output_parsers.py @@ -0,0 +1,58 @@ +from langchain.output_parsers import PydanticOutputParser +from pydantic import validator +from pydantic import BaseModel, Field +from typing import List +from langchain.prompts import PromptTemplate +from langchain.llms import OpenAI + + + +# create output parser class +class ArticleSummary(BaseModel): + title: str = Field(description="Title of the article") + summary: List[str] = Field(description="Bulleted list summary of the article") + + # validating whether the generated summary has at least three lines + @validator('summary', allow_reuse=True) + def has_three_or_more_lines(cls, list_of_lines): + if len(list_of_lines) < 3: + raise ValueError("Generated summary has less than three bullet points!") + return list_of_lines + +# set up output parser +parser = PydanticOutputParser(pydantic_object=ArticleSummary) + +# create prompt template +# notice that we are specifying the "partial_variables" parameter +template = """ +You are a very good assistant that summarizes online articles. + +Here's the article you want to summarize. + +================== +Title: {article_title} + +{article_text} +================== + +{format_instructions} +""" + +prompt = PromptTemplate( + template=template, + input_variables=["article_title", "article_text"], + partial_variables={"format_instructions": parser.get_format_instructions()} +) + +# Format the prompt using the article title and text obtained from scraping +formatted_prompt = prompt.format_prompt(article_title=article_title, article_text=article_text) + +# instantiate model class +model = OpenAI(model_name="text-davinci-003", temperature=0.0) + +# Use the model to generate a summary +output = model(formatted_prompt.to_string()) + +# Parse the output into the Pydantic model +parsed_output = parser.parse(output) +print(parsed_output) \ No newline at end of file diff --git a/Projects/3. 
Learning How to Prompt/news_summarizer_extended copy.py b/Projects/3. Learning How to Prompt/news_summarizer_extended copy.py new file mode 100644 index 0000000..dc7ea4f --- /dev/null +++ b/Projects/3. Learning How to Prompt/news_summarizer_extended copy.py @@ -0,0 +1,79 @@ +import os +import json +from dotenv import load_dotenv +import requests +from newspaper import Article +from langchain.schema import ( + HumanMessage +) +from langchain.chat_models import ChatOpenAI + +load_dotenv() + +headers = { + 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36' +} + +article_url = "https://www.artificialintelligence-news.com/2022/01/25/meta-claims-new-ai-supercomputer-will-set-records/" + +session = requests.Session() + +try: + response = session.get(article_url, headers=headers, timeout=10) + + if response.status_code == 200: + article = Article(article_url) + article.download() + article.parse() + + # print(f"Title: {article.title}") + # print(f"Text: {article.text}") + else: + print(f"Failed to fetch article at {article_url}") +except Exception as e: + print(f"Error occurred while fetching article at {article_url}: {e}") + +# we get the article data from the scraping part +article_title = article.title +article_text = article.text + +# prepare template for prompt +template = """ +As an advanced AI, you've been tasked to summarize online articles into bulleted points. Here are a few examples of how you've done this in the past: + +Example 1: +Original Article: 'The Effects of Climate Change +Summary: +- Climate change is causing a rise in global temperatures. +- This leads to melting ice caps and rising sea levels. +- Resulting in more frequent and severe weather conditions. + +Example 2: +Original Article: 'The Evolution of Artificial Intelligence +Summary: +- Artificial Intelligence (AI) has developed significantly over the past decade. 
+- AI is now used in multiple fields such as healthcare, finance, and transportation. +- The future of AI is promising but requires careful regulation. + +Now, here's the article you need to summarize: + +================== +Title: {article_title} + +{article_text} +================== + +Please provide a summarized version of the article in a bulleted list format. +""" + +# Format the Prompt +prompt = template.format(article_title=article.title, article_text=article.text) + +messages = [HumanMessage(content=prompt)] + +# load the model +chat = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.0) + +# generate summary +summary = chat(messages) +print(summary.content) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/Character_Text_Splitter.py b/Projects/4. Keeping Knowledge Organized with Indexes/Character_Text_Splitter.py new file mode 100644 index 0000000..b4cf9a4 --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/Character_Text_Splitter.py @@ -0,0 +1,14 @@ +from langchain.document_loaders import PyPDFLoader +from langchain.text_splitter import CharacterTextSplitter + +loader = PyPDFLoader("The One Page Linux Manual.pdf") +pages = loader.load_and_split() + +text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=20) +texts = text_splitter.split_documents(pages) + +print(texts[0]) + +print (f"You have {len(texts)} documents") +print ("Preview:") +print (texts[0].page_content) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/Cohere_embeddings.py b/Projects/4. Keeping Knowledge Organized with Indexes/Cohere_embeddings.py new file mode 100644 index 0000000..43da6f2 --- /dev/null +++ b/Projects/4. 
Keeping Knowledge Organized with Indexes/Cohere_embeddings.py @@ -0,0 +1,29 @@ +import cohere +from langchain.embeddings import CohereEmbeddings + +# Initialize the CohereEmbeddings object +cohere = CohereEmbeddings( + model="embed-multilingual-v2.0", + cohere_api_key="95itqFSDUc4PMznsGaIqDUaGBRhwWG6sLE4XBnTD" +) + +# Define a list of texts +texts = [ + "Hello from Cohere!", + "مرحبًا من كوهير!", + "Hallo von Cohere!", + "Bonjour de Cohere!", + "¡Hola desde Cohere!", + "Olá do Cohere!", + "Ciao da Cohere!", + "您好,来自 Cohere!", + "कोहेरे से नमस्ते!" +] + +# Generate embeddings for the texts +document_embeddings = cohere.embed_documents(texts) + +# Print the embeddings +for text, embedding in zip(texts, document_embeddings): + print(f"Text: {text}") + print(f"Embedding: {embedding[:5]}") # print first 5 dimensions of each embedding \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/Customer Support Question Answering Chatbot b/Projects/4. Keeping Knowledge Organized with Indexes/Customer Support Question Answering Chatbot new file mode 160000 index 0000000..cf412e4 --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/Customer Support Question Answering Chatbot @@ -0,0 +1 @@ +Subproject commit cf412e45f654572e4f6b7807b2a10577e71c1e92 diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/DataChad/.streamlit/secrets.toml b/Projects/4. Keeping Knowledge Organized with Indexes/DataChad/.streamlit/secrets.toml new file mode 100644 index 0000000..f43d13a --- /dev/null +++ b/Projects/4. 
Keeping Knowledge Organized with Indexes/DataChad/.streamlit/secrets.toml @@ -0,0 +1,3 @@ +OPENAI_API_KEY=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 +ACTIVELOOP_TOKEN=eyJhbGciOiJIUzUxMiIsImlhdCI6MTY4NzM1NDkyMSwiZXhwIjoxNzE4OTc3MjYwfQ.eyJpZCI6ImRqcGFwemluIn0.ezQ-ChtNr76KXlEr_lSJuxsa9im6I2X9BeDpGzMWzdz2kkC7OI9zAD2iQ8LO98gdKC0gYRcE3FAAX0FnYgXeTg +ACTIVELOOP_ID=djpapzin \ No newline at end of file diff --git a/Projects/DataChad/__pycache__/constants.cpython-310.pyc b/Projects/4. Keeping Knowledge Organized with Indexes/DataChad/__pycache__/constants.cpython-310.pyc similarity index 100% rename from Projects/DataChad/__pycache__/constants.cpython-310.pyc rename to Projects/4. Keeping Knowledge Organized with Indexes/DataChad/__pycache__/constants.cpython-310.pyc diff --git a/Projects/DataChad/__pycache__/utils.cpython-310.pyc b/Projects/4. Keeping Knowledge Organized with Indexes/DataChad/__pycache__/utils.cpython-310.pyc similarity index 100% rename from Projects/DataChad/__pycache__/utils.cpython-310.pyc rename to Projects/4. Keeping Knowledge Organized with Indexes/DataChad/__pycache__/utils.cpython-310.pyc diff --git a/Projects/DataChad/app.py b/Projects/4. Keeping Knowledge Organized with Indexes/DataChad/app.py similarity index 100% rename from Projects/DataChad/app.py rename to Projects/4. Keeping Knowledge Organized with Indexes/DataChad/app.py diff --git a/Projects/DataChad/constants.py b/Projects/4. Keeping Knowledge Organized with Indexes/DataChad/constants.py similarity index 100% rename from Projects/DataChad/constants.py rename to Projects/4. Keeping Knowledge Organized with Indexes/DataChad/constants.py diff --git a/Projects/DataChad/requirements.txt b/Projects/4. Keeping Knowledge Organized with Indexes/DataChad/requirements.txt similarity index 100% rename from Projects/DataChad/requirements.txt rename to Projects/4. Keeping Knowledge Organized with Indexes/DataChad/requirements.txt diff --git a/Projects/DataChad/utils.py b/Projects/4. 
Keeping Knowledge Organized with Indexes/DataChad/utils.py similarity index 100% rename from Projects/DataChad/utils.py rename to Projects/4. Keeping Knowledge Organized with Indexes/DataChad/utils.py diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/Deep_Lake_Vector_Store_embeddings.py b/Projects/4. Keeping Knowledge Organized with Indexes/Deep_Lake_Vector_Store_embeddings.py new file mode 100644 index 0000000..0b64c76 --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/Deep_Lake_Vector_Store_embeddings.py @@ -0,0 +1,41 @@ +from langchain.embeddings.openai import OpenAIEmbeddings +from langchain.vectorstores import DeepLake +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain.chat_models import ChatOpenAI +from langchain.chains import RetrievalQA + +# create our documents +texts = [ + "Napoleon Bonaparte was born in 15 August 1769", + "Louis XIV was born in 5 September 1638", + "Lady Gaga was born in 28 March 1986", + "Michael Jeffrey Jordan was born in 17 February 1963" +] +text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0) +docs = text_splitter.create_documents(texts) + +# initialize embeddings model +embeddings = OpenAIEmbeddings(model="text-embedding-ada-002") + +# create Deep Lake dataset +# TODO: use your organization id here. 
(by default, org id is your username) +my_activeloop_org_id = "langchain_course_deeplake" +my_activeloop_dataset_name = "langchain_course_embeddings" +dataset_path = f"hub://{my_activeloop_org_id}/{my_activeloop_dataset_name}" +db = DeepLake(dataset_path=dataset_path, embedding_function=embeddings) + +# add documents to our Deep Lake dataset +db.add_documents(docs) + +# create retriever from db +retriever = db.as_retriever() + +# istantiate the llm wrapper +model = ChatOpenAI(model='gpt-3.5-turbo') + +# create the question-answering chain +qa_chain = RetrievalQA.from_llm(model, retriever=retriever) + +# ask a question to the chain +qa_chain.run("When was Michael Jordan born?") + diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/Embedding_Models.py b/Projects/4. Keeping Knowledge Organized with Indexes/Embedding_Models.py new file mode 100644 index 0000000..a9f14ed --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/Embedding_Models.py @@ -0,0 +1,9 @@ +from langchain.llms import HuggingFacePipeline +from langchain.embeddings import HuggingFaceEmbeddings + +model_name = "sentence-transformers/all-mpnet-base-v2" +model_kwargs = {'device': 'cpu'} +hf = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs) + +documents = ["Document 1", "Document 2", "Document 3"] +doc_embeddings = hf.embed_documents(documents) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/Google Drive loader.py b/Projects/4. Keeping Knowledge Organized with Indexes/Google Drive loader.py new file mode 100644 index 0000000..f4c7ced --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/Google Drive loader.py @@ -0,0 +1,8 @@ +from langchain.document_loaders import GoogleDriveLoader + +loader = GoogleDriveLoader( + folder_id="your_folder_id", + recursive=False # Optional: Fetch files from subfolders recursively. Defaults to False. +) + +docs = loader.load() diff --git a/Projects/4. 
Keeping Knowledge Organized with Indexes/MarkdownTextSplitter.py b/Projects/4. Keeping Knowledge Organized with Indexes/MarkdownTextSplitter.py new file mode 100644 index 0000000..efc6017 --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/MarkdownTextSplitter.py @@ -0,0 +1,41 @@ +from langchain.text_splitter import MarkdownTextSplitter + +markdown_text = """ +# + +# Welcome to My Blog! + +## Introduction +Hello everyone! My name is **John Doe** and I am a _software developer_. I specialize in Python, Java, and JavaScript. + +Here's a list of my favorite programming languages: + +1. Python +2. JavaScript +3. Java + +You can check out some of my projects on [GitHub](https://github.com). + +## About this Blog +In this blog, I will share my journey as a software developer. I'll post tutorials, my thoughts on the latest technology trends, and occasional book reviews. + +Here's a small piece of Python code to say hello: + +\``` python +def say_hello(name): + print(f"Hello, {name}!") + +say_hello("John") +\``` + +Stay tuned for more updates! + +## Contact Me +Feel free to reach out to me on [Twitter](https://twitter.com) or send me an email at johndoe@email.com. + +""" + +markdown_splitter = MarkdownTextSplitter(chunk_size=100, chunk_overlap=0) +docs = markdown_splitter.create_documents([markdown_text]) + +print(docs) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/NLTK_Text_Splitter.py b/Projects/4. Keeping Knowledge Organized with Indexes/NLTK_Text_Splitter.py new file mode 100644 index 0000000..2757dd2 --- /dev/null +++ b/Projects/4. 
Keeping Knowledge Organized with Indexes/NLTK_Text_Splitter.py @@ -0,0 +1,9 @@ +from langchain.text_splitter import NLTKTextSplitter + +# Load a long document +with open('my_file.txt', encoding= 'unicode_escape') as f: + sample_text = f.read() + +text_splitter = NLTKTextSplitter(chunk_size=500) +texts = text_splitter.split_text(sample_text) +print(texts) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/PyPDFLoader.py b/Projects/4. Keeping Knowledge Organized with Indexes/PyPDFLoader.py new file mode 100644 index 0000000..f007648 --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/PyPDFLoader.py @@ -0,0 +1,6 @@ + +from langchain.document_loaders import PyPDFLoader +loader = PyPDFLoader("E:/Backup/Documents/Books/Sex Smart - Chapter 1.pdf") +pages = loader.load_and_split() + +print(pages[0]) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/RAG_Chatbot/app.py b/Projects/4. Keeping Knowledge Organized with Indexes/RAG_Chatbot/app.py new file mode 100644 index 0000000..ecc5e54 --- /dev/null +++ b/Projects/4. 
Keeping Knowledge Organized with Indexes/RAG_Chatbot/app.py @@ -0,0 +1,181 @@ +import langchain +from langchain.document_loaders import ApifyDatasetLoader +from langchain.utilities import ApifyWrapper +from langchain.document_loaders.base import Document +import os + +os.environ["APIFY_API_TOKEN"] = db.secrets.get("APIFY_API_TOKEN") + +apify = ApifyWrapper() +loader = apify.call_actor( + actor_id="apify/website-content-crawler", + run_input={"startUrls": [{"url": "djpapzin"}]}, + dataset_mapping_function=lambda dataset_item: Document( + page_content=dataset_item["text"] if dataset_item["text"] else "No content available", + metadata={ + "source": dataset_item["url"], + "title": dataset_item["metadata"]["title"] + } + ), +) + +docs = loader.load() + +from langchain.text_splitter import RecursiveCharacterTextSplitter + +# we split the documents into smaller chunks +text_splitter = RecursiveCharacterTextSplitter( + chunk_size=1000, chunk_overlap=20, length_function=len +) +docs_split = text_splitter.split_documents(docs) + +from langchain.embeddings.cohere import CohereEmbeddings +from langchain.vectorstores import DeepLake +import os + +os.environ["COHERE_API_KEY"] = db.secrets.get("COHERE_API_KEY") +os.environ["ACTIVELOOP_TOKEN"] = db.secrets.get("APIFY_API_TOKEN") + +embeddings = CohereEmbeddings(model = "embed-english-v2.0") + +username = "elleneal" # replace with your username from app.activeloop.ai +db_id = 'kb-material'# replace with your database name +DeepLake.force_delete_by_path(f"hub://{username}/{db_id}") + +dbs = DeepLake(dataset_path=f"hub://{username}/{db_id}", embedding_function=embeddings) +dbs.add_documents(docs_split) + +from langchain.vectorstores import DeepLake +from langchain.embeddings.cohere import CohereEmbeddings +from langchain.retrievers import ContextualCompressionRetriever +from langchain.retrievers.document_compressors import CohereRerank +import os + +os.environ["COHERE_API_KEY"] = db.secrets.get("COHERE_API_KEY") 
+os.environ["ACTIVELOOP_TOKEN"] = db.secrets.get("ACTIVELOOP_TOKEN") + +@st.cache_resource() +def data_lake(): + embeddings = CohereEmbeddings(model = "embed-english-v2.0") + + dbs = DeepLake( + dataset_path="hub://elleneal/activeloop-material", + read_only=True, + embedding_function=embeddings + ) + + retriever = dbs.as_retriever() + retriever.search_kwargs["distance_metric"] = "cos" + retriever.search_kwargs["fetch_k"] = 20 + retriever.search_kwargs["maximal_marginal_relevance"] = True + retriever.search_kwargs["k"] = 20 + + compressor = CohereRerank( + model = 'rerank-english-v2.0', + top_n=5 + ) + + compression_retriever = ContextualCompressionRetriever( + base_compressor=compressor, base_retriever=retriever + ) + + return dbs, compression_retriever, retriever + +dbs, compression_retriever, retriever = data_lake() + + +from langchain.chat_tools import ConversationBufferWindowMemory + +@st.cache_resource() +def memory(): + memory=ConversationBufferWindowMemory( + k=3, + memory_key="chat_history", + return_messages=True, + output_key='answer' + ) + return memory + +memory=memory() + + +from langchain.chat_models import AzureChatOpenAI + +BASE_URL = "" +API_KEY = db.secrets.get("AZURE_OPENAI_KEY") +DEPLOYMENT_NAME = "" +llm = AzureChatOpenAI( + openai_api_base=BASE_URL, + openai_api_version="2023-03-15-preview", + deployment_name=DEPLOYMENT_NAME, + openai_api_key=API_KEY, + openai_api_type="azure", + streaming=True, + verbose=True, + temperature=0, + max_tokens=1500, + top_p=0.95 +) + +from langchain.chains import ConversationalRetrievalChain + +qa = ConversationalRetrievalChain.from_llm( + llm=llm, + retriever=compression_retriever, + memory=memory, + verbose=True, + chain_type="stuff", + return_source_documents=True +) + +# UI code + +if st.sidebar.button("Start a New Chat Interaction"): + clear_cache_and_session() + +if "messages" not in st.session_state: + st.session_state.messages = [] + +for message in st.session_state.messages: + with 
st.chat_message(message["role"]): + st.markdown(message["content"]) + +def chat_ui(qa): + + if prompt := st.chat_input("Ask me questions: How can I retrieve data from Deep Lake in Langchain?"): + + st.session_state.messages.append({"role": "user", "content": prompt}) + + with st.chat_message("user"): + st.markdown(prompt) + + with st.chat_message("assistant"): + message_placeholder = st.empty() + full_response = "" + + memory_variables = memory.load_memory_variables({}) + + with st.spinner("Searching course material"): + response = capture_and_display_output( + qa, ({"question": prompt, "chat_history": memory_variables}) + ) + + full_response += response["answer"] + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + + source = response["source_documents"][0].metadata + source2 = response["source_documents"][1].metadata + with st.expander("See Resources"): + st.write(f"Title: {source['title'].split('·')[0].strip()}") + st.write(f"Source: {source['source']}") + st.write(f"Relevance to Query: {source['relevance_score'] * 100}%") + st.write(f"Title: {source2['title'].split('·')[0].strip()}") + st.write(f"Source: {source2['source']}") + st.write(f"Relevance to Query: {source2['relevance_score'] * 100}%") + + st.session_state.messages.append( + {"role": "assistant", "content": full_response} + ) + +chat_ui(qa) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/RAG_Chatbot/chatbot.py b/Projects/4. Keeping Knowledge Organized with Indexes/RAG_Chatbot/chatbot.py new file mode 100644 index 0000000..60d8d3a --- /dev/null +++ b/Projects/4. 
Keeping Knowledge Organized with Indexes/RAG_Chatbot/chatbot.py @@ -0,0 +1,108 @@ +import databutton as db +import streamlit as st +import io +import re +import sys +from typing import Any, Callable + +# Verbose Display Code for Streamlit +def capture_and_display_output(func: Callable[..., Any], args, **kwargs) -> Any: + # Capture the standard output + original_stdout = sys.stdout + sys.stdout = output_catcher = io.StringIO() + + # Run the given function and capture its output + response = func(args, **kwargs) + + # Reset the standard output to its original value + sys.stdout = original_stdout + + # Clean the captured output + output_text = output_catcher.getvalue() + clean_text = re.sub(r"\x1b[.?[@-~]", "", output_text) + + # Custom CSS for the response box + st.markdown(""" + + """, unsafe_allow_html=True) + + # Create an expander titled "See Verbose" + with st.expander("See Langchain Thought Process"): + # Display the cleaned text in Streamlit as code + st.code(clean_text) + + return response + +# Create a button to trigger the clearing of cache and session states +if st.sidebar.button("Start a New Chat Interaction"): + clear_cache_and_session() + +# Initialize chat history +if "messages" not in st.session_state: + st.session_state.messages = [] + +# Display chat messages from history on app rerun +for message in st.session_state.messages: + with st.chat_message(message["role"]): + st.markdown(message["content"]) + +def chat_ui(qa): + # Accept user input + if prompt := st.chat_input( + "Ask me questions: How can I retrieve data from Deep Lake in Langchain?" 
+ ): + + # Add user message to chat history + st.session_state.messages.append({"role": "user", "content": prompt}) + + # Display user message in chat message container + with st.chat_message("user"): + st.markdown(prompt) + + # Display assistant response in chat message container + with st.chat_message("assistant"): + message_placeholder = st.empty() + full_response = "" + + # Load the memory variables, which include the chat history + memory_variables = memory.load_memory_variables({}) + + # Predict the AI's response in the conversation + with st.spinner("Searching course material"): + response = capture_and_display_output( + qa, ({"question": prompt, "chat_history": memory_variables}) + ) + + # Display chat response + full_response += response["answer"] + message_placeholder.markdown(full_response + "▌") + message_placeholder.markdown(full_response) + + #Display top 2 retrieved sources + source = response["source_documents"][0].metadata + source2 = response["source_documents"][1].metadata + with st.expander("See Resources"): + st.write(f"Title: {source['title'].split('·')[0].strip()}") + st.write(f"Source: {source['source']}") + st.write(f"Relevance to Query: {source['relevance_score'] * 100}%") + st.write(f"Title: {source2['title'].split('·')[0].strip()}") + st.write(f"Source: {source2['source']}") + st.write(f"Relevance to Query: {source2['relevance_score'] * 100}%") + + # Append message to session state + st.session_state.messages.append( + {"role": "assistant", "content": full_response} + ) + +# Run function passing the ConversationalRetrievalChain +chat_ui(qa) diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/RAG_Chatbot/requirements.txt b/Projects/4. Keeping Knowledge Organized with Indexes/RAG_Chatbot/requirements.txt new file mode 100644 index 0000000..ddb556c --- /dev/null +++ b/Projects/4. 
Keeping Knowledge Organized with Indexes/RAG_Chatbot/requirements.txt @@ -0,0 +1,9 @@ +langchain +deeplake +openai +cohere +apify-client +watchdog==3.0.0 +streamlit==1.26.0 +databutton==0.36.0 +ipykernel==6.25.1 \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/Recursive_Character_Text_Splitter.py b/Projects/4. Keeping Knowledge Organized with Indexes/Recursive_Character_Text_Splitter.py new file mode 100644 index 0000000..a801d5b --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/Recursive_Character_Text_Splitter.py @@ -0,0 +1,16 @@ +from langchain.document_loaders import PyPDFLoader +from langchain.text_splitter import RecursiveCharacterTextSplitter + +loader = PyPDFLoader("The One Page Linux Manual.pdf") +pages = loader.load_and_split() + +text_splitter = RecursiveCharacterTextSplitter( + chunk_size=50, + chunk_overlap=10, + length_function=len, +) + +docs = text_splitter.split_documents(pages) +for doc in docs: + print(doc) + diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/SeleniumURLLoader.py b/Projects/4. Keeping Knowledge Organized with Indexes/SeleniumURLLoader.py new file mode 100644 index 0000000..0cbc864 --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/SeleniumURLLoader.py @@ -0,0 +1,11 @@ +from langchain.document_loaders import SeleniumURLLoader + +urls = [ + "https://www.youtube.com/watch?v=TFa539R09EQ&t=139s", + "https://www.youtube.com/watch?v=6Zv6A_9urh4&t=112s" +] + +loader = SeleniumURLLoader(urls=urls) +data = loader.load() + +print(data[0]) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/Similarity_search_and_vector_embeddings.py b/Projects/4. Keeping Knowledge Organized with Indexes/Similarity_search_and_vector_embeddings.py new file mode 100644 index 0000000..2960412 --- /dev/null +++ b/Projects/4. 
Keeping Knowledge Organized with Indexes/Similarity_search_and_vector_embeddings.py @@ -0,0 +1,32 @@ +import openai +import numpy as np +from sklearn.metrics.pairwise import cosine_similarity +from langchain.embeddings import OpenAIEmbeddings + +# Define the documents +documents = [ + "The cat is on the mat.", + "There is a cat on the mat.", + "The dog is in the yard.", + "There is a dog in the yard.", +] + +# Initialize the OpenAIEmbeddings instance +embeddings = OpenAIEmbeddings(model="text-embedding-ada-002") + +# Generate embeddings for the documents +document_embeddings = embeddings.embed_documents(documents) + +# Perform a similarity search for a given query +query = "A cat is sitting on a mat." +query_embedding = embeddings.embed_query(query) + +# Calculate similarity scores +similarity_scores = cosine_similarity([query_embedding], document_embeddings)[0] + +# Find the most similar document +most_similar_index = np.argmax(similarity_scores) +most_similar_document = documents[most_similar_index] + +print(f"Most similar document to the query '{query}':") +print(most_similar_document) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/SpacyTextSplitter.py b/Projects/4. Keeping Knowledge Organized with Indexes/SpacyTextSplitter.py new file mode 100644 index 0000000..0bb4d34 --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/SpacyTextSplitter.py @@ -0,0 +1,14 @@ +from langchain.text_splitter import SpacyTextSplitter + +# Load a long document +with open('my_file.txt', encoding= 'unicode_escape') as f: + sample_text = f.read() + +# Instantiate the SpacyTextSplitter with the desired chunk size +text_splitter = SpacyTextSplitter(chunk_size=500, chunk_overlap=20) + +# Split the text using SpacyTextSplitter +texts = text_splitter.split_text(sample_text) + +# Print the first chunk +print(texts[0]) \ No newline at end of file diff --git a/Projects/4. 
Keeping Knowledge Organized with Indexes/TextLoader.py b/Projects/4. Keeping Knowledge Organized with Indexes/TextLoader.py new file mode 100644 index 0000000..bf1389d --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/TextLoader.py @@ -0,0 +1,10 @@ +from langchain.document_loaders import TextLoader + +# Replace 'my_file.txt' with the actual path to the text file you want to load +loader = TextLoader('my_file.txt') + +# Load the documents from the text file +documents = loader.load() + +# You can print the documents to see the output +print(documents) diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/TokenTextSplitter.py b/Projects/4. Keeping Knowledge Organized with Indexes/TokenTextSplitter.py new file mode 100644 index 0000000..2613ee9 --- /dev/null +++ b/Projects/4. Keeping Knowledge Organized with Indexes/TokenTextSplitter.py @@ -0,0 +1,12 @@ +from langchain.text_splitter import TokenTextSplitter + +# Load a long document +with open('my_file.txt', encoding= 'unicode_escape') as f: + sample_text = f.read() + +# Initialize the TokenTextSplitter with desired chunk size and overlap +text_splitter = TokenTextSplitter(chunk_size=100, chunk_overlap=50) + +# Split into smaller chunks +texts = text_splitter.split_text(sample_text) +print(texts[0]) \ No newline at end of file diff --git a/Projects/4. Keeping Knowledge Organized with Indexes/indexes_retrievers.py b/Projects/4. Keeping Knowledge Organized with Indexes/indexes_retrievers.py new file mode 100644 index 0000000..13ddeab --- /dev/null +++ b/Projects/4. 
Keeping Knowledge Organized with Indexes/indexes_retrievers.py @@ -0,0 +1,87 @@ +from langchain.document_loaders import TextLoader +from langchain.text_splitter import CharacterTextSplitter +from langchain.embeddings import OpenAIEmbeddings +from langchain.vectorstores import DeepLake +from langchain.chains import RetrievalQA +from langchain.llms import OpenAI +from langchain.retrievers import ContextualCompressionRetriever +from langchain.retrievers.document_compressors import LLMChainExtractor +from dotenv import load_dotenv + +load_dotenv() + + +# text to write to a local file +# taken from https://www.theverge.com/2023/3/14/23639313/google-ai-language-model-palm-api-challenge-openai +text = """Google opens up its AI language model PaLM to challenge OpenAI and GPT-3 +Google is offering developers access to one of its most advanced AI language models: PaLM. +The search giant is launching an API for PaLM alongside a number of AI enterprise tools +it says will help businesses “generate text, images, code, videos, audio, and more from +simple natural language prompts.” + +PaLM is a large language model, or LLM, similar to the GPT series created by OpenAI or +Meta’s LLaMA family of models. Google first announced PaLM in April 2022. Like other LLMs, +PaLM is a flexible system that can potentially carry out all sorts of text generation and +editing tasks. You could train PaLM to be a conversational chatbot like ChatGPT, for +example, or you could use it for tasks like summarizing text or even writing code. +(It’s similar to features Google also announced today for its Workspace apps like Google +Docs and Gmail.) 
+""" + +# write text to local file +with open("my_file.txt", "w") as file: + file.write(text) + +# use TextLoader to load text from local file +loader = TextLoader("my_file.txt") +docs_from_file = loader.load() + +# create a text splitter +text_splitter = CharacterTextSplitter(chunk_size=400, chunk_overlap=20) + +# split documents into chunks +docs = text_splitter.split_documents(docs_from_file) + +# Before executing the following code, make sure to have +# your OpenAI key saved in the “OPENAI_API_KEY” environment variable. +embeddings = OpenAIEmbeddings(model="text-embedding-ada-002") + +# create Deep Lake dataset +# TODO: use your organization id here. (by default, org id is your username) +my_activeloop_org_id = "langchain_course_deeplake" +my_activeloop_dataset_name = "langchain_course_indexers_retrievers" +dataset_path = f"hub://{my_activeloop_org_id}/{my_activeloop_dataset_name}" +db = DeepLake(dataset_path=dataset_path, embedding_function=embeddings) + +# add documents to our Deep Lake dataset +db.add_documents(docs) + +# create retriever from db +retriever = db.as_retriever() + +# create a retrieval chain +qa_chain = RetrievalQA.from_chain_type( + llm=OpenAI(model="text-davinci-003"), + chain_type="stuff", + retriever=retriever +) + +query = "How Google plans to challenge OpenAI?" +response = qa_chain.run(query) +print(response) + +# create GPT3 wrapper +llm = OpenAI(model="text-davinci-003", temperature=0) + +# create compressor for the retriever +compressor = LLMChainExtractor.from_llm(llm) +compression_retriever = ContextualCompressionRetriever( + base_compressor=compressor, + base_retriever=retriever +) + +# retrieving compressed documents +retrieved_docs = compression_retriever.get_relevant_documents( + "How Google plans to challenge OpenAI?" +) +print(retrieved_docs[0].page_content) \ No newline at end of file diff --git a/Projects/5. Combining Components Together with Chains/Adding_Transcripts_to_Deep_Lake.py b/Projects/5. 
Combining Components Together with Chains/Adding_Transcripts_to_Deep_Lake.py new file mode 100644 index 0000000..31b8ced --- /dev/null +++ b/Projects/5. Combining Components Together with Chains/Adding_Transcripts_to_Deep_Lake.py @@ -0,0 +1,106 @@ +import os +from dotenv import load_dotenv +import yt_dlp +import whisper +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain.docstore.document import Document +from langchain.vectorstores import DeepLake +from langchain.embeddings.openai import OpenAIEmbeddings +from langchain.prompts import PromptTemplate +from langchain.chains import RetrievalQA + +load_dotenv() + +openai_api_key = os.environ['OPENAI_API_KEY'] +activeloop_token = os.environ['ACTIVELOOP_TOKEN'] + +def download_mp4_from_youtube(urls, job_id): + # This will hold the titles and authors of each downloaded video + video_info = [] + + for i, url in enumerate(urls): + # Set the options for the download + file_temp = f'./{job_id}_{i}.mp4' + ydl_opts = { + 'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]', + 'outtmpl': file_temp, + 'quiet': True, + } + + # Download the video file + with yt_dlp.YoutubeDL(ydl_opts) as ydl: + result = ydl.extract_info(url, download=True) + title = result.get('title', "") + author = result.get('uploader', "") + + # Add the title and author to our list + video_info.append((file_temp, title, author)) + + return video_info + +urls=["https://www.youtube.com/watch?v=mBjPyte2ZZo&t=78s", + "https://www.youtube.com/watch?v=cjs7QKJNVYM",] +video_details = download_mp4_from_youtube(urls, 1) + +# load the model +model = whisper.load_model("base") + +# iterate through each video and transcribe +results = [] +for video in video_details: + result = model.transcribe(video[0]) + results.append( result['text'] ) + print(f"Transcription for {video[0]}:\n{result['text']}\n") + +with open ('text.txt', 'w') as file: + file.write(results['text']) + +# Load the texts +with open('text.txt') as f: + text = 
f.read() + +# Split the documents +text_splitter = RecursiveCharacterTextSplitter( + chunk_size=1000, chunk_overlap=0, separators=[" ", ",", "\n"] + ) +texts = text_splitter.split_text(text) + +# pack all the chunks into a Documents: +docs = [Document(page_content=t) for t in texts[:4]] + +# import Deep Lake and build a database with embedded documents: +embeddings = OpenAIEmbeddings(model='text-embedding-ada-002') + +# create Deep Lake dataset +# TODO: use your organization id here. (by default, org id is your username) +my_activeloop_org_id = "langchain_course_deeplake" +my_activeloop_dataset_name = "langchain_course_youtube_summarizer" +dataset_path = f"hub://{my_activeloop_org_id}/{my_activeloop_dataset_name}" + +db = DeepLake(dataset_path=dataset_path, embedding_function=embeddings) +db.add_documents(docs) + +# retrieve the information from the database, +# construct a retriever object. +retriever = db.as_retriever() #measures "distance" or similarity between different data points in the database. +retriever.search_kwargs['distance_metric'] = 'cos' #use cosine similarity as its distance metric. used in information retrieval to measure the similarity between documents or pieces of text. +retriever.search_kwargs['k'] = 4 # return the 4 most similar or closest results according to the distance metric when a search is performed. + +# RetrievalQA chain is useful to query similiar contents from databse and use the returned records as context to answer questions. +prompt_template = """Use the following pieces of transcripts from a video to answer the question in bullet points and summarized. If you don't know the answer, just say that you don't know, don't try to make up an answer. 
+ +{context} + +Question: {question} +Summarized answer in bullter points:""" +PROMPT = PromptTemplate( + template=prompt_template, input_variables=["context", "question"] +) + +chain_type_kwargs = {"prompt": PROMPT} +qa = RetrievalQA.from_chain_type(llm=llm, + chain_type="stuff", + retriever=retriever, + chain_type_kwargs=chain_type_kwargs) + +print( qa.run("Summarize the mentions of google according to their AI program") ) \ No newline at end of file diff --git a/Projects/5. Combining Components Together with Chains/Jarvis Voice Assistant b/Projects/5. Combining Components Together with Chains/Jarvis Voice Assistant new file mode 160000 index 0000000..58a7cab --- /dev/null +++ b/Projects/5. Combining Components Together with Chains/Jarvis Voice Assistant @@ -0,0 +1 @@ +Subproject commit 58a7cab9c7b924d077e7d2d7b72121bf04410201 diff --git a/Projects/5. Combining Components Together with Chains/LLMChain.py b/Projects/5. Combining Components Together with Chains/LLMChain.py new file mode 100644 index 0000000..75dd969 --- /dev/null +++ b/Projects/5. Combining Components Together with Chains/LLMChain.py @@ -0,0 +1,128 @@ +from langchain import PromptTemplate, OpenAI, LLMChain + +prompt_template = "What is a word to replace the following: {word}?" + +# Set the "OPENAI_API_KEY" environment variable before running following line. +llm = OpenAI(model_name="text-davinci-003", temperature=0) + +llm_chain = LLMChain( + llm=llm, + prompt=PromptTemplate.from_template(prompt_template) +) + +# __call__ method +llm_chain("artificial") + +# .apply() method +input_list = [ + {"word": "artificial"}, + {"word": "intelligence"}, + {"word": "robot"} +] + +llm_chain.apply(input_list) + +# .generate() method +llm_chain.generate(input_list) + +# .predict() method +prompt_template = "Looking at the context of '{context}'. What is an appropriate word to replace the following: {word}?" 
+ +llm_chain = LLMChain( + llm=llm, + prompt=PromptTemplate(template=prompt_template, input_variables=["word", "context"])) + +llm_chain.predict(word="fan", context="object") + +# or .run() method +llm_chain.run(word="fan", context="object") + +#Parsers +from langchain.output_parsers import CommaSeparatedListOutputParser + +output_parser = CommaSeparatedListOutputParser() +template = """List all possible words as substitute for 'artificial' as comma separated.""" + +llm_chain = LLMChain( + llm=llm, + prompt=PromptTemplate(template=template, output_parser=output_parser, input_variables=[]), + output_parser=output_parser) + +llm_chain.predict() + +# Conversational Chain (Memory) +from langchain.chains import ConversationChain +from langchain.memory import ConversationBufferMemory + +output_parser = CommaSeparatedListOutputParser() +conversation = ConversationChain( + llm=llm, + memory=ConversationBufferMemory() +) + +conversation.predict(input="List all possible words as substitute for 'artificial' as comma separated.") + +conversation.predict(input="And the next 4?") + +# Sequential Chain +from langchain.chains import SimpleSequentialChain + +overall_chain = SimpleSequentialChain(chains=[LLMchain_one, LLMchain_two]) + +# Debug +template = """List all possible words as substitute for 'artificial' as comma separated. + +Current conversation: +{history} + +{input}""" + +conversation = ConversationChain( + llm=llm, + prompt=PromptTemplate(template=template, input_variables=["history", "input"], output_parser=output_parser), + memory=ConversationBufferMemory(), + verbose=True) + +conversation.predict(input="") + +# Custom Chain +from langchain.chains import LLMChain +from langchain.chains.base import Chain + +from typing import Dict, List + + +class ConcatenateChain(Chain): + chain_1: LLMChain + chain_2: LLMChain + + @property + def input_keys(self) -> List[str]: + # Union of the input keys of the two chains. 
+ all_input_vars = set(self.chain_1.input_keys).union(set(self.chain_2.input_keys)) + return list(all_input_vars) + + @property + def output_keys(self) -> List[str]: + return ['concat_output'] + + def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: + output_1 = self.chain_1.run(inputs) + output_2 = self.chain_2.run(inputs) + return {'concat_output': output_1 + output_2} + +prompt_1 = PromptTemplate( + input_variables=["word"], + template="What is the meaning of the following word '{word}'?", +) +chain_1 = LLMChain(llm=llm, prompt=prompt_1) + +prompt_2 = PromptTemplate( + input_variables=["word"], + template="What is a word to replace the following: {word}?", +) +chain_2 = LLMChain(llm=llm, prompt=prompt_2) + +concat_chain = ConcatenateChain(chain_1=chain_1, chain_2=chain_2) +concat_output = concat_chain.run("artificial") +print(f"Concatenated output:\n{concat_output}") \ No newline at end of file diff --git a/Projects/Twitter_Algorithm/app.py b/Projects/5. Combining Components Together with Chains/Twitter_Algorithm/app.py similarity index 51% rename from Projects/Twitter_Algorithm/app.py rename to Projects/5. Combining Components Together with Chains/Twitter_Algorithm/app.py index e20e7fa..3f2cf17 100644 --- a/Projects/Twitter_Algorithm/app.py +++ b/Projects/5. Combining Components Together with Chains/Twitter_Algorithm/app.py @@ -1,5 +1,4 @@ import os -from dotenv import load_dotenv from langchain.embeddings.openai import OpenAIEmbeddings from langchain.vectorstores import DeepLake from langchain.document_loaders import TextLoader @@ -7,18 +6,18 @@ from langchain.chat_models import ChatOpenAI from langchain.chains import ConversationalRetrievalChain -# Load the .env file -load_dotenv() - # Access the keys from the .env file OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') ACTIVELOOP_TOKEN = os.getenv('ACTIVELOOP_TOKEN') -# Define the OpenAI embeddings. 
+# Set the environment variables +os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY +os.environ['ACTIVELOOP_TOKEN'] = ACTIVELOOP_TOKEN + embeddings = OpenAIEmbeddings() -# load all files inside the repository. -root_dir = './the-algorithm-new' +# Load all files inside the repository. +root_dir = './the-algorithm' docs = [] for dirpath, dirnames, filenames in os.walk(root_dir): for file in filenames: @@ -32,11 +31,15 @@ text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) texts = text_splitter.split_documents(docs) -# Perform the indexing process and upload embeddings to Active-Loop -username = "djpapzin" # replace with your username from app.activeloop.ai -db = DeepLake(dataset_path=f"hub://{username}/twitter-algorithm", embedding_function=embeddings) -db.add_documents(texts) +# Check the content of the texts variable +print(f"Number of text chunks: {len(texts)}") + +# Explicitly embed the texts using OpenAIEmbeddings +embedded_texts = [embeddings.embed(text) for text in texts] +# Perform the indexing process and upload embeddings to Deep Lake +db = DeepLake(dataset_path="hub://djpapzin/twitter-algorithm", embedding_function=embeddings) +db.add_documents(embedded_texts) # Using the embedded texts # Define the retriever retriever = db.as_retriever() @@ -45,33 +48,18 @@ retriever.search_kwargs['maximal_marginal_relevance'] = True retriever.search_kwargs['k'] = 10 -# load the dataset, +# Load the dataset db = DeepLake(dataset_path="hub://djpapzin/twitter-algorithm", read_only=True, embedding_function=embeddings) # Connect to GPT-4 for question answering -model = ChatOpenAI(model='gpt-3.5-turbo') # switch to 'gpt-4' - - -# Establish the retriever -qa = ConversationalRetrievalChain.from_llm(model,retriever=retriever) +model = ChatOpenAI(model='gpt-3.5-turbo') # switch to 'gpt-4' +qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever) -# Create the Conversational Chain -# Define all the juicy questions you want to be answered: +# Define 
questions and get answers questions = [ "What does favCountParams do?", "is it Likes + Bookmarks, or not clear from the code?", - "What are the major negative modifiers that lower your linear ranking parameters?", - "How do you get assigned to SimClusters?", - "What is needed to migrate from one SimClusters to another SimClusters?", - "How much do I get boosted within my cluster?", - "How does Heavy ranker work. what are it’s main inputs?", - "How can one influence Heavy ranker?", - "why threads and long tweets do so well on the platform?", - "Are thread and long tweet creators building a following that reacts to only threads?", - "Do you need to follow different strategies to get most followers vs to get most likes and bookmarks per tweet?", - "Content meta data and how it impacts virality (e.g. ALT in images).", - "What are some unexpected fingerprints for spam factors?", - "Is there any difference between company verified checkmarks and blue verified individual checkmarks?", + # ... [add more questions as needed] ] chat_history = [] diff --git a/Projects/5. Combining Components Together with Chains/Twitter_Algorithm/requirements.txt b/Projects/5. Combining Components Together with Chains/Twitter_Algorithm/requirements.txt new file mode 100644 index 0000000..e6b90c0 --- /dev/null +++ b/Projects/5. Combining Components Together with Chains/Twitter_Algorithm/requirements.txt @@ -0,0 +1,4 @@ +langchain +deeplake +openai +tiktoken \ No newline at end of file diff --git a/Projects/Twitter_Algorithm/the-algorithm-new b/Projects/5. Combining Components Together with Chains/Twitter_Algorithm/the-algorithm-new similarity index 100% rename from Projects/Twitter_Algorithm/the-algorithm-new rename to Projects/5. Combining Components Together with Chains/Twitter_Algorithm/the-algorithm-new diff --git a/Projects/5. Combining Components Together with Chains/Youtube_Summarizer.py b/Projects/5. 
Combining Components Together with Chains/Youtube_Summarizer.py new file mode 100644 index 0000000..6a4b5f2 --- /dev/null +++ b/Projects/5. Combining Components Together with Chains/Youtube_Summarizer.py @@ -0,0 +1,98 @@ +import os +from dotenv import load_dotenv +import yt_dlp +import whisper +from langchain import OpenAI, LLMChain +from langchain.chains.mapreduce import MapReduceChain +from langchain.prompts import PromptTemplate +from langchain.chains.summarize import load_summarize_chain +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain.docstore.document import Document +from langchain.chains.summarize import load_summarize_chain +import textwrap + +# Load API keys +load_dotenv() + +# set API keys +openai_api_key = os.environ['OPENAI_API_KEY'] +activeloop_token = os.environ['ACTIVELOOP_TOKEN'] + +def download_mp4_from_youtube(url): + # Set the options for the download + filename = 'lecuninterview.mp4' + ydl_opts = { + 'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]', + 'outtmpl': filename, + 'quiet': True, + } + + # Download the video file + with yt_dlp.YoutubeDL(ydl_opts) as ydl: + result = ydl.extract_info(url, download=True) + +url = "https://www.youtube.com/watch?v=mBjPyte2ZZo" +download_mp4_from_youtube(url) + +# Whisper +model = whisper.load_model("base") +result = model.transcribe("lecuninterview.mp4") +print(result['text']) + +with open ('text.txt', 'w') as file: + file.write(result['text']) + + +# Summarization with LangChain +llm = OpenAI(model_name="text-davinci-003", temperature=0) + +text_splitter = RecursiveCharacterTextSplitter( + chunk_size=1000, chunk_overlap=0, separators=[" ", ",", "\n"] +) + +with open('text.txt') as f: + text = f.read() + +texts = text_splitter.split_text(text) +docs = [Document(page_content=t) for t in texts[:4]] + +chain = load_summarize_chain(llm, chain_type="map_reduce") + +output_summary = chain.run(docs) +wrapped_text = textwrap.fill(output_summary, width=100)
+print(wrapped_text) + +# The "stuff" approach +prompt_template = """Write a concise bullet point summary of the following: + + +{text} + + +CONCISE SUMMARY IN BULLET POINTS:""" + +BULLET_POINT_PROMPT = PromptTemplate(template=prompt_template, + input_variables=["text"]) + + +# Initialize the summarization chain using "stuff" as chain_type and the prompt above. +chain = load_summarize_chain(llm, + chain_type="stuff", + prompt=BULLET_POINT_PROMPT) + +output_summary = chain.run(docs) + +wrapped_text = textwrap.fill(output_summary, + width=1000, + break_long_words=False, + replace_whitespace=False) +print(wrapped_text) + +# The 'refine' summarization chain is a method for generating more accurate and context-aware summaries. +# This method can result in more accurate and context-aware summaries compared to other chain types like 'stuff' and 'map_reduce'. +chain = load_summarize_chain(llm, chain_type="refine") + +output_summary = chain.run(docs) +wrapped_text = textwrap.fill(output_summary, width=100) +print(wrapped_text) + diff --git a/Projects/self-critique-chain/app.py b/Projects/5. Combining Components Together with Chains/self-critique-chain/app.py similarity index 100% rename from Projects/self-critique-chain/app.py rename to Projects/5. Combining Components Together with Chains/self-critique-chain/app.py diff --git a/Projects/self-critique-chain/real_world_example.py b/Projects/5. Combining Components Together with Chains/self-critique-chain/real_world_example.py similarity index 100% rename from Projects/self-critique-chain/real_world_example.py rename to Projects/5. Combining Components Together with Chains/self-critique-chain/real_world_example.py diff --git a/Projects/5. Combining Components Together with Chains/yt-video-summarizer b/Projects/5. Combining Components Together with Chains/yt-video-summarizer new file mode 160000 index 0000000..b35165c --- /dev/null +++ b/Projects/5. 
Combining Components Together with Chains/yt-video-summarizer @@ -0,0 +1 @@ +Subproject commit b35165cb2bf70f6c9a2f94c2bba2db6cf6432c55 diff --git a/Projects/AI_News_Chatbot/app.py b/Projects/6. Giving Memory to LLMs/AI_News_Chatbot/app.py similarity index 100% rename from Projects/AI_News_Chatbot/app.py rename to Projects/6. Giving Memory to LLMs/AI_News_Chatbot/app.py diff --git a/Projects/AI_News_Chatbot/chatbot_app.py b/Projects/6. Giving Memory to LLMs/AI_News_Chatbot/chatbot_app.py similarity index 100% rename from Projects/AI_News_Chatbot/chatbot_app.py rename to Projects/6. Giving Memory to LLMs/AI_News_Chatbot/chatbot_app.py diff --git a/Projects/AI_News_Chatbot/Requirements.txt b/Projects/6. Giving Memory to LLMs/AI_News_Chatbot/requirements.txt similarity index 100% rename from Projects/AI_News_Chatbot/Requirements.txt rename to Projects/6. Giving Memory to LLMs/AI_News_Chatbot/requirements.txt diff --git a/Projects/Adding_Memory/ConversationBufferMemory.py b/Projects/6. Giving Memory to LLMs/Adding_Memory/ConversationBufferMemory.py similarity index 100% rename from Projects/Adding_Memory/ConversationBufferMemory.py rename to Projects/6. Giving Memory to LLMs/Adding_Memory/ConversationBufferMemory.py diff --git a/Projects/Adding_Memory/ConversationChain.py b/Projects/6. Giving Memory to LLMs/Adding_Memory/ConversationChain.py similarity index 100% rename from Projects/Adding_Memory/ConversationChain.py rename to Projects/6. Giving Memory to LLMs/Adding_Memory/ConversationChain.py diff --git a/Projects/Chat-with-Github-Repo/.env.example b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/.env.example similarity index 100% rename from Projects/Chat-with-Github-Repo/.env.example rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/.env.example diff --git a/Projects/Chat-with-Github-Repo/.flake8 b/Projects/6. 
Giving Memory to LLMs/Chat-with-Github-Repo/.flake8 similarity index 100% rename from Projects/Chat-with-Github-Repo/.flake8 rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/.flake8 diff --git a/Projects/Chat-with-Github-Repo/.vscode/extensions.json b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/.vscode/extensions.json similarity index 100% rename from Projects/Chat-with-Github-Repo/.vscode/extensions.json rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/.vscode/extensions.json diff --git a/Projects/Chat-with-Github-Repo/.vscode/settings.json b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/.vscode/settings.json similarity index 100% rename from Projects/Chat-with-Github-Repo/.vscode/settings.json rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/.vscode/settings.json diff --git a/Projects/Chat-with-Github-Repo/LICENSE b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/LICENSE similarity index 100% rename from Projects/Chat-with-Github-Repo/LICENSE rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/LICENSE diff --git a/Projects/Chat-with-Github-Repo/README.md b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/README.md similarity index 100% rename from Projects/Chat-with-Github-Repo/README.md rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/README.md diff --git a/Projects/Chat-with-Github-Repo/dev-requirements.txt b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/dev-requirements.txt similarity index 100% rename from Projects/Chat-with-Github-Repo/dev-requirements.txt rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/dev-requirements.txt diff --git a/Projects/Chat-with-Github-Repo/pyproject.toml b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/pyproject.toml similarity index 100% rename from Projects/Chat-with-Github-Repo/pyproject.toml rename to Projects/6. 
Giving Memory to LLMs/Chat-with-Github-Repo/pyproject.toml diff --git a/Projects/Chat-with-Github-Repo/requirements.txt b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/requirements.txt similarity index 100% rename from Projects/Chat-with-Github-Repo/requirements.txt rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/requirements.txt diff --git a/Projects/Chat-with-Github-Repo/src/__init__.py b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/__init__.py similarity index 100% rename from Projects/Chat-with-Github-Repo/src/__init__.py rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/__init__.py diff --git a/Projects/Chat-with-Github-Repo/src/main.py b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/main.py similarity index 100% rename from Projects/Chat-with-Github-Repo/src/main.py rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/main.py diff --git a/Projects/Chat-with-Github-Repo/src/utils/__init__.py b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/utils/__init__.py similarity index 100% rename from Projects/Chat-with-Github-Repo/src/utils/__init__.py rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/utils/__init__.py diff --git a/Projects/Chat-with-Github-Repo/src/utils/chat.py b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/utils/chat.py similarity index 100% rename from Projects/Chat-with-Github-Repo/src/utils/chat.py rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/utils/chat.py diff --git a/Projects/Chat-with-Github-Repo/src/utils/process.py b/Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/utils/process.py similarity index 100% rename from Projects/Chat-with-Github-Repo/src/utils/process.py rename to Projects/6. Giving Memory to LLMs/Chat-with-Github-Repo/src/utils/process.py diff --git a/Projects/Financial_Data_Chatbot/app.py b/Projects/6. 
Giving Memory to LLMs/Financial_Data_Chatbot/app.py similarity index 97% rename from Projects/Financial_Data_Chatbot/app.py rename to Projects/6. Giving Memory to LLMs/Financial_Data_Chatbot/app.py index cf1ab20..b00ced3 100644 --- a/Projects/Financial_Data_Chatbot/app.py +++ b/Projects/6. Giving Memory to LLMs/Financial_Data_Chatbot/app.py @@ -5,7 +5,7 @@ from langchain.text_splitter import CharacterTextSplitter from langchain import OpenAI from langchain.chains import RetrievalQA -from langchain.llms import OpenAIChat +from langchain.chat_models import ChatOpenAI from langchain.document_loaders import PagedPDFSplitter load_dotenv() @@ -70,6 +70,6 @@ def load_reports(urls: List[str]) -> List[str]: db.add_documents(texts) -qa = RetrievalQA.from_chain_type(llm=OpenAIChat(model='gpt-3.5-turbo'), chain_type='stuff', retriever=db.as_retriever()) +qa = RetrievalQA.from_chain_type(llm=ChatOpenAI(model='gpt-3.5-turbo'), chain_type='stuff', retriever=db.as_retriever()) qa.run("Combine total revenue in 2020?") \ No newline at end of file diff --git a/Projects/Financial_Data_Chatbot/requirements.txt b/Projects/6. Giving Memory to LLMs/Financial_Data_Chatbot/requirements.txt similarity index 100% rename from Projects/Financial_Data_Chatbot/requirements.txt rename to Projects/6. Giving Memory to LLMs/Financial_Data_Chatbot/requirements.txt diff --git a/Projects/Types_of_Conversational_Memory/ConversationBufferMemory.py b/Projects/6. Giving Memory to LLMs/Types_of_Conversational_Memory/ConversationBufferMemory.py similarity index 100% rename from Projects/Types_of_Conversational_Memory/ConversationBufferMemory.py rename to Projects/6. Giving Memory to LLMs/Types_of_Conversational_Memory/ConversationBufferMemory.py diff --git a/Projects/Types_of_Conversational_Memory/ConversationBufferWindowMemory.py b/Projects/6. 
Giving Memory to LLMs/Types_of_Conversational_Memory/ConversationBufferWindowMemory.py similarity index 100% rename from Projects/Types_of_Conversational_Memory/ConversationBufferWindowMemory.py rename to Projects/6. Giving Memory to LLMs/Types_of_Conversational_Memory/ConversationBufferWindowMemory.py diff --git a/Projects/Types_of_Conversational_Memory/ConversationSummaryMemory.py b/Projects/6. Giving Memory to LLMs/Types_of_Conversational_Memory/ConversationSummaryMemory.py similarity index 100% rename from Projects/Types_of_Conversational_Memory/ConversationSummaryMemory.py rename to Projects/6. Giving Memory to LLMs/Types_of_Conversational_Memory/ConversationSummaryMemory.py diff --git a/Projects/Types_of_Conversational_Memory/Token_count.py b/Projects/6. Giving Memory to LLMs/Types_of_Conversational_Memory/Token_count.py similarity index 100% rename from Projects/Types_of_Conversational_Memory/Token_count.py rename to Projects/6. Giving Memory to LLMs/Types_of_Conversational_Memory/Token_count.py diff --git a/Projects/Custom_Document_Retrieval_Tool/app.py b/Projects/7. Making LLMs Interact with the World Using Tools/Custom_Document_Retrieval_Tool/app.py similarity index 100% rename from Projects/Custom_Document_Retrieval_Tool/app.py rename to Projects/7. Making LLMs Interact with the World Using Tools/Custom_Document_Retrieval_Tool/app.py diff --git a/Projects/LangChain_Agents_and_Toolkits/GoogleSearchAPIWrapper.py b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/GoogleSearchAPIWrapper.py similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/GoogleSearchAPIWrapper.py rename to Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/GoogleSearchAPIWrapper.py diff --git a/Projects/LangChain_Agents_and_Toolkits/Python_REPL.py b/Projects/7. 
Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/Python_REPL.py similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/Python_REPL.py rename to Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/Python_REPL.py diff --git a/Projects/LangChain_Agents_and_Toolkits/Wikipedia_WolframAlpha.py b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/Wikipedia_WolframAlpha.py similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/Wikipedia_WolframAlpha.py rename to Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/Wikipedia_WolframAlpha.py diff --git a/Projects/LangChain_Agents_and_Toolkits/__pycache__/requests.cpython-310.pyc b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/__pycache__/requests.cpython-310.pyc similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/__pycache__/requests.cpython-310.pyc rename to Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/__pycache__/requests.cpython-310.pyc diff --git a/Projects/LangChain_Agents_and_Toolkits/__pycache__/wikipedia.cpython-310.pyc b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/__pycache__/wikipedia.cpython-310.pyc similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/__pycache__/wikipedia.cpython-310.pyc rename to Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/__pycache__/wikipedia.cpython-310.pyc diff --git a/Projects/LangChain_Agents_and_Toolkits/google_search.py b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/google_search.py similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/google_search.py rename to Projects/7. 
Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/google_search.py diff --git a/Projects/LangChain_Agents_and_Toolkits/requirements.txt b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/requirements.txt similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/requirements.txt rename to Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/requirements.txt diff --git a/Projects/LangChain_Agents_and_Toolkits/serpapi.py b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/serpapi.py similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/serpapi.py rename to Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/serpapi.py diff --git a/Projects/LangChain_Agents_and_Toolkits/using_WolframAlpha.py b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/using_WolframAlpha.py similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/using_WolframAlpha.py rename to Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/using_WolframAlpha.py diff --git a/Projects/LangChain_Agents_and_Toolkits/using_requests.py b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/using_requests.py similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/using_requests.py rename to Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/using_requests.py diff --git a/Projects/LangChain_Agents_and_Toolkits/using_wikipedia.py b/Projects/7. Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/using_wikipedia.py similarity index 100% rename from Projects/LangChain_Agents_and_Toolkits/using_wikipedia.py rename to Projects/7. 
Making LLMs Interact with the World Using Tools/LangChain_Agents_and_Toolkits/using_wikipedia.py diff --git a/Projects/Recreating_the_Bing_Chatbot/app.py b/Projects/7. Making LLMs Interact with the World Using Tools/Recreating_the_Bing_Chatbot/app.py similarity index 100% rename from Projects/Recreating_the_Bing_Chatbot/app.py rename to Projects/7. Making LLMs Interact with the World Using Tools/Recreating_the_Bing_Chatbot/app.py diff --git a/Projects/Recreating_the_Bing_Chatbot/google_api.py b/Projects/7. Making LLMs Interact with the World Using Tools/Recreating_the_Bing_Chatbot/google_api.py similarity index 100% rename from Projects/Recreating_the_Bing_Chatbot/google_api.py rename to Projects/7. Making LLMs Interact with the World Using Tools/Recreating_the_Bing_Chatbot/google_api.py diff --git a/Projects/Recreating_the_Bing_Chatbot/requirements.txt b/Projects/7. Making LLMs Interact with the World Using Tools/Recreating_the_Bing_Chatbot/requirements.txt similarity index 100% rename from Projects/Recreating_the_Bing_Chatbot/requirements.txt rename to Projects/7. Making LLMs Interact with the World Using Tools/Recreating_the_Bing_Chatbot/requirements.txt diff --git a/Projects/Web-Based-Question-Answering/app.py b/Projects/7. Making LLMs Interact with the World Using Tools/Web-Based-Question-Answering/app.py similarity index 100% rename from Projects/Web-Based-Question-Answering/app.py rename to Projects/7. Making LLMs Interact with the World Using Tools/Web-Based-Question-Answering/app.py diff --git a/Projects/What-Are-Agents/app.py b/Projects/7. Making LLMs Interact with the World Using Tools/What-Are-Agents/app.py similarity index 100% rename from Projects/What-Are-Agents/app.py rename to Projects/7. Making LLMs Interact with the World Using Tools/What-Are-Agents/app.py diff --git a/Projects/What-Are-Agents/requirements.txt b/Projects/7. 
Making LLMs Interact with the World Using Tools/What-Are-Agents/requirements.txt similarity index 100% rename from Projects/What-Are-Agents/requirements.txt rename to Projects/7. Making LLMs Interact with the World Using Tools/What-Are-Agents/requirements.txt diff --git a/Projects/What-Are-Agents/scifi_story_generator.py b/Projects/7. Making LLMs Interact with the World Using Tools/What-Are-Agents/scifi_story_generator.py similarity index 100% rename from Projects/What-Are-Agents/scifi_story_generator.py rename to Projects/7. Making LLMs Interact with the World Using Tools/What-Are-Agents/scifi_story_generator.py diff --git a/Projects/supercharge_blog_posts/Generate_Search_Queries.py b/Projects/7. Making LLMs Interact with the World Using Tools/supercharge_blog_posts/Generate_Search_Queries.py similarity index 100% rename from Projects/supercharge_blog_posts/Generate_Search_Queries.py rename to Projects/7. Making LLMs Interact with the World Using Tools/supercharge_blog_posts/Generate_Search_Queries.py diff --git a/Projects/supercharge_blog_posts/requirements.txt b/Projects/7. Making LLMs Interact with the World Using Tools/supercharge_blog_posts/requirements.txt similarity index 100% rename from Projects/supercharge_blog_posts/requirements.txt rename to Projects/7. Making LLMs Interact with the World Using Tools/supercharge_blog_posts/requirements.txt diff --git a/Projects/Agent-Simulation-Projects/autonomous_agent.py b/Projects/8. Using Language Model as Reasoning Engines with Agents/Agent-Simulation-Projects/autonomous_agent.py similarity index 100% rename from Projects/Agent-Simulation-Projects/autonomous_agent.py rename to Projects/8. Using Language Model as Reasoning Engines with Agents/Agent-Simulation-Projects/autonomous_agent.py diff --git a/Projects/Agent-Simulation-Projects/camel.py b/Projects/8. 
Using Language Model as Reasoning Engines with Agents/Agent-Simulation-Projects/camel.py similarity index 100% rename from Projects/Agent-Simulation-Projects/camel.py rename to Projects/8. Using Language Model as Reasoning Engines with Agents/Agent-Simulation-Projects/camel.py diff --git a/Projects/Agent-Simulation-Projects/requirements.txt b/Projects/8. Using Language Model as Reasoning Engines with Agents/Agent-Simulation-Projects/requirements.txt similarity index 100% rename from Projects/Agent-Simulation-Projects/requirements.txt rename to Projects/8. Using Language Model as Reasoning Engines with Agents/Agent-Simulation-Projects/requirements.txt diff --git a/Projects/Autonomous-Agents/AutoGPT.py b/Projects/8. Using Language Model as Reasoning Engines with Agents/Autonomous-Agents/AutoGPT.py similarity index 100% rename from Projects/Autonomous-Agents/AutoGPT.py rename to Projects/8. Using Language Model as Reasoning Engines with Agents/Autonomous-Agents/AutoGPT.py diff --git a/Projects/Autonomous-Agents/babyAGI.py b/Projects/8. Using Language Model as Reasoning Engines with Agents/Autonomous-Agents/babyAGI.py similarity index 100% rename from Projects/Autonomous-Agents/babyAGI.py rename to Projects/8. Using Language Model as Reasoning Engines with Agents/Autonomous-Agents/babyAGI.py diff --git a/Projects/Autonomous-Agents/requirements.txt b/Projects/8. Using Language Model as Reasoning Engines with Agents/Autonomous-Agents/requirements.txt similarity index 100% rename from Projects/Autonomous-Agents/requirements.txt rename to Projects/8. 
Using Language Model as Reasoning Engines with Agents/Autonomous-Agents/requirements.txt diff --git a/Projects/FableForge/Sales_Copilot.py b/Projects/FableForge/Sales_Copilot.py new file mode 100644 index 0000000..5d4f34a --- /dev/null +++ b/Projects/FableForge/Sales_Copilot.py @@ -0,0 +1,87 @@ +import os +import re +from langchain.embeddings import OpenAIEmbeddings +from langchain.vectorstores import DeepLake +from langchain.chat_models import ChatOpenAI +from langchain.schema import SystemMessage, HumanMessage, AIMessage + +class DeepLakeLoader: + def __init__(self, source_data_path): + self.source_data_path = source_data_path + self.file_name = os.path.basename(source_data_path) # What we'll name our database + self.data = self.split_data() + + if self.check_if_db_exists(): + self.db = self.load_db() + else: + self.db = self.create_db() + + def split_data(self): # Correctly indented to be part of the class + """ + Preprocess the data by splitting it into passages. + + If using a different data source, this function will need to be modified. + + Returns: + split_data (list): List of passages. + """ + with open(self.source_data_path, 'r') as f: + content = f.read() + split_data = re.split(r'(?=\d+\. )', content) + if split_data[0] == '': + split_data.pop(0) + split_data = [entry for entry in split_data if len(entry) >= 30] + return split_data + + def load_db(self): + """ + Load the database if it already exists. + + Returns: + DeepLake: DeepLake object. + """ + return DeepLake(dataset_path=f'deeplake/{self.file_name}', embedding_function=OpenAIEmbeddings(), read_only=True) + + def create_db(self): + """ + Create the database if it does not already exist. + + Databases are stored in the deeplake directory. + + Returns: + DeepLake: DeepLake object. + """ + return DeepLake.from_texts(self.data, OpenAIEmbeddings(), dataset_path=f'deeplake/{self.file_name}') + + def query_db(self, query): + """ + Query the database for passages that are similar to the query. 
+ + Args: + query (str): Query string. + + Returns: + content (list): List of passages that are similar to the query. + """ + results = self.db.similarity_search(query, k=3) + content = [] + for result in results: + content.append(result.page_content) + return content + + def check_if_db_exists(self): # Placeholder for the missing method + # You'll need to implement this method to check if the database exists + pass + +detected_objection = "Your query here" # Define the query you want to use +db = DeepLakeLoader('salestesting.txt') +results = db.query_db(detected_objection) + +objection_prompt = "Your prompt here" # Define the prompt you want to use +chat = ChatOpenAI() +system_message = SystemMessage(content=objection_prompt) +human_message = HumanMessage(content=f'Customer objection: {detected_objection} | Relevant guidelines: {results}') + +response = chat([system_message, human_message]) +print(response.content) + diff --git a/Projects/Movie Finder AI b/Projects/Movie Finder AI new file mode 160000 index 0000000..c877690 --- /dev/null +++ b/Projects/Movie Finder AI @@ -0,0 +1 @@ +Subproject commit c87769016c50de1a8f3ec5bf7eafc1cc14961a49 diff --git a/Projects/Voice_Assistant/chat.py b/Projects/Voice_Assistant/chat.py deleted file mode 100644 index e7b789d..0000000 --- a/Projects/Voice_Assistant/chat.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -import openai -import streamlit as st -from audio_recorder_streamlit import audio_recorder -from elevenlabs import generate -from langchain.chains import RetrievalQA -from langchain.chat_models import ChatOpenAI -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.vectorstores import DeepLake -from streamlit_chat import message -from dotenv import load_dotenv - -load_dotenv() - -# Constants -TEMP_AUDIO_PATH = "temp_audio.wav" -AUDIO_FORMAT = "audio/wav" - -# Load environment variables from .env file and return the keys -openai.api_key = os.environ.get('OPENAI_API_KEY') -eleven_api_key = 
os.environ.get('ELEVEN_API_KEY') - -# TODO: use your organization id here. (by default, org id is your username) -my_activeloop_org_id = "langchain_course_deeplake" -my_activeloop_dataset_name = "langchain_course_jarvis_assistant" -dataset_path = f'hub://{my_activeloop_org_id}/{my_activeloop_dataset_name}' - - -def load_embeddings_and_database(active_loop_data_set_path): - embeddings = OpenAIEmbeddings() - db = DeepLake( - dataset_path=active_loop_data_set_path, - read_only=True, - embedding_function=embeddings - ) - return db - -# Transcribe audio using OpenAI Whisper API -def transcribe_audio(audio_file_path, openai_key): - openai.api_key = openai_key - try: - with open(audio_file_path, "rb") as audio_file: - response = openai.Audio.transcribe("whisper-1", audio_file) - return response["text"] - except Exception as e: - print(f"Error calling Whisper API: {str(e)}") - return None - -# Record audio using audio_recorder and transcribe using transcribe_audio -def record_and_transcribe_audio(): - audio_bytes = audio_recorder() - transcription = None - if audio_bytes: - st.audio(audio_bytes, format=AUDIO_FORMAT) - - with open(TEMP_AUDIO_PATH, "wb") as f: - f.write(audio_bytes) - - if st.button("Transcribe"): - transcription = transcribe_audio(TEMP_AUDIO_PATH, openai.api_key) - os.remove(TEMP_AUDIO_PATH) - display_transcription(transcription) - - return transcription - -# Display the transcription of the audio on the app -def display_transcription(transcription): - if transcription: - st.write(f"Transcription: {transcription}") - with open("audio_transcription.txt", "w+") as f: - f.write(transcription) - else: - st.write("Error transcribing audio.") - -# Get user input from Streamlit text input field -def get_user_input(transcription): - return st.text_input("", value=transcription if transcription else "", key="input") - -# Search the database for a response based on the user's query -def search_db(user_input, db): - print(user_input) - retriever = db.as_retriever() - 
retriever.search_kwargs['distance_metric'] = 'cos' - retriever.search_kwargs['fetch_k'] = 100 - retriever.search_kwargs['maximal_marginal_relevance'] = True - retriever.search_kwargs['k'] = 4 - model = ChatOpenAI(model_name='gpt-3.5-turbo') - qa = RetrievalQA.from_llm(model, retriever=retriever, return_source_documents=True) - return qa({'query': user_input}) - -# Display conversation history using Streamlit messages -def display_conversation(history): - for i in range(len(history["generated"])): - message(history["past"][i], is_user=True, key=str(i) + "_user") - message(history["generated"][i],key=str(i)) - #Voice using Eleven API - voice= "Bella" - text= history["generated"][i] - audio = generate(text=text, voice=voice,api_key=eleven_api_key) - st.audio(audio, format='audio/mp3') - -# Main function to run the app -def main(): - # Initialize Streamlit app with a title - st.write("# JarvisBase 🧙") - - # Load embeddings and the DeepLake database - db = load_embeddings_and_database(dataset_path) - - # Record and transcribe audio - transcription = record_and_transcribe_audio() - - # Get user input from text input or audio transcription - user_input = get_user_input(transcription) - - # Initialize session state for generated responses and past messages - if "generated" not in st.session_state: - st.session_state["generated"] = ["I am ready to help you"] - if "past" not in st.session_state: - st.session_state["past"] = ["Hey there!"] - - # Search the database for a response based on user input and update the session state - if user_input: - output = search_db(user_input, db) - print(output['source_documents']) - st.session_state.past.append(user_input) - response = str(output["result"]) - st.session_state.generated.append(response) - - #Display conversation history using Streamlit messages - if st.session_state["generated"]: - display_conversation(st.session_state) - -# Run the main function when the script is executed -if __name__ == "__main__": - main() \ No newline 
at end of file diff --git a/Projects/Voice_Assistant/scrape.py b/Projects/Voice_Assistant/scrape.py deleted file mode 100644 index 81f5f2c..0000000 --- a/Projects/Voice_Assistant/scrape.py +++ /dev/null @@ -1,113 +0,0 @@ -import os -import requests -from bs4 import BeautifulSoup -from langchain.embeddings.openai import OpenAIEmbeddings -from langchain.vectorstores import DeepLake -from langchain.text_splitter import CharacterTextSplitter -from langchain.document_loaders import TextLoader -import re -from dotenv import load_dotenv - -# Load the .env file -load_dotenv() - -# Access the keys from the .env file -OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') -ELEVEN_API_KEY = os.getenv('ELEVEN_API_KEY') -ACTIVELOOP_TOKEN = os.getenv('ACTIVELOOP_TOKEN') - -# TODO: use your organization id here. (by default, org id is your username) -my_activeloop_org_id = "langchain_course_deeplake" -my_activeloop_dataset_name = "langchain_course_jarvis_assistant" -dataset_path = f'hub://{my_activeloop_org_id}/{my_activeloop_dataset_name}' - - -embeddings = OpenAIEmbeddings(model="text-embedding-ada-002") - -def get_documentation_urls(): - # List of relative URLs for Hugging Face documentation pages, commented a lot of these because it would take too long to scrape all of them - return [ - '/docs/huggingface_hub/guides/overview', - '/docs/huggingface_hub/guides/download', - '/docs/huggingface_hub/guides/upload', - '/docs/huggingface_hub/guides/hf_file_system', - '/docs/huggingface_hub/guides/repository', - '/docs/huggingface_hub/guides/search', - # You may add additional URLs here or replace all of them - ] - -def construct_full_url(base_url, relative_url): - # Construct the full URL by appending the relative URL to the base URL - return base_url + relative_url - -def scrape_page_content(url): - # Send a GET request to the URL and parse the HTML response using BeautifulSoup - response = requests.get(url) - soup = BeautifulSoup(response.text, 'html.parser') - # Extract the desired content 
from the page (in this case, the body text) - text=soup.body.text.strip() - # Remove non-ASCII characters - text = re.sub(r'[\x00-\x08\x0b-\x0c\x0e-\x1f\x7f-\xff]', '', text) - # Remove extra whitespace and newlines - text = re.sub(r'\s+', ' ', text) - return text.strip() - -def scrape_all_content(base_url, relative_urls, filename): - # Loop through the list of URLs, scrape content and add it to the content list - content = [] - for relative_url in relative_urls: - full_url = construct_full_url(base_url, relative_url) - scraped_content = scrape_page_content(full_url) - content.append(scraped_content.rstrip('\n')) - - # Write the scraped content to a file - with open(filename, 'w', encoding='utf-8') as file: - for item in content: - file.write("%s\n" % item) - - return content - -# Define a function to load documents from a file -def load_docs(root_dir,filename): - # Create an empty list to hold the documents - docs = [] - try: - # Load the file using the TextLoader class and UTF-8 encoding - loader = TextLoader(os.path.join( - root_dir, filename), encoding='utf-8') - # Split the loaded file into separate documents and add them to the list of documents - docs.extend(loader.load_and_split()) - except Exception as e: - # If an error occurs during loading, ignore it and return an empty list of documents - pass - # Return the list of documents - return docs - -def split_docs(docs): - text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0) - return text_splitter.split_documents(docs) - -# Define the main function -def main(): - base_url = 'https://huggingface.co' - # Set the name of the file to which the scraped content will be saved - filename='content.txt' - # Set the root directory where the content file will be saved - root_dir ='./' - relative_urls = get_documentation_urls() - # Scrape all the content from the relative URLs and save it to the content file - content = scrape_all_content(base_url, relative_urls,filename) - # Load the content from the 
file - docs = load_docs(root_dir,filename) - # Split the content into individual documents - texts = split_docs(docs) - # Create a DeepLake database with the given dataset path and embedding function - db = DeepLake(dataset_path=dataset_path, embedding_function=embeddings) - # Add the individual documents to the database - db.add_documents(texts) - # Clean up by deleting the content file - os.remove(filename) - -# Call the main function if this script is being run as the main program -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/Projects/yt-video-summarizer/Youtube_Summarizer.py b/Projects/yt-video-summarizer/Youtube_Summarizer.py deleted file mode 100644 index 6d4e647..0000000 --- a/Projects/yt-video-summarizer/Youtube_Summarizer.py +++ /dev/null @@ -1,133 +0,0 @@ -# Import necessary libraries and modules -import os -from dotenv import load_dotenv -import yt_dlp -import whisper -from langchain.chat_models import ChatOpenAI -from langchain.chains import LLMChain -from langchain.prompts import PromptTemplate -from langchain.chains.summarize import load_summarize_chain -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.docstore.document import Document -from langchain.chains.summarize import load_summarize_chain -import textwrap -import streamlit as st - -# Function to download MP4 videos from YouTube -def download_mp4_from_youtube(url, selected_quality): - # Define options for video download based on user's choice - ydl_opts = { - 'format': f'bestvideo[height={selected_quality}][ext=mp4]+bestaudio[ext=m4a]/best[height={selected_quality}][ext=mp4]', - 'outtmpl': '%(title)s.mp4', - 'quiet': True, - } - - # Download the video based on the selected quality - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - result = ydl.extract_info(url, download=True) - video_title = result['title'] - - return video_title - -# Function to summarize a YouTube video -def summarize_youtube_video(url, selected_quality): - # Download 
the video from YouTube and get the title - video_title = download_mp4_from_youtube(url, selected_quality) - - # Load the Whisper model for transcription - model = whisper.load_model("tiny") - result = model.transcribe(f"{video_title}.mp4") - - # Extract transcription from the result - transcription = result['text'] - - # Save the transcription to a file - with open(f'{video_title}_transcription.txt', 'w') as file: - file.write(transcription) - - # Initialize the LangChain model for summarization - llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0) - - # Split the transcription text into manageable chunks - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=1000, chunk_overlap=0, separators=[" ", ",", "\n"] - ) - with open(f'{video_title}_transcription.txt') as f: - text = f.read() - texts = text_splitter.split_text(text) - docs = [Document(page_content=t) for t in texts[:4]] - - # Define the prompt for the 'refine' summarization chain - prompt_template = """Write a concise bullet point summary of the following: -{text} -CONSCISE SUMMARY IN BULLET POINTS:""" - BULLET_POINT_PROMPT = PromptTemplate(template=prompt_template, - input_variables=["text"]) - chain = load_summarize_chain(llm, chain_type="refine", refine_prompt=BULLET_POINT_PROMPT) - - # Use the 'refine' summarization chain to generate summaries for each chunk of text - summaries = [] - for doc in docs: - summary_doc, _ = chain(doc) - summaries.append(summary_doc.page_content) - - # Join the summaries into one string and wrap it to fit in a column width of 80 characters - summary = "\n".join(summaries) - summary_wrapped = "\n".join(textwrap.wrap(summary, width=80)) - - return transcription, summary_wrapped, video_title - -# Function to run the YouTube video summarizer app -def run_youtube_summarizer(): - # Load environment variables (API keys in this case) - load_dotenv() - - # Retrieve and set API keys from environment variables - openai_api_key = os.environ['OPENAI_API_KEY'] - 
activeloop_token = os.environ['ACTIVELOOP_TOKEN'] - - # Set the title of the streamlit app - st.title("YouTube Video Summarizer") - - # Get YouTube video URL from user using a text input widget - url = st.text_input("Enter the YouTube video URL: ") - - # Check if the URL is valid and not empty - if url and yt_dlp.validate_url(url): - # Define options to list available video formats - ydl_opts = { - 'listformats': True, - } - # Extract video information without downloading - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - result = ydl.extract_info(url, download=False) - - # Parse and display available video quality options - available_formats = set() - for format in result['formats']: - if format['ext'] == 'mp4' and 'height' in format: - height = format['height'] - available_formats.add(height) - available_formats_str = [str(height) + 'p' for height in sorted(available_formats)] - - # Get user input for desired video quality - selected_quality = st.radio("Select video quality:", available_formats_str) - - # Button to start the process of transcribing and summarizing - if st.button("Start"): - # Summarize the YouTube video - transcription, summary, video_title = summarize_youtube_video(url, selected_quality) - - # Display the video using a player component - st.video(f"{video_title}.mp4") - - # Display the transcription and the summary using expander components - with st.expander("Transcription"): - st.write(transcription) - - with st.expander("Summary"): - st.write(summary) - - # Button to download the transcription and the summary as text files - st.download_button(label="Download transcription", data=transcription, file_name=f"{video_title}_transcription.txt", mime="text/plain") - st.download_button(label="Download summary", data=summary, file_name=f"{video_title}_summary.txt", mime="text/plain") diff --git a/Projects/yt-video-summarizer/app.py b/Projects/yt-video-summarizer/app.py deleted file mode 100644 index 910cca6..0000000 --- a/Projects/yt-video-summarizer/app.py 
+++ /dev/null @@ -1,125 +0,0 @@ -import os -from dotenv import load_dotenv -import streamlit as st -import yt_dlp -import whisper -from langchain.chat_models import ChatOpenAI -from langchain.chains import LLMChain -from langchain.prompts import PromptTemplate -from langchain.chains.summarize import load_summarize_chain -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.docstore.document import Document -from langchain.chains.summarize import load_summarize_chain -import textwrap - -# Load environment variables (API keys in this case) -load_dotenv() - -# Set the title of the app -st.title("Youtube Video Summarizer") - -# Retrieve and set API keys from environment variables -openai_api_key = os.environ['OPENAI_API_KEY'] -activeloop_token = os.environ['ACTIVELOOP_TOKEN'] - -# Function to download MP4 videos from YouTube -def download_mp4_from_youtube(url): - # Extract video information without downloading - with yt_dlp.YoutubeDL() as ydl: - result = ydl.extract_info(url, download=False) - - # Parse and display available video quality options - available_formats_str = ["Select preferred video format"] - available_formats = [] - for format in result['formats']: - if format['ext'] == 'mp4' and 'height' in format: - height = format['height'] - available_formats_str.append(str(height) + 'p') - available_formats.append(format) - available_formats_str = sorted(list(set(available_formats_str))) - - # Display the radio buttons - selected_quality = st.radio("Select video quality:", available_formats_str, index=0) - - # Download the video based on the selected quality - if selected_quality != "Select preferred video format": - selected_format = available_formats[available_formats_str.index(selected_quality)] - ydl_opts = { - 'format': selected_format['format_id'], - 'outtmpl': '%(title)s.mp4', - 'quiet': True, - } - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - result = ydl.extract_info(url, download=True) - video_title = result['title'] - else: - 
video_title = None - - return video_title, selected_quality - -# Get YouTube video URL from user -url = st.text_input("Enter the YouTube video URL: ") -selected_quality = None - -if st.button("Start"): - if url: - video_title, selected_quality = download_mp4_from_youtube(url) - - if video_title: - # Show progress updates to the user - progress_bar = st.progress(0) - status_text = st.empty() - - # Load the Whisper model for transcription - model = whisper.load_model("tiny") - result = model.transcribe(f"{video_title}.mp4") - - # Extract transcription from the result - transcription = result['text'] - - # Save the transcription to a file - with open(f'{video_title}_transcription.txt', 'w') as file: - file.write(transcription) - - # Initialize the LangChain model for summarization - llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0) - - # Split the transcription text into manageable chunks - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=1000, chunk_overlap=0, separators=[" ", ",", "\n"] - ) - with open(f'{video_title}_transcription.txt') as f: - text = f.read() - texts = text_splitter.split_text(text) - docs = [Document(page_content=t) for t in texts[:4]] - - # Use the 'refine' summarization chain with the custom prompt - chain = load_summarize_chain(llm, chain_type="refine") - output_summary = chain.run(docs) - wrapped_text = textwrap.fill(output_summary, width=100) - - # Display the refined summary - st.subheader("Summary:") - st.write(wrapped_text) - - # Button to download transcript - if st.button("Download Transcript"): - st.download_button( - label="Download Transcript", - data=f'{video_title}_transcription.txt', - file_name=f'{video_title}_transcription.txt', - ) - - # Button to download summary - if st.button("Download Summary"): - st.download_button( - label="Download Summary", - data=wrapped_text, - file_name=f'{video_title}_summary.txt', - ) - - # Expand icon to display the rest of the transcription - with 
st.expander("Transcription", expanded=True): - st.write(transcription) - else: - st.write("Please select a video quality.") diff --git a/Projects/yt-video-summarizer/app_aider.py b/Projects/yt-video-summarizer/app_aider.py deleted file mode 100644 index b05cb3f..0000000 --- a/Projects/yt-video-summarizer/app_aider.py +++ /dev/null @@ -1,135 +0,0 @@ -import os -from dotenv import load_dotenv -import streamlit as st -import yt_dlp -import whisper -from langchain.chat_models import ChatOpenAI -from langchain.chains import LLMChain -from langchain.prompts import PromptTemplate -from langchain.chains.summarize import load_summarize_chain -from langchain.text_splitter import RecursiveCharacterTextSplitter -from langchain.docstore.document import Document -from langchain.chains.summarize import load_summarize_chain -import textwrap - -# Load environment variables (API keys in this case) -load_dotenv() - -# Set the title of the app -st.title("Youtube Video Summarizer") - -# Retrieve and set API keys from environment variables -openai_api_key = os.environ['OPENAI_API_KEY'] -activeloop_token = os.environ['ACTIVELOOP_TOKEN'] - -# Function to download MP4 videos from YouTube -def download_mp4_from_youtube(url, selected_quality): - ydl_opts = { - 'format': f'bestvideo[height={selected_quality[:-1]}][ext=mp4]+bestaudio[ext=m4a]/best[height={selected_quality[:-1]}][ext=mp4]', - 'outtmpl': '%(title)s.mp4', - 'quiet': True, - } - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - result = ydl.extract_info(url, download=True) - video_title = result['title'] - return video_title - -# Get YouTube video URL from user -url = st.session_state.get('url', '') -selected_quality = st.session_state.get('selected_quality', None) - -url = st.text_input("Enter the YouTube video URL:", value=url, key="video_url") -st.session_state['url'] = url - -# Check if video has already been downloaded -if 'video_downloaded' not in st.session_state: - st.session_state['video_downloaded'] = False - -if not 
st.session_state['video_downloaded']: - available_formats = ["720p", "480p", "360p"] # Define the available video formats here - options = ["Select preferred video format"] + available_formats - selected_quality = st.selectbox("Select video quality:", options, index=0, key="video_quality") - st.session_state['selected_quality'] = selected_quality - - if st.button("Start", key="start_button"): - st.session_state['start_button_pressed'] = True - if selected_quality != "Select preferred video format": - video_title = download_mp4_from_youtube(url, selected_quality) - st.session_state['video_downloaded'] = True - st.session_state['video_title'] = video_title -else: - video_title = st.session_state['video_title'] - -if st.session_state.get('start_button_pressed', False): - if url and selected_quality: - if video_title and selected_quality: - # Show progress updates to the user - progress_bar = st.progress(0) - status_text = st.empty() - - # Load the Whisper model for transcription - model = whisper.load_model("tiny") - result = model.transcribe(f"{video_title}.mp4") - - # Extract transcription from the result - transcription = result['text'] - - # Save the transcription to a file - with open(f'{video_title}_transcription.txt', 'w') as file: - file.write(transcription) - - # Initialize the LangChain model for summarization - llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0) - - # Split the transcription text into manageable chunks - text_splitter = RecursiveCharacterTextSplitter( - chunk_size=1000, chunk_overlap=0, separators=[" ", ",", "\n"] - ) - with open(f'{video_title}_transcription.txt') as f: - text = f.read() - texts = text_splitter.split_text(text) - docs = [Document(page_content=t) for t in texts[:4]] - - # Use the 'refine' summarization chain with the custom prompt - chain = load_summarize_chain(llm, chain_type="refine") - output_summary = chain.run(docs) - wrapped_text = textwrap.fill(output_summary, width=100) - - # Update progress bar and status 
text - progress_bar.progress(50) - status_text.text("Transcription complete") - - # Expand icon to display the rest of the transcription - with st.expander("Transcription", expanded=False): - st.write(transcription) - - # Button to download transcript - if st.button("Download Transcript", key="download_transcript"): - st.download_button( - label="Download Transcript", - data=f'{video_title}_transcription.txt', - file_name=f'{video_title}_transcription.txt', - on_click=None - ) - - # Display the refined summary - st.subheader("Summary:") - st.write(wrapped_text) - - # Button to download summary - if st.button("Download Summary", key="download_summary"): - st.download_button( - label="Download Summary", - data=wrapped_text, - file_name=f'{video_title}_summary.txt', - on_click=None - ) - - # Update progress bar and status text - progress_bar.progress(100) - status_text.text("Summary complete") - - else: - st.write("Please select a video quality.") - else: - st.write("Please enter a YouTube video URL.") diff --git a/Projects/yt-video-summarizer/requirements.txt b/Projects/yt-video-summarizer/requirements.txt deleted file mode 100644 index 39a5c46..0000000 --- a/Projects/yt-video-summarizer/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -dotenv -streamlit -yt-dlp -whisper -langchain -textwrap3