From cc775ce30d601ebe04341ba0a20c3690f2183fe5 Mon Sep 17 00:00:00 2001
From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
Date: Mon, 15 Apr 2024 11:57:37 -0700
Subject: [PATCH 1/5] code example

---
 docs/tracing/faq/logging_and_viewing.mdx | 62 +++++++++++++-----------
 1 file changed, 33 insertions(+), 29 deletions(-)

diff --git a/docs/tracing/faq/logging_and_viewing.mdx b/docs/tracing/faq/logging_and_viewing.mdx
index 6a338b1b..571d132e 100644
--- a/docs/tracing/faq/logging_and_viewing.mdx
+++ b/docs/tracing/faq/logging_and_viewing.mdx
@@ -25,6 +25,24 @@ The `@traceable` decorator is a simple way to log traces from the LangSmith Pyth
 your destination project](/tracing/faq/customizing_trace_attributes#changing-the-destination-project-at-runtime),
 [add custom metadata and tags](/tracing/faq/customizing_trace_attributes#adding-metadata-and-tags-to-traces),
 and [customize your run name](/tracing/faq/customizing_trace_attributes#customizing-the-run-name).
+<CodeTabs
+  tabs={[
+    PythonBlock(`from langsmith import traceable\n
+@traceable # Auto-trace this function
+def my_function(text: str) -> Any:
+    return "result"\n
+my_function("Why is the sky blue?")
+`),
+    TypeScriptBlock(`import { traceable } from "langsmith/traceable";\n
+const myFunction = traceable(async (text: string) => {
+  return "result";
+});\n
+await myFunction("Why is the sky blue?");
+`),
+  ]}
+  groupId="client-language"
+/>
+
 
 Also available is the `wrap_openai` function. This function allows you to wrap your OpenAI client
 in order to automatically log traces, no decorator necessary - it is applied for you, under the hood.
@@ -117,39 +135,25 @@ child_llm_run.end(outputs=chat_completion)
 child_llm_run.post()\n
 pipeline.end(outputs={"answer": chat_completion.choices[0].message.content})
 pipeline.post()`),
-    TypeScriptBlock(`// To run the example below, ensure the environment variable OPENAI_API_KEY is set
-import OpenAI from "openai";
-import { RunTree } from "langsmith";\n
-// This can be a user input to your app
-const question = "Can you summarize this morning's meetings?";\n
-const pipeline = new RunTree({
-  name: "Chat Pipeline",
-  run_type: "chain",
-  inputs: { question }
+    TypeScriptBlock(`import OpenAI from "openai";
+import { traceable } from "langsmith/traceable";\n
+const client = new OpenAI();\n
+const myTool = traceable(async (question: string) => {
+  return "During this morning's meeting, we solved all world conflict.";
 });\n
-// This can be retrieved in a retrieval step
-const context = "During this morning's meeting, we solved all world conflict.";\n
-const messages = [
+const chatPipeline = traceable(async (question: string) => {
+  const context = await myTool(question);
+  const messages = [
     { role: "system", content: "You are a helpful assistant. Please respond to the user's request only based on the given context." },
-    { role: "user", content: \`Question: \${question}\nContext: \${context}\` }
-];\n
-// Create a child run
-const childRun = await pipeline.createChild({
-  name: "OpenAI Call",
-  run_type: "llm",
-  inputs: { messages },
-});\n
-// Generate a completion
-const client = new OpenAI();
-const chatCompletion = await client.chat.completions.create({
+    { role: "user", content: \`Question: \${question}\\nContext: \${context}\` }
+  ];
+  const chatCompletion = await client.chat.completions.create({
     model: "gpt-3.5-turbo",
-    messages: messages,
+    messages: messages
+  });
+  return chatCompletion.choices[0].message.content;
 });\n
-// End the runs and log them
-childRun.end(chatCompletion);
-await childRun.postRun();\n
-pipeline.end({ outputs: { answer: chatCompletion.choices[0].message.content } });
-await pipeline.postRun();`),
+await chatPipeline("Can you summarize this morning's meetings?");`),
     APIBlock(`# To run the example below, ensure the environment variable OPENAI_API_KEY is set
 # Here, we'll show you how to use the requests library in Python to log a trace, but you can use any HTTP client in any language.
 import openai
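For readers following the series, the TypeScript tab after this patch reduces to the following standalone program (a sketch assembled from the hunk above; it assumes `OPENAI_API_KEY` and the usual LangSmith tracing environment variables are set):

```typescript
import OpenAI from "openai";
import { traceable } from "langsmith/traceable";

const client = new OpenAI();

// A stand-in "retrieval" step; traceable logs its inputs and outputs as a run.
const myTool = traceable(async (question: string) => {
  return "During this morning's meeting, we solved all world conflict.";
});

// Calls made inside a traceable function are nested under its trace,
// so no manual child-run bookkeeping is needed.
const chatPipeline = traceable(async (question: string) => {
  const context = await myTool(question);
  const messages = [
    { role: "system", content: "You are a helpful assistant. Please respond to the user's request only based on the given context." },
    { role: "user", content: `Question: ${question}\nContext: ${context}` }
  ];
  const chatCompletion = await client.chat.completions.create({
    model: "gpt-3.5-turbo",
    messages: messages
  });
  return chatCompletion.choices[0].message.content;
});

await chatPipeline("Can you summarize this morning's meetings?");
```

Compared to the removed `RunTree` version, `traceable` replaces the explicit `createChild` / `end` / `postRun` plumbing: each wrapped function is recorded automatically, with nesting inferred from the call structure.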
From 56ea3c6d4f1a9628dc4248db13f53d6303225b52 Mon Sep 17 00:00:00 2001
From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
Date: Mon, 15 Apr 2024 12:01:10 -0700
Subject: [PATCH 2/5] Wrap

---
 docs/tracing/faq/logging_and_viewing.mdx | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/docs/tracing/faq/logging_and_viewing.mdx b/docs/tracing/faq/logging_and_viewing.mdx
index 571d132e..0d8962a8 100644
--- a/docs/tracing/faq/logging_and_viewing.mdx
+++ b/docs/tracing/faq/logging_and_viewing.mdx
@@ -136,24 +136,30 @@ child_llm_run.post()\n
 pipeline.end(outputs={"answer": chat_completion.choices[0].message.content})
 pipeline.post()`),
     TypeScriptBlock(`import OpenAI from "openai";
-import { traceable } from "langsmith/traceable";\n
-const client = new OpenAI();\n
+import { traceable } from "langsmith/traceable";
+import { wrapOpenAI } from "langsmith/wrappers";\n
+const client = wrapOpenAI(new OpenAI());\n
 const myTool = traceable(async (question: string) => {
   return "During this morning's meeting, we solved all world conflict.";
 });\n
 const chatPipeline = traceable(async (question: string) => {
   const context = await myTool(question);
   const messages = [
-    { role: "system", content: "You are a helpful assistant. Please respond to the user's request only based on the given context." },
-    { role: "user", content: \`Question: \${question}\\nContext: \${context}\` }
+    {
+      role: "system",
+      content:
+        "You are a helpful assistant. Please respond to the user's request only based on the given context.",
+    },
+    { role: "user", content: `Question: ${question}\nContext: ${context}` },
   ];
   const chatCompletion = await client.chat.completions.create({
     model: "gpt-3.5-turbo",
-    messages: messages
+    messages: messages,
   });
   return chatCompletion.choices[0].message.content;
 });\n
-await chatPipeline("Can you summarize this morning's meetings?");`),
+await chatPipeline("Can you summarize this morning's meetings?");
+`),
     APIBlock(`# To run the example below, ensure the environment variable OPENAI_API_KEY is set
 # Here, we'll show you how to use the requests library in Python to log a trace, but you can use any HTTP client in any language.
 import openai
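The key change in this patch is `wrapOpenAI` from `langsmith/wrappers`: the wrapped client keeps the same API surface as a plain `OpenAI` client, but its completion calls are logged as LLM runs with no decorator or manual child run required. A minimal sketch of the wrapper on its own (again assuming `OPENAI_API_KEY` and the LangSmith tracing environment variables are set):

```typescript
import OpenAI from "openai";
import { wrapOpenAI } from "langsmith/wrappers";

// wrapOpenAI returns a drop-in replacement for the plain client;
// calls to chat.completions.create through it are traced automatically.
const client = wrapOpenAI(new OpenAI());

const chatCompletion = await client.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "Why is the sky blue?" }],
});

console.log(chatCompletion.choices[0].message.content);
```

When the wrapped client is used inside a `traceable` function, as in the pipeline above, the LLM run nests under that function's trace.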
From 03a8ec2ad5c6d95f38dd3ecd1445ae515ff144de Mon Sep 17 00:00:00 2001
From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
Date: Mon, 15 Apr 2024 14:06:37 -0700
Subject: [PATCH 3/5] escape

---
 docs/tracing/faq/logging_and_viewing.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/tracing/faq/logging_and_viewing.mdx b/docs/tracing/faq/logging_and_viewing.mdx
index 17dd1bfa..daa96ae0 100644
--- a/docs/tracing/faq/logging_and_viewing.mdx
+++ b/docs/tracing/faq/logging_and_viewing.mdx
@@ -150,7 +150,7 @@ const chatPipeline = traceable(async (question: string) => {
       content:
         "You are a helpful assistant. Please respond to the user's request only based on the given context.",
     },
-    { role: "user", content: `Question: ${question}\nContext: ${context}` },
+    { role: "user", content: \`Question: ${question}\nContext: ${context}\` },
   ];
   const chatCompletion = await client.chat.completions.create({
     model: "gpt-3.5-turbo",

From e38a0f0a5f402d9028b31f508a9b4e080db2af6b Mon Sep 17 00:00:00 2001
From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
Date: Mon, 15 Apr 2024 14:14:57 -0700
Subject: [PATCH 4/5] log

---
 docs/tracing/faq/logging_and_viewing.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/tracing/faq/logging_and_viewing.mdx b/docs/tracing/faq/logging_and_viewing.mdx
index daa96ae0..5f2fc67d 100644
--- a/docs/tracing/faq/logging_and_viewing.mdx
+++ b/docs/tracing/faq/logging_and_viewing.mdx
@@ -150,7 +150,7 @@ const chatPipeline = traceable(async (question: string) => {
       content:
         "You are a helpful assistant. Please respond to the user's request only based on the given context.",
     },
-    { role: "user", content: \`Question: ${question}\nContext: ${context}\` },
+    { role: "user", content: \`Question: $\{question\}\nContext: ${context}\` },
   ];
   const chatCompletion = await client.chat.completions.create({
     model: "gpt-3.5-turbo",

From a36d12186a3125ede919c8267c025369e5be58a5 Mon Sep 17 00:00:00 2001
From: William Fu-Hinthorn <13333726+hinthornw@users.noreply.github.com>
Date: Mon, 15 Apr 2024 14:19:27 -0700
Subject: [PATCH 5/5] format

---
 docs/tracing/faq/logging_and_viewing.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/tracing/faq/logging_and_viewing.mdx b/docs/tracing/faq/logging_and_viewing.mdx
index 5f2fc67d..da2f4adb 100644
--- a/docs/tracing/faq/logging_and_viewing.mdx
+++ b/docs/tracing/faq/logging_and_viewing.mdx
@@ -150,7 +150,7 @@ const chatPipeline = traceable(async (question: string) => {
       content:
         "You are a helpful assistant. Please respond to the user's request only based on the given context.",
     },
-    { role: "user", content: \`Question: $\{question\}\nContext: ${context}\` },
+    { role: "user", content: \`Question: $\{question\}\nContext: $\{context\}\` },
   ];
   const chatCompletion = await client.chat.completions.create({
     model: "gpt-3.5-turbo",
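Patches 3 through 5 all fix the same subtlety: in the `.mdx` source, the example code lives inside a JavaScript template literal (`TypeScriptBlock(...)`), so any backtick or `${...}` inside the example must be escaped, or it is interpolated when the docs page is built instead of being shown verbatim. A self-contained illustration of the problem (variable names here are illustrative, not from the docs):

```typescript
// The docs embed example code inside a template literal, so an unescaped
// ${...} is evaluated at build time instead of appearing on the page.
const question = "<interpolated at build time!>";

// Unescaped: the placeholder is substituted immediately.
const wrong = `const msg = \`Question: ${question}\`;`;

// Escaped as in PATCH 4/5 ($\{...\}): the literal text survives.
const right = `const msg = \`Question: $\{question\}\`;`;

console.log(wrong); // const msg = `Question: <interpolated at build time!>`;
console.log(right); // const msg = `Question: ${question}`;
```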