Commit

lint ipynb
jpt-sn committed Jan 10, 2025
1 parent 27cf6ed commit b199fa6
Showing 3 changed files with 69 additions and 64 deletions.
66 changes: 34 additions & 32 deletions examples/intro_clean.ipynb
@@ -64,7 +64,7 @@
"# if not os.path.exists(llm_cache_path):\n",
"# llm_cache_path = f\"../{llm_cache_path}\"\n",
"# assert os.path.exists(llm_cache_path)\n",
"# llms._REPLAY_SQLITE = llm_cache_path\n"
"# llms._REPLAY_SQLITE = llm_cache_path"
]
},
{
@@ -91,8 +91,8 @@
"source": [
"from tapeagents.agent import Agent, Node\n",
"from tapeagents.core import Prompt, SetNextNode\n",
"from tapeagents.dialog_tape import AssistantStep, UserStep, DialogTape\n",
"from tapeagents.llms import LLMStream, LiteLLM\n",
"from tapeagents.dialog_tape import AssistantStep, DialogTape, UserStep\n",
"from tapeagents.llms import LiteLLM, LLMStream\n",
"from tapeagents.prompting import tape_to_messages\n",
"\n",
"llm = LiteLLM(model_name=\"gpt-4o-mini\")\n",
@@ -113,7 +113,7 @@
"agent = Agent[DialogTape].create(llm, nodes=[MainNode()])\n",
"start_tape = DialogTape(steps=[UserStep(content=\"Tell me about Vulcan in 3 sentences\")])\n",
"final_tape = agent.run(start_tape).get_final_tape() # agent will start executing the first node\n",
"print(f\"Final tape: {final_tape.model_dump_json(indent=2)}\")\n"
"print(f\"Final tape: {final_tape.model_dump_json(indent=2)}\")"
]
},
{
@@ -142,7 +142,7 @@
"# specifying different Context and Step types. In the output of this cell,\n",
"# look at Union[UserStep, AssistantStep, ...]\n",
"# for the list of possible step types in the DialogTape.\n",
"DialogTape\n"
"DialogTape"
]
},
{
@@ -175,7 +175,7 @@
"# Almost all classes in TapeAgents are Pydantic base models.\n",
"# This allows easy validation, serialization and instrospection. For example,\n",
"# here we are able to list all the fields in the Prompt model.\n",
"Prompt.model_fields\n"
"Prompt.model_fields"
]
},
{
@@ -202,7 +202,7 @@
"# (note: you can not use Prompt object for more than 1 LLM call in TapeAgents)\n",
"prompt = Prompt(messages=[{\"role\": \"user\", \"content\": \"Write hello world in C\"}])\n",
"print(\"\\n\" + \"-\" * 30)\n",
"print(llm_stream.generate(prompt).get_text())\n"
"print(llm_stream.generate(prompt).get_text())"
]
},
{
@@ -220,7 +220,7 @@
"source": [
"print((user := UserStep(content=\"hi AI!\")).llm_dict())\n",
"print((assistant := AssistantStep(content=\"hello human\")).llm_dict())\n",
"print(tape_to_messages(DialogTape(steps=[user, assistant])))\n"
"print(tape_to_messages(DialogTape(steps=[user, assistant])))"
]
},
{
@@ -258,7 +258,7 @@
"print(\"--- ALL TEXT ---\")\n",
"print(text.text)\n",
"print(\"--- PREDICTED CHARACTERS ---\")\n",
"print(text.output_text)\n"
"print(text.output_text)"
]
},
{
@@ -323,7 +323,7 @@
"# Step 3: generate steps that the agent will then add to the tape\n",
"print(\"Produced Steps:\")\n",
"for step in node.generate_steps(agent, start_tape, stream):\n",
" print(step.model_dump_json(indent=2))\n"
" print(step.model_dump_json(indent=2))"
]
},
{
@@ -346,8 +346,8 @@
"metadata": {},
"outputs": [],
"source": [
"from tapeagents.view import TapeViewStack\n",
"from tapeagents.core import StepMetadata\n",
"from tapeagents.view import TapeViewStack\n",
"\n",
"# The \"top\" view in the tape view stack is the view of the current agent.\n",
"# Initially `top.last_node` is empty and the agent will run the first node from its list\".\n",
@@ -380,7 +380,7 @@
")\n",
"next_node = TapeViewStack.compute(tape3).top.next_node\n",
"print(f\"3: {next_node}\")\n",
"assert next_node == \"act\"\n"
"assert next_node == \"act\""
]
},
{
@@ -396,12 +396,12 @@
"metadata": {},
"outputs": [],
"source": [
"from tapeagents.core import Action, Pass, Thought, Observation\n",
"from tapeagents.core import Action, Observation, Pass, Thought\n",
"from tapeagents.dialog_tape import AssistantThought, ToolCalls, ToolResult\n",
"\n",
"assert all([issubclass(step_class, Action) for step_class in [AssistantStep, ToolCalls]])\n",
"assert all([issubclass(step_class, Thought) for step_class in [AssistantThought, SetNextNode, Pass]])\n",
"assert all([issubclass(step_class, Observation) for step_class in [UserStep, ToolResult]])\n"
"assert all([issubclass(step_class, Observation) for step_class in [UserStep, ToolResult]])"
]
},
{
@@ -442,7 +442,7 @@
"source": [
"tape_to_continue = final_tape + [UserStep(content=\"No, I mean Vulcan the company\")]\n",
"continued_tape = agent.run(tape_to_continue).get_final_tape()\n",
"print(continued_tape.model_dump_json(indent=2))\n"
"print(continued_tape.model_dump_json(indent=2))"
]
},
{
@@ -469,11 +469,12 @@
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import HTML\n",
"\n",
"from tapeagents.renderers import render_tape_with_prompts\n",
"from tapeagents.renderers.pretty import PrettyRenderer\n",
"from IPython.display import HTML\n",
"\n",
"HTML(render_tape_with_prompts(continued_tape, PrettyRenderer()))\n"
"HTML(render_tape_with_prompts(continued_tape, PrettyRenderer()))"
]
},
{
@@ -565,7 +566,7 @@
" print(event.observation.model_dump_json(indent=2))\n",
"assert final_tape1\n",
"print(\"Final tape:\")\n",
"HTML(render_tape_with_prompts(final_tape1, PrettyRenderer()))\n"
"HTML(render_tape_with_prompts(final_tape1, PrettyRenderer()))"
]
},
{
@@ -615,7 +616,7 @@
"\n",
"if \"HF_TOKEN\" not in os.environ:\n",
" # We need this to acces the model's tokenizer\n",
" os.environ[\"HF_TOKEN\"] = \"<your-hugging-face-api-key>\"\n"
" os.environ[\"HF_TOKEN\"] = \"<your-hugging-face-api-key>\""
]
},
{
@@ -638,9 +639,9 @@
"outputs": [],
"source": [
"import json\n",
"\n",
"from tapeagents.dialog_tape import FunctionCall, ToolCall\n",
"from tapeagents.llms import TrainableLLM\n",
"\n",
"from tapeagents.prompting import step_to_message\n",
"\n",
"env = ToolEnvironment([get_stock_ticker, get_stock_data])\n",
@@ -771,7 +772,7 @@
" \"call_or_respond\": call_or_respond_guidance,\n",
" },\n",
" nodes=[PlanNode(), ActNode()],\n",
")\n"
")"
]
},
{
@@ -801,7 +802,7 @@
" print(event.observation.model_dump_json(indent=2))\n",
"assert final_tape2 is not None\n",
"print(\"Final tape:\")\n",
"HTML(render_tape_with_prompts(final_tape2, PrettyRenderer()))\n"
"HTML(render_tape_with_prompts(final_tape2, PrettyRenderer()))"
]
},
{
@@ -824,7 +825,7 @@
" \"REMEMBER: check what tool calls you have already made. Do not do the same call again!\"\n",
")\n",
"resume_from_step8 = agent2b.run(failed_tape[:8]).get_final_tape()\n",
"HTML(render_tape_with_prompts(resume_from_step8, PrettyRenderer()))\n"
"HTML(render_tape_with_prompts(resume_from_step8, PrettyRenderer()))"
]
},
{
@@ -878,7 +879,7 @@
"example_text = agent2.make_training_text(list(llm_calls.values())[0])\n",
"print(\"From the first retrieved LLM call, the LLM will be trained to predict this text:\")\n",
"print(\"---\")\n",
"print(example_text.output_text)\n"
"print(example_text.output_text)"
]
},
{
@@ -904,7 +905,7 @@
"outputs": [],
"source": [
"reused_tape, _ = agent2.reuse(final_tape1)\n",
"HTML(render_tape_with_prompts(reused_tape, PrettyRenderer()))\n"
"HTML(render_tape_with_prompts(reused_tape, PrettyRenderer()))"
]
},
{
@@ -921,7 +922,7 @@
"outputs": [],
"source": [
"training_data = agent2.make_training_data(final_tape1)\n",
"print(training_data[0].output_text)\n"
"print(training_data[0].output_text)"
]
},
{
@@ -977,7 +978,7 @@
"main_agent_env = ToolEnvironment([get_stock_ticker, get_stock_data, call_search_agent])\n",
"whole_env = ToolEnvironment(\n",
" [get_stock_ticker, get_stock_data, browser.get_search_results, browser.get_page, browser.get_next_page]\n",
")\n"
")"
]
},
{
@@ -1021,7 +1022,7 @@
" print(f\"-- {view.agent_full_name}: {step_summary}\")\n",
"# Note how \"root/xyz\" view appears after step 1 and disappears after step 3.\n",
"# Also note how \"root\" does not see private thoughts of \"xyz\" (step 2), and \"xyz\"\n",
"# does not see the initial observation of root (step 0)\n"
"# does not see the initial observation of root (step 0)"
]
},
{
@@ -1037,8 +1038,8 @@
"metadata": {},
"outputs": [],
"source": [
"from tapeagents.nodes import FixedStepsNode\n",
"from tapeagents.core import Respond\n",
"from tapeagents.nodes import FixedStepsNode\n",
"from tapeagents.prompting import view_to_messages\n",
"\n",
"search_system_instruction = \"Use at most 5 tool calls to search the request info on on the web.\"\n",
@@ -1082,7 +1083,7 @@
" final_tape = ae.partial_tape\n",
" break\n",
"assert final_tape\n",
"HTML(render_tape_with_prompts(final_tape, PrettyRenderer()))\n"
"HTML(render_tape_with_prompts(final_tape, PrettyRenderer()))"
]
},
{
@@ -1098,11 +1099,12 @@
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import clear_output\n",
"\n",
"from tapeagents.core import SetNextNode\n",
"from tapeagents.dialog_tape import AssistantThought, ToolCalls\n",
"from tapeagents.orchestrator import MainLoopStatus, main_loop\n",
"from tapeagents.view import Call\n",
"from IPython.display import clear_output\n",
"\n",
"system_instruction = f\"\"\"\n",
"You will help the user to learn about financials of companies. \n",
@@ -1187,7 +1189,7 @@
" # if event.env_tape:\n",
" # input(\"Press Enter the run the next iteration of the main loop\")\n",
" if event.status == MainLoopStatus.EXTERNAL_INPUT_NEEDED:\n",
" break\n"
" break"
]
},
{
