From c5275a238220be2cea7599ec4a5fd7b318a1e3af Mon Sep 17 00:00:00 2001
From: Matthias Georgi
Date: Sun, 12 Jan 2025 21:22:27 +0100
Subject: [PATCH] add fal llm

---
 src/nodetool/metadata/nodes.json   | 141 +++++++++++++++++++++++------
 src/nodetool/nodes/fal/__init__.py |   1 +
 src/nodetool/nodes/fal/llm.py      |  60 ++++++++++++
 3 files changed, 175 insertions(+), 27 deletions(-)
 create mode 100644 src/nodetool/nodes/fal/llm.py

diff --git a/src/nodetool/metadata/nodes.json b/src/nodetool/metadata/nodes.json
index 8c9bc02d..da179aac 100644
--- a/src/nodetool/metadata/nodes.json
+++ b/src/nodetool/metadata/nodes.json
@@ -22381,9 +22381,9 @@
         "month": 1,
         "day": 12,
         "hour": 21,
-        "minute": 3,
-        "second": 54,
-        "microsecond": 437080,
+        "minute": 20,
+        "second": 45,
+        "microsecond": 654886,
         "tzinfo": "UTC",
         "utc_offset": 0.0
       },
@@ -23423,9 +23423,9 @@
         "month": 1,
         "day": 12,
         "hour": 21,
-        "minute": 3,
-        "second": 54,
-        "microsecond": 441033,
+        "minute": 20,
+        "second": 45,
+        "microsecond": 658766,
         "tzinfo": "UTC",
         "utc_offset": 0.0
       },
@@ -23476,9 +23476,9 @@
         "month": 1,
         "day": 12,
         "hour": 21,
-        "minute": 3,
-        "second": 54,
-        "microsecond": 441286,
+        "minute": 20,
+        "second": 45,
+        "microsecond": 659014,
         "tzinfo": "UTC",
         "utc_offset": 0.0
       },
@@ -23529,9 +23529,9 @@
         "month": 1,
         "day": 12,
         "hour": 21,
-        "minute": 3,
-        "second": 54,
-        "microsecond": 441487,
+        "minute": 20,
+        "second": 45,
+        "microsecond": 659222,
         "tzinfo": "UTC",
         "utc_offset": 0.0
       },
@@ -23582,9 +23582,9 @@
         "month": 1,
         "day": 12,
         "hour": 21,
-        "minute": 3,
-        "second": 54,
-        "microsecond": 441704,
+        "minute": 20,
+        "second": 45,
+        "microsecond": 659433,
         "tzinfo": "UTC",
         "utc_offset": 0.0
       },
@@ -23635,9 +23635,9 @@
         "month": 1,
         "day": 12,
         "hour": 21,
-        "minute": 3,
-        "second": 54,
-        "microsecond": 441908,
+        "minute": 20,
+        "second": 45,
+        "microsecond": 659641,
         "tzinfo": "UTC",
         "utc_offset": 0.0
       },
@@ -23688,9 +23688,9 @@
         "month": 1,
         "day": 12,
         "hour": 21,
-        "minute": 3,
-        "second": 54,
-        "microsecond": 442178,
+        "minute": 20,
+        "second": 45,
+        "microsecond": 659900,
         "tzinfo": "UTC",
         "utc_offset": 0.0
       },
@@ -23741,9 +23741,9 @@
         "month": 1,
         "day": 12,
         "hour": 21,
-        "minute": 3,
-        "second": 54,
-        "microsecond": 442397,
+        "minute": 20,
+        "second": 45,
+        "microsecond": 660105,
         "tzinfo": "UTC",
         "utc_offset": 0.0
       },
@@ -23810,9 +23810,9 @@
         "month": 1,
         "day": 12,
         "hour": 21,
-        "minute": 3,
-        "second": 54,
-        "microsecond": 442645,
+        "minute": 20,
+        "second": 45,
+        "microsecond": 660342,
         "tzinfo": "UTC",
         "utc_offset": 0.0
       },
@@ -74899,6 +74899,93 @@
       "recommended_models": [],
       "basic_fields": []
     },
+    {
+      "title": "Any LLM",
+      "description": "Use any large language model from a selected catalogue (powered by OpenRouter).\n    Supports various models including Claude 3, Gemini, Llama, and GPT-4.",
+      "namespace": "fal.llm",
+      "node_type": "fal.llm.AnyLLM",
+      "layout": "default",
+      "properties": [
+        {
+          "name": "prompt",
+          "type": {
+            "type": "str",
+            "optional": false,
+            "values": null,
+            "type_args": [],
+            "type_name": null
+          },
+          "default": "",
+          "title": "Prompt",
+          "description": "The prompt to send to the language model",
+          "min": null,
+          "max": null
+        },
+        {
+          "name": "system_prompt",
+          "type": {
+            "type": "str",
+            "optional": false,
+            "values": null,
+            "type_args": [],
+            "type_name": null
+          },
+          "default": "",
+          "title": "System Prompt",
+          "description": "Optional system prompt to provide context or instructions",
+          "min": null,
+          "max": null
+        },
+        {
+          "name": "model",
+          "type": {
+            "type": "enum",
+            "optional": false,
+            "values": [
"anthropic/claude-3.5-sonnet", + "anthropic/claude-3-5-haiku", + "anthropic/claude-3-haiku", + "google/gemini-pro-1.5", + "google/gemini-flash-1.5", + "google/gemini-flash-1.5-8b", + "meta-llama/llama-3.2-1b-instruct", + "meta-llama/llama-3.2-3b-instruct", + "meta-llama/llama-3.1-8b-instruct", + "meta-llama/llama-3.1-70b-instruct", + "openai/gpt-4o-mini", + "openai/gpt-4o" + ], + "type_args": [], + "type_name": "nodetool.nodes.fal.llm.ModelEnum" + }, + "default": "google/gemini-flash-1.5", + "title": "Model", + "description": "The language model to use for the completion", + "min": null, + "max": null + } + ], + "outputs": [ + { + "type": { + "type": "str", + "optional": false, + "values": null, + "type_args": [], + "type_name": null + }, + "name": "output", + "stream": false + } + ], + "the_model_info": {}, + "recommended_models": [], + "basic_fields": [ + "prompt", + "model", + "system_prompt" + ] + }, { "title": "Ideogram V 2", "description": "Ideogram V2 is a state-of-the-art image generation model optimized for commercial and creative use,\n featuring exceptional typography handling and realistic outputs.", diff --git a/src/nodetool/nodes/fal/__init__.py b/src/nodetool/nodes/fal/__init__.py index 5a1d86be..45f4667f 100644 --- a/src/nodetool/nodes/fal/__init__.py +++ b/src/nodetool/nodes/fal/__init__.py @@ -1,3 +1,4 @@ +import nodetool.nodes.fal.llm import nodetool.nodes.fal.text_to_image import nodetool.nodes.fal.text_to_audio import nodetool.nodes.fal.speech_to_text diff --git a/src/nodetool/nodes/fal/llm.py b/src/nodetool/nodes/fal/llm.py new file mode 100644 index 00000000..c2dd74ab --- /dev/null +++ b/src/nodetool/nodes/fal/llm.py @@ -0,0 +1,60 @@ +from enum import Enum +from pydantic import Field +from nodetool.nodes.fal.fal_node import FALNode +from nodetool.workflows.processing_context import ProcessingContext + + +class ModelEnum(str, Enum): + CLAUDE_3_SONNET = "anthropic/claude-3.5-sonnet" + CLAUDE_3_HAIKU = "anthropic/claude-3-5-haiku" + CLAUDE_3_HAIKU_LEGACY = "anthropic/claude-3-haiku" + GEMINI_PRO = "google/gemini-pro-1.5" + GEMINI_FLASH = "google/gemini-flash-1.5" + GEMINI_FLASH_8B = "google/gemini-flash-1.5-8b" + LLAMA_1B = "meta-llama/llama-3.2-1b-instruct" + LLAMA_3B = "meta-llama/llama-3.2-3b-instruct" + LLAMA_8B = "meta-llama/llama-3.1-8b-instruct" + LLAMA_70B = "meta-llama/llama-3.1-70b-instruct" + GPT4_MINI = "openai/gpt-4o-mini" + GPT4 = "openai/gpt-4o" + + +class AnyLLM(FALNode): + """ + Use any large language model from a selected catalogue (powered by OpenRouter). + Supports various models including Claude 3, Gemini, Llama, and GPT-4. + """ + + prompt: str = Field( + default="", description="The prompt to send to the language model" + ) + system_prompt: str = Field( + default="", + description="Optional system prompt to provide context or instructions", + ) + model: ModelEnum = Field( + default=ModelEnum.GEMINI_FLASH, + description="The language model to use for the completion", + ) + + async def process(self, context: ProcessingContext) -> str: + """ + Process the prompt using the selected language model. + + Returns: + dict: Contains the generated output text and status information + """ + arguments = {"prompt": self.prompt, "model": self.model.value} + + if self.system_prompt: + arguments["system_prompt"] = self.system_prompt + + result = await self.submit_request( + context=context, application="fal-ai/any-llm", arguments=arguments + ) + + return result["output"] + + @classmethod + def get_basic_fields(cls): + return ["prompt", "model", "system_prompt"]