Skip to content

Commit

Permalink
Merge branch 'main' of github.com:nodetool-ai/nodetool
Browse files Browse the repository at this point in the history
  • Loading branch information
heavy-d committed Jan 12, 2025
2 parents c543a5b + c5275a2 commit 93038af
Show file tree
Hide file tree
Showing 3 changed files with 175 additions and 27 deletions.
141 changes: 114 additions & 27 deletions src/nodetool/metadata/nodes.json
Original file line number Diff line number Diff line change
Expand Up @@ -22381,9 +22381,9 @@
"month": 1,
"day": 12,
"hour": 21,
"minute": 3,
"second": 54,
"microsecond": 437080,
"minute": 20,
"second": 45,
"microsecond": 654886,
"tzinfo": "UTC",
"utc_offset": 0.0
},
Expand Down Expand Up @@ -23423,9 +23423,9 @@
"month": 1,
"day": 12,
"hour": 21,
"minute": 3,
"second": 54,
"microsecond": 441033,
"minute": 20,
"second": 45,
"microsecond": 658766,
"tzinfo": "UTC",
"utc_offset": 0.0
},
Expand Down Expand Up @@ -23476,9 +23476,9 @@
"month": 1,
"day": 12,
"hour": 21,
"minute": 3,
"second": 54,
"microsecond": 441286,
"minute": 20,
"second": 45,
"microsecond": 659014,
"tzinfo": "UTC",
"utc_offset": 0.0
},
Expand Down Expand Up @@ -23529,9 +23529,9 @@
"month": 1,
"day": 12,
"hour": 21,
"minute": 3,
"second": 54,
"microsecond": 441487,
"minute": 20,
"second": 45,
"microsecond": 659222,
"tzinfo": "UTC",
"utc_offset": 0.0
},
Expand Down Expand Up @@ -23582,9 +23582,9 @@
"month": 1,
"day": 12,
"hour": 21,
"minute": 3,
"second": 54,
"microsecond": 441704,
"minute": 20,
"second": 45,
"microsecond": 659433,
"tzinfo": "UTC",
"utc_offset": 0.0
},
Expand Down Expand Up @@ -23635,9 +23635,9 @@
"month": 1,
"day": 12,
"hour": 21,
"minute": 3,
"second": 54,
"microsecond": 441908,
"minute": 20,
"second": 45,
"microsecond": 659641,
"tzinfo": "UTC",
"utc_offset": 0.0
},
Expand Down Expand Up @@ -23688,9 +23688,9 @@
"month": 1,
"day": 12,
"hour": 21,
"minute": 3,
"second": 54,
"microsecond": 442178,
"minute": 20,
"second": 45,
"microsecond": 659900,
"tzinfo": "UTC",
"utc_offset": 0.0
},
Expand Down Expand Up @@ -23741,9 +23741,9 @@
"month": 1,
"day": 12,
"hour": 21,
"minute": 3,
"second": 54,
"microsecond": 442397,
"minute": 20,
"second": 45,
"microsecond": 660105,
"tzinfo": "UTC",
"utc_offset": 0.0
},
Expand Down Expand Up @@ -23810,9 +23810,9 @@
"month": 1,
"day": 12,
"hour": 21,
"minute": 3,
"second": 54,
"microsecond": 442645,
"minute": 20,
"second": 45,
"microsecond": 660342,
"tzinfo": "UTC",
"utc_offset": 0.0
},
Expand Down Expand Up @@ -74899,6 +74899,93 @@
"recommended_models": [],
"basic_fields": []
},
{
"title": "Any LLM",
"description": "Use any large language model from a selected catalogue (powered by OpenRouter).\n Supports various models including Claude 3, Gemini, Llama, and GPT-4.",
"namespace": "fal.llm",
"node_type": "fal.llm.AnyLLM",
"layout": "default",
"properties": [
{
"name": "prompt",
"type": {
"type": "str",
"optional": false,
"values": null,
"type_args": [],
"type_name": null
},
"default": "",
"title": "Prompt",
"description": "The prompt to send to the language model",
"min": null,
"max": null
},
{
"name": "system_prompt",
"type": {
"type": "str",
"optional": false,
"values": null,
"type_args": [],
"type_name": null
},
"default": "",
"title": "System Prompt",
"description": "Optional system prompt to provide context or instructions",
"min": null,
"max": null
},
{
"name": "model",
"type": {
"type": "enum",
"optional": false,
"values": [
"anthropic/claude-3.5-sonnet",
"anthropic/claude-3-5-haiku",
"anthropic/claude-3-haiku",
"google/gemini-pro-1.5",
"google/gemini-flash-1.5",
"google/gemini-flash-1.5-8b",
"meta-llama/llama-3.2-1b-instruct",
"meta-llama/llama-3.2-3b-instruct",
"meta-llama/llama-3.1-8b-instruct",
"meta-llama/llama-3.1-70b-instruct",
"openai/gpt-4o-mini",
"openai/gpt-4o"
],
"type_args": [],
"type_name": "nodetool.nodes.fal.llm.ModelEnum"
},
"default": "google/gemini-flash-1.5",
"title": "Model",
"description": "The language model to use for the completion",
"min": null,
"max": null
}
],
"outputs": [
{
"type": {
"type": "str",
"optional": false,
"values": null,
"type_args": [],
"type_name": null
},
"name": "output",
"stream": false
}
],
"the_model_info": {},
"recommended_models": [],
"basic_fields": [
"prompt",
"model",
"system_prompt"
]
},
{
"title": "Ideogram V 2",
"description": "Ideogram V2 is a state-of-the-art image generation model optimized for commercial and creative use,\n featuring exceptional typography handling and realistic outputs.",
Expand Down
1 change: 1 addition & 0 deletions src/nodetool/nodes/fal/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import nodetool.nodes.fal.llm
import nodetool.nodes.fal.text_to_image
import nodetool.nodes.fal.text_to_audio
import nodetool.nodes.fal.speech_to_text
Expand Down
60 changes: 60 additions & 0 deletions src/nodetool/nodes/fal/llm.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
from enum import Enum
from pydantic import Field
from nodetool.nodes.fal.fal_node import FALNode
from nodetool.workflows.processing_context import ProcessingContext


class ModelEnum(str, Enum):
    """Model identifiers accepted by the fal-ai/any-llm endpoint.

    Values use OpenRouter-style "<vendor>/<model>" slugs and are sent
    verbatim as the ``model`` argument of the request (see AnyLLM.process).
    """

    # Anthropic Claude family
    CLAUDE_3_SONNET = "anthropic/claude-3.5-sonnet"
    CLAUDE_3_HAIKU = "anthropic/claude-3-5-haiku"
    CLAUDE_3_HAIKU_LEGACY = "anthropic/claude-3-haiku"
    # Google Gemini family
    GEMINI_PRO = "google/gemini-pro-1.5"
    GEMINI_FLASH = "google/gemini-flash-1.5"
    GEMINI_FLASH_8B = "google/gemini-flash-1.5-8b"
    # Meta Llama family (suffix is parameter count)
    LLAMA_1B = "meta-llama/llama-3.2-1b-instruct"
    LLAMA_3B = "meta-llama/llama-3.2-3b-instruct"
    LLAMA_8B = "meta-llama/llama-3.1-8b-instruct"
    LLAMA_70B = "meta-llama/llama-3.1-70b-instruct"
    # OpenAI GPT-4o family
    GPT4_MINI = "openai/gpt-4o-mini"
    GPT4 = "openai/gpt-4o"


class AnyLLM(FALNode):
"""
Use any large language model from a selected catalogue (powered by OpenRouter).
Supports various models including Claude 3, Gemini, Llama, and GPT-4.
"""

prompt: str = Field(
default="", description="The prompt to send to the language model"
)
system_prompt: str = Field(
default="",
description="Optional system prompt to provide context or instructions",
)
model: ModelEnum = Field(
default=ModelEnum.GEMINI_FLASH,
description="The language model to use for the completion",
)

async def process(self, context: ProcessingContext) -> str:
"""
Process the prompt using the selected language model.
Returns:
dict: Contains the generated output text and status information
"""
arguments = {"prompt": self.prompt, "model": self.model.value}

if self.system_prompt:
arguments["system_prompt"] = self.system_prompt

result = await self.submit_request(
context=context, application="fal-ai/any-llm", arguments=arguments
)

return result["output"]

@classmethod
def get_basic_fields(cls):
return ["prompt", "model", "system_prompt"]

0 comments on commit 93038af

Please sign in to comment.