mirror of
https://github.com/johndoe6345789/metabuilder.git
synced 2026-04-25 14:25:02 +00:00
Add full Python workflow execution engine with: Core Executor: - engine.py: WorkflowEngine for running n8n configs - n8n_executor.py: N8N-style workflow execution with connections - node_executor.py: Individual node execution with plugin dispatch - loop_executor.py: Loop node execution with iteration control - execution_order.py: Topological sort for node ordering Schema & Validation: - n8n_schema.py: N8N workflow schema types and validation - n8n_converter.py: Legacy to n8n schema conversion Plugin System: - plugin_loader.py: Dynamic plugin loading - plugin_registry.py: Plugin discovery and registration - plugin_map.json: 116 plugin type mappings Runtime & Context: - runtime.py: Workflow runtime container - input_resolver.py: Binding and coercion resolution - value_helpers.py: Value normalization helpers - workflow_context_builder.py: Runtime context assembly - workflow_config_loader.py: Configuration loading - workflow_engine_builder.py: Engine assembly with dependencies Utilities: - tool_calls_handler.py: LLM tool call handling - tool_runner.py: Tool execution with logging - notification_helpers.py: Slack/Discord notifications - workflow_adapter.py: N8N format handling - workflow_graph.py: Node/edge graph for visualization Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
23 lines
718 B
Python
23 lines
718 B
Python
"""Build workflow runtime context."""
|
|
import os
|
|
|
|
DEFAULT_MODEL = "openai/gpt-4o"


def resolve_model_name(prompt: dict) -> str:
    """Resolve the model name to use for LLM calls.

    Precedence: the ``LLM_MODEL`` environment variable, then the
    prompt's ``"model"`` entry, then ``DEFAULT_MODEL``.

    Args:
        prompt: Prompt configuration dict; may contain a "model" key.

    Returns:
        A non-empty model name string.
    """
    # `or`-chaining (instead of .get() defaults) makes an empty or None
    # value at any level fall through to the next source, so the
    # declared `-> str` contract holds even for {"model": None} or an
    # LLM_MODEL env var that is set but empty.
    return os.environ.get("LLM_MODEL") or prompt.get("model") or DEFAULT_MODEL
def build_workflow_context(parts: dict) -> dict:
    """Build the workflow context dict.

    Returns a shallow copy of *parts* with a resolved ``model_name``
    key added. When no "prompt" part is present (it may be loaded
    later by a workflow plugin), resolution runs against an empty
    prompt so the env/default fallback still applies.
    """
    prompt = parts["prompt"] if "prompt" in parts else {}
    return {**parts, "model_name": resolve_model_name(prompt)}