Merge pull request #6 from johndoe6345789/copilot/delete-autometabuilder-plugins

Migrate app bootstrap to workflow architecture, delete legacy plugins directory
This commit is contained in:
2026-01-10 13:29:21 +00:00
committed by GitHub
17 changed files with 631 additions and 167 deletions

View File

@@ -1,19 +1,10 @@
"""Application runner."""
import logging
import os
from . import load_messages
from .cli_args import parse_args
from .env_loader import load_env
from .github_service import create_github_integration
from .logging_config import configure_logging
from .metadata_loader import load_metadata
from .openai_factory import create_openai_client
from .plugin_loader import load_plugins
from .prompt_loader import load_prompt_yaml
from .tool_map_builder import build_tool_map
from .tool_policy_loader import load_tool_policies
from .tool_registry_loader import load_tool_registry
from .tools_loader import load_tools
from .web.server import start_web_ui
from .workflow_config_loader import load_workflow_config
from .workflow_context_builder import build_workflow_context
@@ -32,33 +23,21 @@ def run_app() -> None:
start_web_ui()
return
msgs = load_messages()
token = os.environ.get("GITHUB_TOKEN")
if not token:
logger.error(msgs["error_github_token_missing"])
logger.error("GITHUB_TOKEN environment variable is required")
return
gh = create_github_integration(token, msgs)
client = create_openai_client(token)
prompt = load_prompt_yaml()
metadata = load_metadata()
tools = load_tools(metadata)
tool_map = build_tool_map(gh, load_tool_registry())
load_plugins(tool_map, tools)
# Build minimal workflow context - workflow plugins handle initialization
context_parts = {
"args": args,
"gh": gh,
"msgs": msgs,
"client": client,
"tools": tools,
"tool_map": tool_map,
"prompt": prompt,
"tool_policies": load_tool_policies()
"github_token": token
}
workflow_context = build_workflow_context(context_parts)
logger.debug("Workflow context ready with %s tools", len(tool_map))
engine = build_workflow_engine(load_workflow_config(metadata), workflow_context, logger)
metadata = load_metadata()
workflow_config = load_workflow_config(metadata)
logger.info("Starting workflow: %s", workflow_config.get("name", "Unnamed"))
engine = build_workflow_engine(workflow_config, workflow_context, logger)
engine.execute()

View File

@@ -1,6 +1,6 @@
{
"tools_path": "tools",
"workflow_path": "workflow.json",
"workflow_path": "packages/default_app_workflow/workflow.json",
"workflow_packages_path": "packages",
"messages": {
"en": "messages/en",

View File

@@ -0,0 +1,242 @@
# Default Application Workflow
This workflow package provides a comprehensive, production-ready workflow that combines backend initialization with an iterative AI agent loop. It demonstrates the "dogfooding" approach where AutoMetabuilder's own application logic is expressed as a declarative workflow.
## Overview
The Default Application Workflow is a complete end-to-end workflow that:
1. **Bootstraps the backend** - Loads all necessary configuration, clients, and tools
2. **Executes the AI loop** - Runs the core AutoMetabuilder agent with tool calling capabilities
This workflow replaces the imperative Python code that was previously in `app_runner.py`, making the application logic:
- **Declarative** - Expressed as data (JSON) rather than code
- **Visual** - Can be visualized as a node graph
- **Testable** - Each node can be tested independently
- **Modular** - Easy to modify, extend, or replace nodes
## Workflow Structure
### Phase 1: Backend Bootstrap (9 nodes)
These nodes initialize all backend services and dependencies:
1. **Load Messages** (`backend.load_messages`)
- Loads internationalized translation messages
- Stores in `runtime.context["msgs"]`
2. **Load Metadata** (`backend.load_metadata`)
- Loads `metadata.json` configuration
- Stores in `runtime.context["metadata"]`
3. **Load Prompt** (`backend.load_prompt`)
- Loads `prompt.yml` configuration
- Resolves model name from environment or prompt
- Stores in `runtime.context["prompt"]` and `runtime.context["model_name"]`
4. **Create GitHub Client** (`backend.create_github`)
- Initializes GitHub API client
- Requires `GITHUB_TOKEN` environment variable
- Stores in `runtime.context["gh"]`
5. **Create OpenAI Client** (`backend.create_openai`)
- Initializes OpenAI/LLM client
- Uses GitHub token for authentication
- Stores in `runtime.context["client"]`
6. **Load Tools** (`backend.load_tools`)
- Loads tool definitions from metadata
- Stores in `runtime.context["tools"]`
7. **Build Tool Map** (`backend.build_tool_map`)
- Creates callable tool registry
- Maps tool names to implementations
- Stores in `runtime.context["tool_map"]`
8. **Load Plugins** (`backend.load_plugins`)
- Loads any custom user plugins
- Registers them in the tool map
9. **Load Tool Policies** (`backend.load_tool_policies`)
- Loads tool execution policies
- Defines which tools require confirmation
- Stores in `runtime.context["tool_policies"]`
### Phase 2: AI Agent Loop (8 nodes)
These nodes execute the core AutoMetabuilder agent:
1. **Load Context** (`core.load_context`)
- Loads SDLC context (roadmap, issues, PRs)
- Provides situational awareness
2. **Seed Messages** (`core.seed_messages`)
- Initializes empty message array
- Prepares conversation state
3. **Append Context** (`core.append_context_message`)
- Adds SDLC context to messages
- Gives AI awareness of repository state
4. **Append User Instruction** (`core.append_user_instruction`)
- Adds user's task instruction
- Defines what the AI should accomplish
5. **Main Loop** (`control.loop`)
- Iterative execution controller
- Runs up to 10 iterations
- Stops when AI has no more tool calls
6. **AI Request** (`core.ai_request`)
- Sends messages to LLM
- Gets back response and optional tool calls
7. **Run Tool Calls** (`core.run_tool_calls`)
- Executes requested tool calls
- Handles confirmation prompts
- Returns results
8. **Append Tool Results** (`core.append_tool_results`)
- Adds tool results to messages
- Loops back to Main Loop for next iteration
## Usage
This workflow is automatically loaded when you run AutoMetabuilder:
```bash
# Set in metadata.json
{
"workflow_path": "packages/default_app_workflow/workflow.json"
}
# Then run
autometabuilder
```
The `app_runner.py` module now simply:
1. Loads environment and configuration
2. Parses command line arguments
3. Loads this workflow
4. Executes it
## Benefits of Workflow-Based Architecture
### 1. Separation of Concerns
- Backend initialization is isolated from AI logic
- Each phase can be tested independently
- Easy to add new initialization steps
### 2. Flexibility
- Swap out individual nodes without touching code
- Try different AI loop strategies
- Add monitoring or logging nodes
### 3. Observability
- Clear execution order
- Easy to trace data flow
- Can add debug nodes at any point
### 4. Extensibility
- Create variant workflows for different use cases
- Mix and match nodes from other packages
- Build custom workflows without code changes
## Data Flow
```
Environment Variables (GITHUB_TOKEN, LLM_MODEL)
Backend Bootstrap Phase
runtime.context populated with:
- msgs (translations)
- metadata (config)
- prompt (agent instructions)
- model_name (LLM to use)
- gh (GitHub client)
- client (OpenAI client)
- tools (tool definitions)
- tool_map (callable tools)
- tool_policies (execution policies)
AI Agent Loop Phase
Iterative execution:
- Load SDLC context
- Send to LLM with tools
- Execute tool calls
- Append results
- Repeat until done
```
## Customization
To create a custom variant:
1. Copy this package:
```bash
cp -r packages/default_app_workflow packages/my_custom_workflow
```
2. Edit `workflow.json`:
- Add/remove nodes
- Change connections
- Modify parameters
3. Update `package.json`:
```json
{
"name": "my_custom_workflow",
"description": "My custom AutoMetabuilder workflow"
}
```
4. Update `metadata.json`:
```json
{
"workflow_path": "packages/my_custom_workflow/workflow.json"
}
```
## Related Workflows
- **backend_bootstrap** - Just the initialization phase, useful for testing
- **single_pass** - One-shot AI request without iteration
- **iterative_loop** - Just the AI loop, assumes backend is initialized
- **plan_execute_summarize** - Multi-phase workflow with explicit planning
## Technical Notes
### Runtime Context vs Store
- **Context** (`runtime.context`): Immutable configuration and dependencies
- Set once during bootstrap
- Available to all nodes
- Contains clients, tools, settings
- **Store** (`runtime.store`): Mutable execution state
- Changes during execution
- Node outputs stored here
- Temporary working data
### Plugin Responsibility
Backend workflow plugins (`backend.*`) have dual responsibility:
1. Return result in output dict (for store)
2. Update `runtime.context` directly (for downstream plugins)
This ensures both workflow data flow and imperative access work correctly.
## Version History
- **1.0.0** - Initial release combining backend bootstrap and AI loop
- Replaces imperative `app_runner.py` logic
- Enables "dogfooding" of workflow architecture
- 17 nodes total: 9 bootstrap + 8 AI loop
## See Also
- [Workflow Architecture](../../WORKFLOW_ARCHITECTURE.md)
- [Workflow Plugin Expansion](../../WORKFLOW_PLUGIN_EXPANSION.md)
- [Workflow Plugins README](../../workflow/plugins/README.md)

View File

@@ -0,0 +1,7 @@
{
"name": "default_app_workflow",
"version": "1.0.0",
"description": "Default application workflow with backend bootstrap and iterative AI loop",
"keywords": ["backend", "bootstrap", "ai", "iterative", "default"],
"license": "MIT"
}

View File

@@ -0,0 +1,335 @@
{
"name": "Default Application Workflow",
"active": false,
"nodes": [
{
"id": "load_messages",
"name": "Load Messages",
"type": "backend.load_messages",
"typeVersion": 1,
"position": [0, 0],
"parameters": {}
},
{
"id": "load_metadata",
"name": "Load Metadata",
"type": "backend.load_metadata",
"typeVersion": 1,
"position": [300, 0],
"parameters": {}
},
{
"id": "load_prompt",
"name": "Load Prompt",
"type": "backend.load_prompt",
"typeVersion": 1,
"position": [600, 0],
"parameters": {}
},
{
"id": "create_github",
"name": "Create GitHub Client",
"type": "backend.create_github",
"typeVersion": 1,
"position": [900, 0],
"parameters": {}
},
{
"id": "create_openai",
"name": "Create OpenAI Client",
"type": "backend.create_openai",
"typeVersion": 1,
"position": [1200, 0],
"parameters": {}
},
{
"id": "load_tools",
"name": "Load Tools",
"type": "backend.load_tools",
"typeVersion": 1,
"position": [1500, 0],
"parameters": {}
},
{
"id": "build_tool_map",
"name": "Build Tool Map",
"type": "backend.build_tool_map",
"typeVersion": 1,
"position": [1800, 0],
"parameters": {}
},
{
"id": "load_plugins",
"name": "Load Plugins",
"type": "backend.load_plugins",
"typeVersion": 1,
"position": [2100, 0],
"parameters": {}
},
{
"id": "load_tool_policies",
"name": "Load Tool Policies",
"type": "backend.load_tool_policies",
"typeVersion": 1,
"position": [2400, 0],
"parameters": {}
},
{
"id": "load_context",
"name": "Load Context",
"type": "core.load_context",
"typeVersion": 1,
"position": [0, 300],
"parameters": {}
},
{
"id": "seed_messages",
"name": "Seed Messages",
"type": "core.seed_messages",
"typeVersion": 1,
"position": [300, 300],
"parameters": {}
},
{
"id": "append_context",
"name": "Append Context",
"type": "core.append_context_message",
"typeVersion": 1,
"position": [600, 300],
"parameters": {}
},
{
"id": "append_user_instruction",
"name": "Append User Instruction",
"type": "core.append_user_instruction",
"typeVersion": 1,
"position": [900, 300],
"parameters": {}
},
{
"id": "main_loop",
"name": "Main Loop",
"type": "control.loop",
"typeVersion": 1,
"position": [1200, 300],
"parameters": {
"max_iterations": 10,
"stop_when": "$no_tool_calls",
"stop_on": "true"
}
},
{
"id": "ai_request",
"name": "AI Request",
"type": "core.ai_request",
"typeVersion": 1,
"position": [1500, 300],
"parameters": {}
},
{
"id": "run_tool_calls",
"name": "Run Tool Calls",
"type": "core.run_tool_calls",
"typeVersion": 1,
"position": [1800, 300],
"parameters": {}
},
{
"id": "append_tool_results",
"name": "Append Tool Results",
"type": "core.append_tool_results",
"typeVersion": 1,
"position": [2100, 300],
"parameters": {}
}
],
"connections": {
"Load Messages": {
"main": {
"0": [
{
"node": "Load Metadata",
"type": "main",
"index": 0
}
]
}
},
"Load Metadata": {
"main": {
"0": [
{
"node": "Load Prompt",
"type": "main",
"index": 0
}
]
}
},
"Load Prompt": {
"main": {
"0": [
{
"node": "Create GitHub Client",
"type": "main",
"index": 0
}
]
}
},
"Create GitHub Client": {
"main": {
"0": [
{
"node": "Create OpenAI Client",
"type": "main",
"index": 0
}
]
}
},
"Create OpenAI Client": {
"main": {
"0": [
{
"node": "Load Tools",
"type": "main",
"index": 0
}
]
}
},
"Load Tools": {
"main": {
"0": [
{
"node": "Build Tool Map",
"type": "main",
"index": 0
}
]
}
},
"Build Tool Map": {
"main": {
"0": [
{
"node": "Load Plugins",
"type": "main",
"index": 0
}
]
}
},
"Load Plugins": {
"main": {
"0": [
{
"node": "Load Tool Policies",
"type": "main",
"index": 0
}
]
}
},
"Load Tool Policies": {
"main": {
"0": [
{
"node": "Load Context",
"type": "main",
"index": 0
}
]
}
},
"Load Context": {
"main": {
"0": [
{
"node": "Seed Messages",
"type": "main",
"index": 0
}
]
}
},
"Seed Messages": {
"main": {
"0": [
{
"node": "Append Context",
"type": "main",
"index": 0
}
]
}
},
"Append Context": {
"main": {
"0": [
{
"node": "Append User Instruction",
"type": "main",
"index": 0
}
]
}
},
"Append User Instruction": {
"main": {
"0": [
{
"node": "Main Loop",
"type": "main",
"index": 0
}
]
}
},
"Main Loop": {
"main": {
"0": [
{
"node": "AI Request",
"type": "main",
"index": 0
}
]
}
},
"AI Request": {
"main": {
"0": [
{
"node": "Run Tool Calls",
"type": "main",
"index": 0
}
]
}
},
"Run Tool Calls": {
"main": {
"0": [
{
"node": "Append Tool Results",
"type": "main",
"index": 0
}
]
}
},
"Append Tool Results": {
"main": {
"0": [
{
"node": "Main Loop",
"type": "main",
"index": 0
}
]
}
}
}
}

View File

@@ -1,15 +0,0 @@
def hello_plugin():
    """A simple plugin that returns a greeting."""
    return "Hello from the plugin system!"


# OpenAI-style tool schema attached to the function so the plugin loader
# can register it as a callable tool with no arguments.
_HELLO_FUNCTION_SCHEMA = {
    "name": "hello_plugin",
    "description": "A simple greeting from the plugin system.",
    "parameters": {"type": "object", "properties": {}},
}
hello_plugin.tool_metadata = {"type": "function", "function": _HELLO_FUNCTION_SCHEMA}

View File

@@ -1,116 +0,0 @@
{
"name": "Default Workflow",
"active": false,
"nodes": [
{
"id": "load_context",
"name": "Load Context",
"type": "core.load_context",
"typeVersion": 1,
"position": [0, 0],
"parameters": {}
},
{
"id": "seed_messages",
"name": "Seed Messages",
"type": "core.seed_messages",
"typeVersion": 1,
"position": [0, 100],
"parameters": {}
},
{
"id": "append_context",
"name": "Append Context",
"type": "core.append_context_message",
"typeVersion": 1,
"position": [300, 50],
"parameters": {}
},
{
"id": "append_user_instruction",
"name": "Append User Instruction",
"type": "core.append_user_instruction",
"typeVersion": 1,
"position": [600, 50],
"parameters": {}
},
{
"id": "main_loop",
"name": "Main Loop",
"type": "control.loop",
"typeVersion": 1,
"position": [900, 50],
"parameters": {
"max_iterations": 10,
"stop_when": "$no_tool_calls",
"stop_on": "true"
}
},
{
"id": "ai_request",
"name": "AI Request",
"type": "core.ai_request",
"typeVersion": 1,
"position": [1200, 50],
"parameters": {}
},
{
"id": "run_tool_calls",
"name": "Run Tool Calls",
"type": "core.run_tool_calls",
"typeVersion": 1,
"position": [1500, 50],
"parameters": {}
},
{
"id": "append_tool_results",
"name": "Append Tool Results",
"type": "core.append_tool_results",
"typeVersion": 1,
"position": [1800, 50],
"parameters": {}
}
],
"connections": {
"Load Context": {
"main": {
"0": [{"node": "Append Context", "type": "main", "index": 0}]
}
},
"Seed Messages": {
"main": {
"0": [{"node": "Append Context", "type": "main", "index": 0}]
}
},
"Append Context": {
"main": {
"0": [{"node": "Append User Instruction", "type": "main", "index": 0}]
}
},
"Append User Instruction": {
"main": {
"0": [{"node": "Main Loop", "type": "main", "index": 0}]
}
},
"Main Loop": {
"main": {
"0": [{"node": "AI Request", "type": "main", "index": 0}]
}
},
"AI Request": {
"main": {
"0": [{"node": "Run Tool Calls", "type": "main", "index": 0}]
}
},
"Run Tool Calls": {
"main": {
"0": [{"node": "Append Tool Results", "type": "main", "index": 0}]
}
},
"Append Tool Results": {
"main": {
"0": [{"node": "Main Loop", "type": "main", "index": 0}]
}
}
}
}

View File

@@ -8,4 +8,6 @@ def run(runtime, _inputs):
gh = runtime.context.get("gh")
registry = load_tool_registry()
tool_map = build_tool_map(gh, registry)
# Store in both store (for workflow) and context (for other plugins)
runtime.context["tool_map"] = tool_map
return {"result": tool_map}

View File

@@ -8,4 +8,6 @@ def run(runtime, _inputs):
msgs = runtime.context.get("msgs", {})
gh = create_github_integration(token, msgs)
# Store in both store (for workflow) and context (for other plugins)
runtime.context["gh"] = gh
return {"result": gh, "initialized": gh is not None}

View File

@@ -7,4 +7,6 @@ def run(runtime, _inputs):
token = runtime.context.get("github_token")
client = create_openai_client(token)
# Store in both store (for workflow) and context (for other plugins)
runtime.context["client"] = client
return {"result": client, "initialized": client is not None}

View File

@@ -2,7 +2,9 @@
from ... import load_messages
def run(runtime, _inputs):
    """Load translation messages.

    Returns the loaded messages in the node output and also mirrors them
    into ``runtime.context["msgs"]`` so downstream plugins can read them.
    """
    messages = load_messages()
    # Store in both store (for workflow) and context (for other plugins)
    runtime.context["msgs"] = messages
    return {"result": messages}

View File

@@ -2,7 +2,9 @@
from ...metadata_loader import load_metadata
def run(runtime, _inputs):
    """Load metadata.json.

    Returns the loaded metadata in the node output and also mirrors it
    into ``runtime.context["metadata"]`` so downstream plugins can read it.
    """
    metadata = load_metadata()
    # Store in both store (for workflow) and context (for other plugins)
    runtime.context["metadata"] = metadata
    return {"result": metadata}

View File

@@ -1,8 +1,13 @@
"""Workflow plugin: load prompt configuration."""
from ...prompt_loader import load_prompt_yaml
from ...model_resolver import resolve_model_name
def run(runtime, _inputs):
    """Load prompt.yml.

    Stores the prompt in ``runtime.context["prompt"]`` and refreshes
    ``runtime.context["model_name"]`` from the freshly loaded prompt so
    downstream plugins see the resolved model.
    """
    prompt = load_prompt_yaml()
    # Store in both store (for workflow) and context (for other plugins)
    runtime.context["prompt"] = prompt
    # Update model_name based on loaded prompt
    runtime.context["model_name"] = resolve_model_name(prompt)
    return {"result": prompt}

View File

@@ -0,0 +1,10 @@
"""Workflow plugin: load tool policies."""
from ...tool_policy_loader import load_tool_policies
def run(runtime, _inputs):
    """Load tool_policies.json."""
    policies = load_tool_policies()
    # Mirror into the shared runtime context so sibling plugins can read it
    # in addition to consuming the node output from the store.
    runtime.context["tool_policies"] = policies
    return {"result": policies}

View File

@@ -6,4 +6,6 @@ def run(runtime, _inputs):
"""Load tool definitions."""
metadata = runtime.context.get("metadata", {})
tools = load_tools(metadata)
# Store in both store (for workflow) and context (for other plugins)
runtime.context["tools"] = tools
return {"result": tools}

View File

@@ -4,7 +4,12 @@ from .model_resolver import resolve_model_name
def build_workflow_context(parts: dict) -> dict:
    """Build the workflow context dict.

    Copies ``parts`` and adds a ``model_name`` entry. When ``parts``
    contains a ``"prompt"`` the model name is resolved from it; otherwise
    a default is resolved from an empty prompt, since workflow plugins
    will load the prompt later and re-resolve the model at that point.
    """
    context = dict(parts)
    # Only resolve model if prompt is available, otherwise use default.
    # Guard with a membership test so callers that defer prompt loading
    # to workflow plugins do not hit a KeyError here.
    if "prompt" in parts:
        context["model_name"] = resolve_model_name(parts["prompt"])
    else:
        # Workflow plugins will load prompt, model will be resolved then
        context["model_name"] = resolve_model_name({})
    return context