Merge pull request #13 from johndoe6345789/copilot/convert-autometabuilder-to-plugins

Inline utils module into workflow plugins and remove utils directory
This commit is contained in:
2026-01-10 16:51:23 +00:00
committed by GitHub
18 changed files with 315 additions and 175 deletions

View File

@@ -1,13 +1,60 @@
"""Application runner."""
import argparse
import logging
import os
from .utils import parse_args
from .loaders import load_env
from .utils.logging_config import configure_logging
from .loaders import load_metadata
from .web.server import start_web_ui
from .engine import load_workflow_config, build_workflow_context, build_workflow_engine
TRACE_LEVEL = 5


def configure_logging() -> None:
    """Install a custom TRACE level and configure root logging.

    Registers level 5 under the name "TRACE", exposes it as
    ``logging.TRACE``, attaches a ``trace()`` convenience method to
    ``logging.Logger``, and configures the root logger (level from the
    ``LOG_LEVEL`` env var, default INFO) with a file handler writing to
    ``autometabuilder.log`` plus a console handler.
    """
    logging.addLevelName(TRACE_LEVEL, "TRACE")
    if not hasattr(logging, "TRACE"):
        setattr(logging, "TRACE", TRACE_LEVEL)

    def trace(self, message, *args, **kwargs):
        # Respect the standard level filtering before emitting.
        if self.isEnabledFor(TRACE_LEVEL):
            self.log(TRACE_LEVEL, message, *args, **kwargs)

    logging.Logger.trace = trace  # type: ignore[attr-defined]
    requested = os.environ.get("LOG_LEVEL", "INFO").upper()
    logging.basicConfig(
        level=getattr(logging, requested, logging.INFO),
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[logging.FileHandler("autometabuilder.log"), logging.StreamHandler()],
    )
def parse_args():
    """Parse CLI arguments.

    Returns:
        argparse.Namespace with boolean flags: dry_run, yolo, once, web.
    """
    parser = argparse.ArgumentParser(
        description="AutoMetabuilder: AI-driven SDLC assistant."
    )
    # All options are simple on/off switches; declare them in one pass.
    switches = (
        ("--dry-run", "Do not execute state-modifying tools."),
        ("--yolo", "Execute tools without confirmation."),
        ("--once", "Run a single full iteration (AI -> Tool -> AI)."),
        ("--web", "Start the Web UI."),
    )
    for flag, help_text in switches:
        parser.add_argument(flag, action="store_true", help=help_text)
    return parser.parse_args()
def run_app() -> None:
"""Run the AutoMetabuilder CLI."""

View File

@@ -1,5 +1,12 @@
"""Build workflow runtime context."""
from ..utils.model_resolver import resolve_model_name
import os
DEFAULT_MODEL = "openai/gpt-4o"


def resolve_model_name(prompt: dict) -> str:
    """Resolve model name from env or prompt.

    Precedence: the ``LLM_MODEL`` environment variable, then the prompt's
    "model" key, then ``DEFAULT_MODEL``.
    """
    fallback = prompt.get("model", DEFAULT_MODEL)
    return os.environ.get("LLM_MODEL", fallback)
def build_workflow_context(parts: dict) -> dict:

View File

@@ -1,15 +1,15 @@
"""
Roadmap utilities - compatibility module that wraps workflow plugins.
This module provides backward-compatible functions for roadmap operations
by calling the underlying workflow plugin implementations.
"""
import os
import re
import logging
logger = logging.getLogger("autometabuilder")


def update_roadmap(content: str):
    """Overwrite ROADMAP.md in the working directory with *content*."""
    with open("ROADMAP.md", "w", encoding="utf-8") as roadmap:
        roadmap.write(content)
    logger.info("ROADMAP.md updated successfully.")
def is_mvp_reached() -> bool:
"""Check if the MVP section in ROADMAP.md is completed."""
@@ -43,3 +43,10 @@ def is_mvp_reached() -> bool:
return True
return False
def update_roadmap(content: str):
    """Overwrite ROADMAP.md in the working directory with *content*.

    Logs a confirmation line via the module-level "autometabuilder" logger.
    """
    with open("ROADMAP.md", "w", encoding="utf-8") as roadmap:
        roadmap.write(content)
    logger.info("ROADMAP.md updated successfully.")

View File

@@ -1,6 +1,42 @@
"""Run a task inside Docker."""
import subprocess
import os
from ..utils.docker_utils import run_command_in_docker
import logging
logger = logging.getLogger("autometabuilder.docker")


def run_command_in_docker(image: str, command: str, volumes: dict = None, workdir: str = None):
    """Run a command inside a Docker container.

    :param image: Docker image to use.
    :param command: Command to execute (run via ``sh -c`` in the container).
    :param volumes: Optional mapping of {host_path: container_path} bind mounts.
    :param workdir: Optional working directory inside the container.
    :return: Standard output of the command, with stderr appended if present.
    """
    docker_command = ["docker", "run", "--rm"]
    if volumes:
        for host_path, container_path in volumes.items():
            # Host paths are made absolute so relative paths work from any CWD.
            docker_command.extend(["-v", f"{os.path.abspath(host_path)}:{container_path}"])
    if workdir:
        docker_command.extend(["-w", workdir])
    docker_command.append(image)
    docker_command.extend(["sh", "-c", command])
    # Lazy %-args: skip formatting when INFO is disabled.
    logger.info("Executing in Docker (%s): %s", image, command)
    # check=False: non-zero exit codes are surfaced via captured output, not raised.
    result = subprocess.run(docker_command, capture_output=True, text=True, check=False)
    output = result.stdout
    if result.stderr:
        output += "\n" + result.stderr
    # Pass output as an argument so stray '%' in command output cannot be
    # misinterpreted as logging format directives.
    logger.info("%s", output)
    return output
def run_docker_task(image: str, command: str, workdir: str = "/workspace") -> str:

View File

@@ -1,28 +0,0 @@
"""
Utilities module for AutoMetabuilder.
This module contains various utility functions:
- cli_args: CLI argument parsing
- docker_utils: Docker command utilities
- logging_config: Logging configuration with TRACE support
- model_resolver: Resolve LLM model names
- roadmap_utils: Roadmap file utilities
- tool_map_builder: Build tool map from registry
"""
from .cli_args import parse_args
from .docker_utils import run_command_in_docker
from .logging_config import configure_logging
from .model_resolver import resolve_model_name
from .roadmap_utils import is_mvp_reached, update_roadmap
from .tool_map_builder import build_tool_map
# Public API of the utils package, re-exported from the submodules above.
__all__ = [
    "parse_args",
    "run_command_in_docker",
    "configure_logging",
    "resolve_model_name",
    "is_mvp_reached",
    "update_roadmap",
    "build_tool_map",
]

View File

@@ -1,24 +0,0 @@
"""CLI argument parsing."""
import argparse
def parse_args():
    """Parse CLI arguments.

    Returns:
        argparse.Namespace with boolean flags: dry_run, yolo, once, web.
    """
    parser = argparse.ArgumentParser(
        description="AutoMetabuilder: AI-driven SDLC assistant."
    )
    # Every option is a store_true switch; register them uniformly.
    for flag, help_text in (
        ("--dry-run", "Do not execute state-modifying tools."),
        ("--yolo", "Execute tools without confirmation."),
        ("--once", "Run a single full iteration (AI -> Tool -> AI)."),
        ("--web", "Start the Web UI."),
    ):
        parser.add_argument(flag, action="store_true", help=help_text)
    return parser.parse_args()

View File

@@ -1,37 +0,0 @@
import subprocess
import os
import logging
logger = logging.getLogger("autometabuilder.docker")


def run_command_in_docker(image: str, command: str, volumes: dict = None, workdir: str = None):
    """Run a command inside a Docker container.

    :param image: Docker image to use.
    :param command: Command to execute (run via ``sh -c`` in the container).
    :param volumes: Optional mapping of {host_path: container_path} bind mounts.
    :param workdir: Optional working directory inside the container.
    :return: Standard output of the command, with stderr appended if present.
    """
    docker_command = ["docker", "run", "--rm"]
    if volumes:
        for host_path, container_path in volumes.items():
            # Host paths are made absolute so relative paths work from any CWD.
            docker_command.extend(["-v", f"{os.path.abspath(host_path)}:{container_path}"])
    if workdir:
        docker_command.extend(["-w", workdir])
    docker_command.append(image)
    docker_command.extend(["sh", "-c", command])
    # Lazy %-args: skip formatting when INFO is disabled.
    logger.info("Executing in Docker (%s): %s", image, command)
    # check=False: non-zero exit codes are surfaced via captured output, not raised.
    result = subprocess.run(docker_command, capture_output=True, text=True, check=False)
    output = result.stdout
    if result.stderr:
        output += "\n" + result.stderr
    # Pass output as an argument so stray '%' in command output cannot be
    # misinterpreted as logging format directives.
    logger.info("%s", output)
    return output

View File

@@ -1,29 +0,0 @@
"""Logging configuration with TRACE support."""
import logging
import os
TRACE_LEVEL = 5


def configure_logging() -> None:
    """Install a custom TRACE level and configure root logging.

    Registers level 5 under the name "TRACE", exposes it as
    ``logging.TRACE``, attaches a ``trace()`` convenience method to
    ``logging.Logger``, and configures the root logger (level from the
    ``LOG_LEVEL`` env var, default INFO) with a file handler writing to
    ``autometabuilder.log`` plus a console handler.
    """
    logging.addLevelName(TRACE_LEVEL, "TRACE")
    if not hasattr(logging, "TRACE"):
        setattr(logging, "TRACE", TRACE_LEVEL)

    def trace(self, message, *args, **kwargs):
        # Respect the standard level filtering before emitting.
        if self.isEnabledFor(TRACE_LEVEL):
            self.log(TRACE_LEVEL, message, *args, **kwargs)

    logging.Logger.trace = trace  # type: ignore[attr-defined]
    requested = os.environ.get("LOG_LEVEL", "INFO").upper()
    logging.basicConfig(
        level=getattr(logging, requested, logging.INFO),
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        handlers=[logging.FileHandler("autometabuilder.log"), logging.StreamHandler()],
    )

View File

@@ -1,9 +0,0 @@
"""Resolve the LLM model name."""
import os
DEFAULT_MODEL = "openai/gpt-4o"


def resolve_model_name(prompt: dict) -> str:
    """Resolve model name from env or prompt.

    Precedence: the ``LLM_MODEL`` environment variable, then the prompt's
    "model" key, then ``DEFAULT_MODEL``.
    """
    fallback = prompt.get("model", DEFAULT_MODEL)
    return os.environ.get("LLM_MODEL", fallback)

View File

@@ -1,22 +0,0 @@
"""Build tool map from registry entries."""
from ..loaders.callable_loader import load_callable
def build_tool_map(gh, registry_entries: list) -> dict:
    """Build tool name to callable map.

    "github" entries resolve to a method looked up on *gh*; "module"
    entries resolve through load_callable; entries with an unknown
    provider (or missing pieces) map to None. Nameless entries are skipped.
    """
    tool_map = {}
    for entry in registry_entries:
        name = entry.get("name")
        if not name:
            continue  # a tool without a name cannot be addressed
        provider = entry.get("provider")
        if provider == "github":
            method = entry.get("method")
            resolved = getattr(gh, method) if gh and method else None
        elif provider == "module":
            path = entry.get("callable")
            resolved = load_callable(path) if path else None
        else:
            resolved = None
        tool_map[name] = resolved
    return tool_map

View File

@@ -1,13 +1,33 @@
"""Workflow plugin: build tool map."""
from ....utils.tool_map_builder import build_tool_map
from ....loaders.callable_loader import load_callable
from ....loaders.tool_registry_loader import load_tool_registry
def _build_tool_map(gh, registry_entries: list) -> dict:
"""Build tool name to callable map."""
tool_map = {}
for entry in registry_entries:
name = entry.get("name")
provider = entry.get("provider")
if not name:
continue
if provider == "github":
method = entry.get("method")
tool_map[name] = getattr(gh, method) if gh and method else None
continue
if provider == "module":
path = entry.get("callable")
tool_map[name] = load_callable(path) if path else None
continue
tool_map[name] = None
return tool_map
def run(runtime, _inputs):
    """Build tool registry map.

    Loads the tool registry and resolves every entry to a callable via
    _build_tool_map. The map is both stored in runtime.context (so other
    plugins can reach it) and returned as the step result.
    """
    gh = runtime.context.get("gh")
    registry = load_tool_registry()
    # Only the inlined helper exists here; the old utils-based
    # build_tool_map was removed in the plugin refactor.
    tool_map = _build_tool_map(gh, registry)
    # Store in both store (for workflow) and context (for other plugins)
    runtime.context["tool_map"] = tool_map
    return {"result": tool_map}

View File

@@ -1,5 +1,32 @@
"""Workflow plugin: configure logging."""
from ....utils.logging_config import configure_logging
import logging
import os
TRACE_LEVEL = 5
def _configure_logging() -> None:
"""Configure logging with TRACE support."""
logging.addLevelName(TRACE_LEVEL, "TRACE")
if not hasattr(logging, "TRACE"):
setattr(logging, "TRACE", TRACE_LEVEL)
def trace(self, message, *args, **kwargs):
if self.isEnabledFor(TRACE_LEVEL):
self.log(TRACE_LEVEL, message, *args, **kwargs)
logging.Logger.trace = trace # type: ignore[attr-defined]
level_name = os.environ.get("LOG_LEVEL", "INFO").upper()
level = getattr(logging, level_name, logging.INFO)
logging.basicConfig(
level=level,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[
logging.FileHandler("autometabuilder.log"),
logging.StreamHandler()
]
)
def run(_runtime, _inputs):
@@ -14,5 +41,5 @@ def run(_runtime, _inputs):
Returns:
dict: Success indicator
"""
configure_logging()
_configure_logging()
return {"result": "Logging configured"}

View File

@@ -1,6 +1,13 @@
"""Workflow plugin: load prompt configuration."""
import os
from ....loaders.prompt_loader import load_prompt_yaml
from ....utils.model_resolver import resolve_model_name
DEFAULT_MODEL = "openai/gpt-4o"
def _resolve_model_name(prompt: dict) -> str:
"""Resolve model name from env or prompt."""
return os.environ.get("LLM_MODEL", prompt.get("model", DEFAULT_MODEL))
def run(runtime, _inputs):
@@ -9,5 +16,5 @@ def run(runtime, _inputs):
# Store in both store (for workflow) and context (for other plugins)
runtime.context["prompt"] = prompt
# Update model_name based on loaded prompt
runtime.context["model_name"] = resolve_model_name(prompt)
runtime.context["model_name"] = _resolve_model_name(prompt)
return {"result": prompt}

View File

@@ -1,10 +1,32 @@
"""Workflow plugin: parse CLI arguments."""
from ....utils.cli_args import parse_args
import argparse
def _parse_args():
"""Parse CLI arguments."""
parser = argparse.ArgumentParser(description="AutoMetabuilder: AI-driven SDLC assistant.")
parser.add_argument(
"--dry-run",
action="store_true",
help="Do not execute state-modifying tools."
)
parser.add_argument(
"--yolo",
action="store_true",
help="Execute tools without confirmation."
)
parser.add_argument(
"--once",
action="store_true",
help="Run a single full iteration (AI -> Tool -> AI)."
)
parser.add_argument("--web", action="store_true", help="Start the Web UI.")
return parser.parse_args()
def run(runtime, _inputs):
"""Parse command line arguments."""
args = parse_args()
args = _parse_args()
# Store in context for other plugins
runtime.context["args"] = args
return {

View File

@@ -1,6 +1,41 @@
"""Workflow plugin: append tool results."""
import os
import re
from ....integrations.notifications import notify_all
from ....utils.roadmap_utils import is_mvp_reached
def _is_mvp_reached() -> bool:
"""Check if the MVP section in ROADMAP.md is completed."""
if not os.path.exists("ROADMAP.md"):
return False
with open("ROADMAP.md", "r", encoding="utf-8") as f:
content = f.read()
# Find the header line containing (MVP)
header_match = re.search(r"^## .*?\(MVP\).*?$", content, re.MULTILINE | re.IGNORECASE)
if not header_match:
return False
# Get the position of the header
start_pos = header_match.end()
# Find the next header starting from start_pos
next_header_match = re.search(r"^## ", content[start_pos:], re.MULTILINE)
if next_header_match:
mvp_section = content[start_pos : start_pos + next_header_match.start()]
else:
mvp_section = content[start_pos:]
# Check if there are any unchecked items [ ]
if "[ ]" in mvp_section:
return False
# If there are checked items [x], and no unchecked items, we consider it reached
if "[x]" in mvp_section:
return True
return False
def run(runtime, inputs):
@@ -10,7 +45,7 @@ def run(runtime, inputs):
if tool_results:
messages.extend(tool_results)
if runtime.context["args"].yolo and is_mvp_reached():
if runtime.context["args"].yolo and _is_mvp_reached():
runtime.logger.info("MVP reached. Stopping YOLO loop.")
notify_all("AutoMetabuilder YOLO loop stopped: MVP reached.")

View File

@@ -1,5 +1,42 @@
"""Workflow plugin: run command in Docker container."""
from ....utils.docker_utils import run_command_in_docker
import subprocess
import os
import logging
logger = logging.getLogger("autometabuilder.docker")


def _run_command_in_docker(image: str, command: str, volumes: dict = None, workdir: str = None):
    """Run a command inside a Docker container.

    :param image: Docker image to use.
    :param command: Command to execute (run via ``sh -c`` in the container).
    :param volumes: Optional mapping of {host_path: container_path} bind mounts.
    :param workdir: Optional working directory inside the container.
    :return: Standard output of the command, with stderr appended if present.
    """
    docker_command = ["docker", "run", "--rm"]
    if volumes:
        for host_path, container_path in volumes.items():
            # Host paths are made absolute so relative paths work from any CWD.
            docker_command.extend(["-v", f"{os.path.abspath(host_path)}:{container_path}"])
    if workdir:
        docker_command.extend(["-w", workdir])
    docker_command.append(image)
    docker_command.extend(["sh", "-c", command])
    # Lazy %-args: skip formatting when INFO is disabled.
    logger.info("Executing in Docker (%s): %s", image, command)
    # check=False: non-zero exit codes are surfaced via captured output, not raised.
    result = subprocess.run(docker_command, capture_output=True, text=True, check=False)
    output = result.stdout
    if result.stderr:
        output += "\n" + result.stderr
    # Pass output as an argument so stray '%' in command output cannot be
    # misinterpreted as logging format directives.
    logger.info("%s", output)
    return output
def run(_runtime, inputs):
@@ -20,5 +57,5 @@ def run(_runtime, inputs):
if not image or not command:
return {"error": "Both 'image' and 'command' are required"}
output = run_command_in_docker(image, command, volumes, workdir)
output = _run_command_in_docker(image, command, volumes, workdir)
return {"output": output}

View File

@@ -1,8 +1,43 @@
"""Workflow plugin: check if MVP is reached."""
from ....utils.roadmap_utils import is_mvp_reached
import os
import re
def _is_mvp_reached() -> bool:
"""Check if the MVP section in ROADMAP.md is completed."""
if not os.path.exists("ROADMAP.md"):
return False
with open("ROADMAP.md", "r", encoding="utf-8") as f:
content = f.read()
# Find the header line containing (MVP)
header_match = re.search(r"^## .*?\(MVP\).*?$", content, re.MULTILINE | re.IGNORECASE)
if not header_match:
return False
# Get the position of the header
start_pos = header_match.end()
# Find the next header starting from start_pos
next_header_match = re.search(r"^## ", content[start_pos:], re.MULTILINE)
if next_header_match:
mvp_section = content[start_pos : start_pos + next_header_match.start()]
else:
mvp_section = content[start_pos:]
# Check if there are any unchecked items [ ]
if "[ ]" in mvp_section:
return False
# If there are checked items [x], and no unchecked items, we consider it reached
if "[x]" in mvp_section:
return True
return False
def run(_runtime, _inputs):
    """Check if the MVP section in ROADMAP.md is completed.

    Returns:
        dict: {"mvp_reached": bool} as reported by _is_mvp_reached.
    """
    # Only the inlined helper exists here; the old utils-based
    # is_mvp_reached was removed in the plugin refactor.
    mvp_reached = _is_mvp_reached()
    return {"mvp_reached": mvp_reached}

View File

@@ -1,5 +1,14 @@
"""Workflow plugin: update roadmap file."""
from ....utils.roadmap_utils import update_roadmap
import logging
logger = logging.getLogger("autometabuilder")
def _update_roadmap(content: str):
"""Update ROADMAP.md with new content."""
with open("ROADMAP.md", "w", encoding="utf-8") as f:
f.write(content)
logger.info("ROADMAP.md updated successfully.")
def run(_runtime, inputs):
@@ -8,5 +17,5 @@ def run(_runtime, inputs):
if not content:
return {"error": "Content is required"}
update_roadmap(content)
_update_roadmap(content)
return {"result": "ROADMAP.md updated successfully"}