Refactor codebase for better modularity: move load_messages to __init__.py, streamline GitHub operations in github_integration.py, and enhance main.py with improved function organization and error handling.

This commit is contained in:
2026-01-09 13:27:44 +00:00
parent 2aa566d07a
commit 160864a320
3 changed files with 143 additions and 86 deletions

View File

@@ -0,0 +1,20 @@
"""
AutoMetabuilder package.
"""
import os
import json
def load_messages():
    """Load the message catalog selected by the APP_LANG environment variable.

    Falls back to the English catalog when no file exists for the requested
    language. Returns the parsed JSON content as a dict.
    """
    lang = os.environ.get("APP_LANG", "en")
    base_dir = os.path.dirname(__file__)
    candidate = os.path.join(base_dir, f"messages_{lang}.json")
    # Fall back to English when the requested language file is absent.
    if not os.path.exists(candidate):
        candidate = os.path.join(base_dir, "messages_en.json")
    with open(candidate, "r", encoding="utf-8") as handle:
        return json.load(handle)

View File

@@ -1,43 +1,56 @@
"""
GitHub integration module.
"""
import os
from github import Github
from github.Repository import Repository
from github.Issue import Issue
from github.PullRequest import PullRequest
import json
from . import load_messages
def load_messages():
    """Load the message catalog for the language in APP_LANG (default "en").

    NOTE(review): this duplicates the package-level load_messages imported
    above (`from . import load_messages`); being defined later, it shadows
    the import and should likely be removed.
    """
    lang = os.environ.get("APP_LANG", "en")
    messages_path = os.path.join(os.path.dirname(__file__), f"messages_{lang}.json")
    if not os.path.exists(messages_path):
        # Fallback to English if the requested language file doesn't exist
        messages_path = os.path.join(os.path.dirname(__file__), "messages_en.json")
    # Read explicitly as UTF-8 so behavior doesn't depend on the platform's
    # default locale encoding (matches the package-level implementation).
    with open(messages_path, "r", encoding="utf-8") as f:
        return json.load(f)
class GitHubIntegration:
    """Class to handle GitHub interactions for a single repository.

    Wraps a PyGithub client bound to one repository and exposes the small
    set of operations the tool loop needs (issues, branches, pull requests).
    """

    def __init__(self, token: str, repo_name: str):
        # token: a GitHub access token; repo_name: "owner/repo" full name.
        self.github = Github(token)
        self.repo = self.github.get_repo(repo_name)

    def get_open_issues(self):
        """Get open issues from the repository."""
        return self.repo.get_issues(state='open')

    def get_issue(self, issue_number: int) -> Issue:
        """Get a specific issue by number."""
        return self.repo.get_issue(number=issue_number)

    def create_branch(self, branch_name: str, base_branch: str = "main"):
        """Create a new branch pointing at the tip of *base_branch*."""
        base_ref = self.repo.get_git_ref(f"heads/{base_branch}")
        # The rendered diff showed this call duplicated (old + new lines);
        # creating the same ref twice would fail — issue it exactly once.
        self.repo.create_git_ref(
            ref=f"refs/heads/{branch_name}", sha=base_ref.object.sha
        )

    def create_pull_request(
        self,
        title: str,
        body: str,
        head_branch: str,
        base_branch: str = "main",
    ) -> PullRequest:
        """Create a new pull request from *head_branch* into *base_branch*."""
        return self.repo.create_pull(
            title=title, body=body, head=head_branch, base=base_branch
        )

    def get_pull_requests(self, state: str = "open"):
        """Get pull requests from the repository (default: open ones)."""
        return self.repo.get_pulls(state=state)
def get_repo_name_from_env() -> str:
    """Retrieve the "owner/repo" name from the GITHUB_REPOSITORY env var.

    Raises:
        ValueError: if GITHUB_REPOSITORY is not set. Callers (see main())
        wrap this in a broad try/except, so raising is safe here.
    """
    repo_name = os.environ.get("GITHUB_REPOSITORY")
    if not repo_name:
        # NOTE(review): the tail of this function was cut off in the diff
        # view ("# Fallback or error"); raising is the reconstructed intent.
        raise ValueError("GITHUB_REPOSITORY environment variable is not set")
    return repo_name

View File

@@ -1,112 +1,69 @@
"""
Main entry point for AutoMetabuilder.
"""
import os
import json
import requests
import yaml
import json
from dotenv import load_dotenv
from openai import OpenAI
from . import load_messages
from .github_integration import GitHubIntegration, get_repo_name_from_env
load_dotenv()
# Remote prompt configuration and model endpoint defaults, both overridable
# via the RAW_PROMPT_URL / GITHUB_MODELS_ENDPOINT environment variables.
# (The rendered diff showed DEFAULT_RAW_PROMPT_URL assigned twice — old and
# new lines interleaved; a single definition is kept here.)
DEFAULT_RAW_PROMPT_URL = (
    "https://raw.githubusercontent.com/johndoe6345789/"
    "metabuilder/main/getonwithit.prompt.yml"
)
DEFAULT_ENDPOINT = "https://models.github.ai/inference"
def load_prompt_yaml(url: str, token: str) -> dict:
    """Fetch and parse a remote YAML prompt configuration.

    Sends *token* as a Bearer credential; raises on any non-2xx response.
    """
    response = requests.get(
        url, headers={"Authorization": f"Bearer {token}"}, timeout=30
    )
    response.raise_for_status()
    return yaml.safe_load(response.text)
def load_messages():
    """Load the message catalog for the language in APP_LANG (default "en").

    NOTE(review): this duplicates the package-level load_messages imported
    above (`from . import load_messages`); being defined later, it shadows
    the import and should likely be removed.
    """
    lang = os.environ.get("APP_LANG", "en")
    messages_path = os.path.join(os.path.dirname(__file__), f"messages_{lang}.json")
    if not os.path.exists(messages_path):
        # Fallback to English if the requested language file doesn't exist
        messages_path = os.path.join(os.path.dirname(__file__), "messages_en.json")
    # Read explicitly as UTF-8 so behavior doesn't depend on the platform's
    # default locale encoding (matches the package-level implementation).
    with open(messages_path, "r", encoding="utf-8") as f:
        return json.load(f)
def main():
    # NOTE(review): this appears to be the pre-refactor version of main()
    # left behind by the rendered diff; a second, complete main() is defined
    # later in this file and, being defined later, is the one that actually
    # runs. This copy is also truncated (it stops after loading tools.json)
    # and should be deleted.
    msgs = load_messages()
    token = os.environ.get("GITHUB_TOKEN")
    if not token:
        print(msgs["error_github_token_missing"])
        return
    # Initialize GitHub Integration
    try:
        repo_name = get_repo_name_from_env()
        gh = GitHubIntegration(token, repo_name)
        print(msgs["info_integrated_repo"].format(repo_name=repo_name))
    except Exception as e:
        print(msgs["warn_github_init_failed"].format(error=e))
        gh = None
    endpoint = os.environ.get("GITHUB_MODELS_ENDPOINT", DEFAULT_ENDPOINT)
    client = OpenAI(
        base_url=endpoint,
        api_key=token,
    )
    prompt_url = os.environ.get("RAW_PROMPT_URL", DEFAULT_RAW_PROMPT_URL)
    prompt = load_prompt_yaml(prompt_url, token)
    messages = prompt["messages"]
    model = prompt.get("model", "openai/gpt-4.1")
    # Load tools for SDLC operations from JSON file
    tools_path = os.path.join(os.path.dirname(__file__), "tools.json")
    with open(tools_path, "r") as f:
        tools = json.load(f)
    # Add SDLC Context if available
def get_sdlc_context(gh: GitHubIntegration, msgs: dict) -> str:
    """Retrieve SDLC context (open issues and PRs) from GitHub.

    Builds a text summary of up to five open issues and five open pull
    requests. Returns an empty string when *gh* is None; on lookup failure
    it prints the localized error and returns whatever was built so far.
    (The rendered diff showed a duplicated issue_list assignment and a
    duplicated except clause; a single copy of each is kept here.)
    """
    sdlc_context = ""
    if gh:
        try:
            issues = gh.get_open_issues()
            issue_list = "\n".join(
                [f"- #{i.number}: {i.title}" for i in issues[:5]]
            )
            if issue_list:
                sdlc_context += f"\n{msgs['open_issues_label']}\n{issue_list}"
            prs = gh.get_pull_requests()
            pr_list = "\n".join([f"- #{p.number}: {p.title}" for p in prs[:5]])
            if pr_list:
                sdlc_context += f"\n{msgs['open_prs_label']}\n{pr_list}"
        except Exception as e:  # pylint: disable=broad-exception-caught
            # Best-effort: report the failure and fall through.
            print(msgs["error_sdlc_context"].format(error=e))
    return sdlc_context
    # NOTE(review): the statements below appear to be orphaned remnants of
    # the pre-refactor main() body left behind by the rendered diff. They
    # reference names not defined at this scope (sdlc_context, messages,
    # client, model, tools, msgs) and duplicate logic the later main()
    # performs via get_sdlc_context()/handle_tool_calls(). The trailing
    # if/for at the end have no bodies. This whole span should be deleted.
    if sdlc_context:
        messages.append({"role": "system", "content": f"{msgs['sdlc_context_label']}{sdlc_context}"})
    # Add runtime request
    messages.append({"role": "user", "content": msgs["user_next_step"]})
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        tools=tools,
        tool_choice="auto",
        temperature=1.0,
        top_p=1.0,
    )
    response_message = response.choices[0].message
    print(response_message.content if response_message.content else msgs["info_tool_call_requested"])
    # Handle tool calls
    if response_message.tool_calls:
        for tool_call in response_message.tool_calls:
def handle_tool_calls(resp_msg, gh: GitHubIntegration, msgs: dict):
    """Process tool calls from the AI response.

    Dispatches each requested tool call to the matching GitHubIntegration
    method; prints a localized error when GitHub is not available.
    (The rendered diff duplicated the create_branch announcement print and
    embedded a hunk header mid-function; both are cleaned up here.)
    """
    if resp_msg.tool_calls:
        for tool_call in resp_msg.tool_calls:
            function_name = tool_call.function.name
            args = json.loads(tool_call.function.arguments)
            if function_name == "create_branch":
                if gh:
                    print(
                        msgs["info_executing_create_branch"].format(args=args)
                    )
                    gh.create_branch(**args)
                else:
                    print(msgs["error_github_not_available"])
            elif function_name == "create_pull_request":
                if gh:
                    print(msgs["info_executing_create_pr"].format(args=args))
                    # NOTE(review): the diff elided lines here; by symmetry
                    # with create_branch this presumably invoked the PR
                    # creation — confirm against the full file.
                    gh.create_pull_request(**args)
                else:
                    print(msgs["error_github_not_available"])
def main():
    """Main function to run AutoMetabuilder."""
    texts = load_messages()
    token = os.environ.get("GITHUB_TOKEN")
    if not token:
        print(texts["error_github_token_missing"])
        return
    # Set up GitHub integration; continue without it if initialization fails.
    gh = None
    try:
        repo_name = get_repo_name_from_env()
        gh = GitHubIntegration(token, repo_name)
        print(texts["info_integrated_repo"].format(repo_name=repo_name))
    except Exception as err:  # pylint: disable=broad-exception-caught
        print(texts["warn_github_init_failed"].format(error=err))
    endpoint = os.environ.get("GITHUB_MODELS_ENDPOINT", DEFAULT_ENDPOINT)
    client = OpenAI(base_url=endpoint, api_key=token)
    prompt_url = os.environ.get("RAW_PROMPT_URL", DEFAULT_RAW_PROMPT_URL)
    prompt = load_prompt_yaml(prompt_url, token)
    # SDLC tool definitions live in tools.json next to this module.
    tools_file = os.path.join(os.path.dirname(__file__), "tools.json")
    with open(tools_file, "r", encoding="utf-8") as handle:
        tools = json.load(handle)
    # Prepend repository context (open issues/PRs) when available.
    context = get_sdlc_context(gh, texts)
    conversation = prompt["messages"]
    if context:
        conversation.append(
            {
                "role": "system",
                "content": f"{texts['sdlc_context_label']}{context}",
            }
        )
    # Add runtime request
    conversation.append({"role": "user", "content": texts["user_next_step"]})
    response = client.chat.completions.create(
        model=prompt.get("model", "openai/gpt-4.1"),
        messages=conversation,
        tools=tools,
        tool_choice="auto",
        temperature=1.0,
        top_p=1.0,
    )
    reply = response.choices[0].message
    if reply.content:
        print(reply.content)
    else:
        print(texts["info_tool_call_requested"])
    # Execute any tool calls the model requested.
    handle_tool_calls(reply, gh, texts)
# Allow running this module directly as a script entry point.
if __name__ == "__main__":
    main()