mirror of
https://github.com/johndoe6345789/metabuilder.git
synced 2026-04-24 22:04:56 +00:00
feat(deployment): add modular Python CLI, fix node-deps registry routing, bump to Node 24
- Dockerfile.node-deps: upgrade FROM node:22 to node:24 - Dockerfile.node-deps: rewrite main registry= line to Nexus when detected (was only rewriting scoped @esbuild-kit registry, leaving registry.npmjs.org unreachable inside Docker) - Dockerfile.node-deps: fix sed ordering so cleanup of old auth lines runs before registry rewrite (prevents new registry= line from being deleted) - Add deployment/cli/ modular Python CLI powered by JSON config, replacing 12 shell scripts (build-base-images.sh, build-apps.sh, deploy.sh, start-stack.sh, release.sh, nexus-init.sh, nexus-ci-init.sh, push-to-nexus.sh, populate-nexus.sh, publish-npm-patches.sh, build-testcontainers.sh, artifactory-init.sh) - Bump rocksdict 0.3.23 -> 0.3.29 (old version removed from PyPI) Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -1,13 +1,13 @@
|
||||
# metabuilder/base-node-deps
|
||||
#
|
||||
# Node 22 + all 33 workspace npm packages pre-installed.
|
||||
# Node 24 + all 33 workspace npm packages pre-installed.
|
||||
# App Dockerfiles copy node_modules from this image instead of running npm ci.
|
||||
#
|
||||
# Build: docker build -f Dockerfile.node-deps -t metabuilder/base-node-deps:latest ../../
|
||||
# App Dockerfiles:
|
||||
# COPY --from=metabuilder/base-node-deps /app/node_modules ./node_modules
|
||||
|
||||
FROM node:22
|
||||
FROM node:24
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
@@ -91,11 +91,12 @@ RUN npm config set fetch-retries 5 \
|
||||
echo " Verdaccio detected at $VERDACCIO_LOCAL"; \
|
||||
fi \
|
||||
&& if [ -n "$LOCAL_REG" ]; then \
|
||||
echo "==> Rewriting .npmrc scoped registries → $LOCAL_REG"; \
|
||||
sed -i 's|@esbuild-kit:registry=.*|@esbuild-kit:registry='"$LOCAL_REG"'|' .npmrc; \
|
||||
echo "==> Rewriting .npmrc registries → $LOCAL_REG"; \
|
||||
sed -i '/\/\/localhost:4873\//d' .npmrc; \
|
||||
sed -i '/\/\/localhost:8091\//d' .npmrc; \
|
||||
sed -i '/\/\/host.docker.internal/d' .npmrc; \
|
||||
sed -i 's|^registry=.*|registry='"$LOCAL_REG"'|' .npmrc; \
|
||||
sed -i 's|@esbuild-kit:registry=.*|@esbuild-kit:registry='"$LOCAL_REG"'|' .npmrc; \
|
||||
echo "$LOCAL_REG_AUTH" >> .npmrc; \
|
||||
else \
|
||||
echo ""; \
|
||||
|
||||
5
deployment/cli/__init__.py
Normal file
5
deployment/cli/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
"""MetaBuilder Deployment CLI — modular command system powered by JSON config."""

# Re-export the loader entry points so callers only need `import cli`
# and never depend on the internal module layout.
from cli.loader import build_parser, dispatch

# Explicit public API of the package.
__all__ = ["build_parser", "dispatch"]
|
||||
117
deployment/cli/artifactory_init.py
Normal file
117
deployment/cli/artifactory_init.py
Normal file
@@ -0,0 +1,117 @@
|
||||
"""Initialize Artifactory CE Conan2 local + remote + virtual repositories."""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
from cli.helpers import curl_status, run
|
||||
|
||||
|
||||
REPO_CONFIGS = [
|
||||
("conan-local", """localRepositories:
|
||||
conan-local:
|
||||
key: conan-local
|
||||
type: conan
|
||||
packageType: conan
|
||||
description: "Local Conan2 repository for private packages"
|
||||
repoLayoutRef: conan-default
|
||||
handleReleases: true
|
||||
handleSnapshots: false"""),
|
||||
|
||||
("conan-remote", """remoteRepositories:
|
||||
conan-remote:
|
||||
key: conan-remote
|
||||
type: conan
|
||||
packageType: conan
|
||||
url: "https://center2.conan.io"
|
||||
description: "Proxy cache for Conan Center"
|
||||
repoLayoutRef: conan-default
|
||||
handleReleases: true
|
||||
handleSnapshots: false"""),
|
||||
|
||||
("generic-local", """localRepositories:
|
||||
generic-local:
|
||||
key: generic-local
|
||||
type: generic
|
||||
packageType: generic
|
||||
description: "Generic artifact storage"
|
||||
repoLayoutRef: simple-default
|
||||
handleReleases: true
|
||||
handleSnapshots: false"""),
|
||||
|
||||
("conan-virtual", """virtualRepositories:
|
||||
conan-virtual:
|
||||
key: conan-virtual
|
||||
type: conan
|
||||
packageType: conan
|
||||
description: "Virtual Conan2 repo — local packages + ConanCenter cache"
|
||||
repositories:
|
||||
- conan-local
|
||||
- conan-remote
|
||||
defaultDeploymentRepo: conan-local"""),
|
||||
]
|
||||
|
||||
|
||||
def run_cmd(args: argparse.Namespace, config: dict) -> int:
    """Create the Conan local/remote/virtual and generic repos in Artifactory.

    Waits for the Artifactory REST API to come up, PATCHes each entry of
    REPO_CONFIGS into the system configuration (skipping repos that already
    exist), then probes each repo URL as a sanity check.

    Returns 0 on success, 1 if the API never came up or a PATCH failed.
    """
    art_url = os.environ.get("ARTIFACTORY_URL", "http://artifactory:8081")
    admin_pass = os.environ.get("ARTIFACTORY_ADMIN_PASS", "password")
    auth = f"admin:{admin_pass}"
    api = f"{art_url}/artifactory/api"

    def alog(msg: str) -> None:
        # Uniform log prefix for this command.
        print(f"[artifactory-init] {msg}")

    # Poll the ping endpoint: 30 attempts x 2s sleep = up to ~60s.
    alog("Waiting for Artifactory API...")
    ready = False
    for _ in range(30):
        if curl_status(f"{api}/system/ping", auth) == 200:
            ready = True
            break
        time.sleep(2)
    if not ready:
        alog("ERROR: Artifactory API not ready after 60s")
        return 1
    alog("Artifactory API is ready")

    # Fetch the current repo list once so existing repos can be skipped.
    result = subprocess.run(
        ["curl", "-sf", "-u", auth, f"{api}/repositories"],
        capture_output=True, text=True,
    )
    existing = result.stdout if result.returncode == 0 else "[]"

    for repo_name, yaml_body in REPO_CONFIGS:
        if f'"{repo_name}"' in existing:
            alog(f"{repo_name} already exists, skipping")
            continue
        # PATCH the YAML snippet into the system configuration; the
        # "\n%{http_code}" write-out appends the status code as a final line.
        result = subprocess.run([
            "curl", "-s", "-w", "\n%{http_code}", "-X", "PATCH",
            f"{api}/system/configuration",
            "-u", auth, "-H", "Content-Type: application/yaml", "-d", yaml_body,
        ], capture_output=True, text=True)
        lines = result.stdout.strip().split("\n")
        code = lines[-1]  # str.split always yields >= 1 element, guard was dead
        body = "\n".join(lines[:-1])
        if code == "200":
            alog(f"Created {repo_name} — {body}")
        else:
            alog(f"ERROR: {repo_name} returned HTTP {code}: {body}")
            return 1

    # Sanity-probe each repo root; WARN (not fail) on anything != 200.
    alog("Verifying repositories...")
    for repo_name in ["conan-local", "conan-remote", "conan-virtual", "generic-local"]:
        status = curl_status(f"{art_url}/artifactory/{repo_name}/", auth)
        alog(f" {'ok' if status == 200 else f'WARN (HTTP {status})'} {repo_name}")

    alog("")
    alog("=" * 38)
    alog(" Artifactory CE ready!")
    # NOTE(review): the host port 8092 is hard-coded here while art_url is
    # configurable — presumably the compose port mapping; confirm it matches.
    alog(" Web UI : http://localhost:8092")
    alog(f" Login : admin / {admin_pass}")
    alog(f" Conan virtual : {art_url}/artifactory/api/conan/conan-virtual")
    alog("=" * 38)
    return 0


# Module entry point — called by loader.dispatch()
run = run_cmd
|
||||
112
deployment/cli/build_apps.py
Normal file
112
deployment/cli/build_apps.py
Normal file
@@ -0,0 +1,112 @@
|
||||
"""Build application Docker images via docker compose."""
|
||||
|
||||
import argparse
|
||||
import time
|
||||
from cli.helpers import (
|
||||
BASE_DIR, PROJECT_ROOT, GREEN, YELLOW, NC,
|
||||
docker_compose, docker_image_exists, log_err, log_info, log_ok, log_warn,
|
||||
pull_with_retry, resolve_services, run,
|
||||
)
|
||||
|
||||
|
||||
def run_cmd(args: argparse.Namespace, config: dict) -> int:
    """Build the selected application images, retrying transient failures.

    Ensures the mandatory base-node-deps image exists (building it when
    missing), warns about absent optional base images, filters out apps
    whose images already exist (unless --force), then builds the remaining
    compose services sequentially or in parallel with up to five attempts
    and a linearly growing back-off between them.
    """
    definitions = config["definitions"]
    base_images = definitions["base_images"]

    # The node-deps base is required by every Node.js frontend image.
    node_meta = base_images["node-deps"]
    node_tag = node_meta["tag"]
    if docker_image_exists(node_tag):
        log_ok(f"Base image {node_tag} exists")
    else:
        log_warn(f"Building {node_tag} (required by all Node.js frontends)...")
        build_proc = run([
            "docker", "build",
            "-f", str(BASE_DIR / node_meta["dockerfile"]),
            "-t", node_tag, str(PROJECT_ROOT),
        ])
        if build_proc.returncode != 0:
            log_err("Failed to build base-node-deps — cannot proceed")
            return 1

    # Optional bases are only needed for C++ daemons / the dev container.
    optional_keys = ["apt", "conan-deps", "pip-deps", "android-sdk"]
    absent = [
        base_images[key]["tag"]
        for key in optional_keys
        if not docker_image_exists(base_images[key]["tag"])
    ]
    if absent:
        log_warn("Optional base images not built (C++ daemons, dev container):")
        for img in absent:
            print(f" - {img}")
        print(f"{YELLOW}Build with:{NC} python3 deployment.py build base\n")

    all_apps = definitions["all_apps"]
    targets = args.apps if args.apps else list(all_apps)
    services = resolve_services(targets, config)
    if services is None:
        return 1

    # Without --force, drop apps whose images already exist.
    if not args.force:
        pending_names: list[str] = []
        pending_services: list[str] = []
        for name, svc in zip(targets, services):
            img = f"deployment-{svc}"
            if docker_image_exists(img):
                log_ok(f"Skipping {name} — image {img} already exists (use --force to rebuild)")
            else:
                pending_names.append(name)
                pending_services.append(svc)
        if not pending_services:
            print(f"\n{GREEN}All app images already built! Use --force to rebuild.{NC}")
            return 0
        targets, services = pending_names, pending_services

    print(f"{YELLOW}Building: {' '.join(targets)}{NC}\n")

    # Pre-pull external base images so compose builds don't race the network.
    log_info("Pre-pulling base images for app builds...")
    for img in definitions["external_images"]["build_bases"]:
        if not docker_image_exists(img):
            print(f" Pulling {img}...")
            pull_with_retry(img)

    # Build with retries: linear back-off (10s, 20s, ...) between attempts.
    max_attempts = 5
    build_ok = False
    for attempt in range(1, max_attempts + 1):
        if attempt > 1:
            log_warn(f"Build attempt {attempt}/{max_attempts}...")

        if args.sequential:
            failed = False
            for svc in services:
                log_info(f"Building {svc}...")
                if run(docker_compose("build", svc)).returncode != 0:
                    log_err(f"Failed: {svc}")
                    failed = True
                    break
                log_ok(f"Done: {svc}")
            if not failed:
                build_ok = True
                break
        else:
            log_info("Parallel build (uses more RAM)...")
            if run(docker_compose("build", "--parallel", *services)).returncode == 0:
                build_ok = True
                break

        if attempt < max_attempts:
            wait = attempt * 10
            log_warn(f"Build failed (attempt {attempt}/{max_attempts}), retrying in {wait}s...")
            time.sleep(wait)

    if not build_ok:
        log_err(f"Build failed after {max_attempts} attempts")
        return 1

    print(f"\n{GREEN}Build complete!{NC}")
    print("Start with: python3 deployment.py stack up")
    return 0


# Module entry point — called by loader.dispatch()
run = run_cmd
|
||||
57
deployment/cli/build_base.py
Normal file
57
deployment/cli/build_base.py
Normal file
@@ -0,0 +1,57 @@
|
||||
"""Build base Docker images (apt, node-deps, pip-deps, conan-deps, android-sdk, devcontainer)."""
|
||||
|
||||
import argparse
|
||||
from cli.helpers import (
|
||||
BASE_DIR, PROJECT_ROOT, GREEN, NC,
|
||||
build_with_retry, docker_image_exists, docker_image_size,
|
||||
log_ok, log_warn, log_err,
|
||||
)
|
||||
|
||||
|
||||
def run(args: argparse.Namespace, config: dict) -> int:
    """Build the requested base Docker images in dependency order.

    Honors --list (print available images and exit — this flag is declared
    in commands.json but was previously ignored by this handler), --force
    (rebuild even when the tag exists), and an optional positional list of
    image names (default: everything in base_build_order).

    Returns 0 when all requested images end up built, 1 otherwise.
    """
    defs = config["definitions"]
    build_order = defs["base_build_order"]
    base_images = defs["base_images"]

    # Handle --list before doing any work. getattr guards direct callers
    # that build their own Namespace without the flag.
    if getattr(args, "list", False):
        print("Available base images:")
        for name in build_order:
            print(f"  {name}: {base_images[name]['tag']}")
        return 0

    targets = args.images if args.images else list(build_order)

    # Validate all names up front so we fail before building anything.
    for t in targets:
        if t not in base_images:
            log_err(f"Unknown base image: {t}")
            print(f"Available: {', '.join(build_order)}")
            return 1

    print("\nMetaBuilder Base Image Builder")
    print(f"Building: {' '.join(targets)}\n")

    failed = []
    # Iterate build_order (not targets) so dependencies build first.
    for name in build_order:
        if name not in targets:
            continue

        img = base_images[name]
        tag = img["tag"]

        if not args.force and docker_image_exists(tag):
            log_ok(f"Skipping {name} — {tag} already exists (use --force to rebuild)")
            continue

        # Per commands.json, android-sdk builds from base-images/; everything
        # else uses the repo root as its build context.
        context = str(BASE_DIR) if img.get("context") == "base-images" else str(PROJECT_ROOT)
        dockerfile = str(BASE_DIR / img["dockerfile"])

        if not build_with_retry(tag, dockerfile, context):
            failed.append(name)
            log_warn("Continuing with remaining images...")

    print()
    if not failed:
        print(f"{GREEN}All base images built successfully!{NC}\n")
        for name in targets:
            tag = base_images[name]["tag"]
            if docker_image_exists(tag):
                print(f" {GREEN}✓{NC} {tag} ({docker_image_size(tag)})")
        print("\nNow run: python3 deployment.py build apps")
        return 0

    log_err(f"Some images failed: {' '.join(failed)}")
    print(f"Re-run: python3 deployment.py build base {' '.join(failed)}")
    return 1
|
||||
56
deployment/cli/build_testcontainers.py
Normal file
56
deployment/cli/build_testcontainers.py
Normal file
@@ -0,0 +1,56 @@
|
||||
"""Build testcontainers Conan packages (C shared library + Go sidecar) and upload to Nexus."""
|
||||
|
||||
import argparse
|
||||
import os
|
||||
import shutil
|
||||
from cli.helpers import PROJECT_ROOT, log_err, log_info, log_ok, run, run_check
|
||||
|
||||
|
||||
def run_cmd(args: argparse.Namespace, config: dict) -> int:
    """Build the testcontainers Conan packages and upload them to Nexus.

    Verifies that go and conan are installed, points Conan at the Nexus
    remote, then builds and uploads the C shared library and the Go
    sidecar unless skipped via --skip-native / --skip-sidecar.
    """
    nexus_url = os.environ.get("NEXUS_URL", "http://localhost:8091/repository/conan-hosted/")
    nexus_user = os.environ.get("NEXUS_USER", "admin")
    nexus_pass = os.environ.get("NEXUS_PASS", "nexus")
    recipes_dir = PROJECT_ROOT / "dbal" / "production" / "build-config" / "conan-recipes"

    log_info("Checking prerequisites...")
    prerequisites = (("go", "https://go.dev/dl/"), ("conan", "pip install conan"))
    for tool, install_msg in prerequisites:
        if shutil.which(tool) is None:
            log_err(f"{tool} not found. Install: {install_msg}")
            return 1
        # go reports its version via "go version"; conan via "--version".
        version_flag = "version" if tool == "go" else "--version"
        run([tool, version_flag])

    log_info("Configuring Nexus Conan remote...")
    run(["conan", "remote", "add", "nexus", nexus_url, "--force"])
    run_check(["conan", "remote", "login", "nexus", nexus_user, "--password", nexus_pass])
    run(["conan", "remote", "disable", "conancenter"])
    run(["conan", "remote", "enable", "conancenter"])
    # Move the Nexus remote to index 0 so it is consulted before conancenter.
    run(["conan", "remote", "update", "nexus", "--index", "0"])

    if args.skip_native:
        log_info("Skipping testcontainers-native (--skip-native)")
    else:
        log_info("Building testcontainers-native/0.1.0 (C shared library)...")
        run_check(["conan", "create", str(recipes_dir / "testcontainers-native"),
                   "-s", "build_type=Release", "-s", "compiler.cppstd=20", "--build=missing"])
        log_info("Uploading testcontainers-native to Nexus...")
        run_check(["conan", "upload", "testcontainers-native/0.1.0", "--remote", "nexus", "--confirm"])
        log_ok("testcontainers-native uploaded")

    if args.skip_sidecar:
        log_info("Skipping testcontainers-sidecar (--skip-sidecar)")
    else:
        sidecar_src = PROJECT_ROOT / "dbal" / "testcontainers-sidecar"
        log_info("Building testcontainers-sidecar/0.1.0 (Go binary)...")
        # The recipe locates the sidecar source through this env var.
        env = os.environ.copy()
        env["TESTCONTAINERS_SIDECAR_SRC"] = str(sidecar_src)
        run_check(["conan", "create", str(recipes_dir / "testcontainers-sidecar"),
                   "-s", "build_type=Release", "-s", "compiler.cppstd=20", "--build=missing"], env=env)
        log_info("Uploading testcontainers-sidecar to Nexus...")
        run_check(["conan", "upload", "testcontainers-sidecar/0.1.0", "--remote", "nexus", "--confirm"])
        log_ok("testcontainers-sidecar uploaded")

    log_ok("Testcontainers build complete")
    return 0


run = run_cmd
|
||||
213
deployment/cli/commands.json
Normal file
213
deployment/cli/commands.json
Normal file
@@ -0,0 +1,213 @@
|
||||
{
|
||||
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
||||
"description": "MetaBuilder Deployment CLI — command definitions (argparse from JSON)",
|
||||
"version": "1.0.0",
|
||||
|
||||
"program": {
|
||||
"prog": "deployment.py",
|
||||
"description": "MetaBuilder Deployment CLI"
|
||||
},
|
||||
|
||||
"definitions": {
|
||||
"all_apps": ["workflowui", "codegen", "pastebin", "postgres", "emailclient", "exploded-diagrams", "storybook", "frontend-app", "dbal"],
|
||||
"base_build_order": ["apt", "conan-deps", "android-sdk", "node-deps", "pip-deps", "devcontainer"],
|
||||
"stack_commands": ["up", "start", "down", "stop", "build", "restart", "logs", "ps", "status", "clean"],
|
||||
|
||||
"service_map": {
|
||||
"workflowui": "workflowui",
|
||||
"codegen": "codegen",
|
||||
"pastebin": "pastebin",
|
||||
"postgres": "postgres-dashboard",
|
||||
"emailclient": "emailclient-app",
|
||||
"exploded-diagrams": "exploded-diagrams",
|
||||
"storybook": "storybook",
|
||||
"frontend-app": "frontend-app",
|
||||
"dbal": "dbal"
|
||||
},
|
||||
|
||||
"base_images": {
|
||||
"apt": { "dockerfile": "Dockerfile.apt", "tag": "metabuilder/base-apt:latest" },
|
||||
"conan-deps": { "dockerfile": "Dockerfile.conan-deps", "tag": "metabuilder/base-conan-deps:latest" },
|
||||
"node-deps": { "dockerfile": "Dockerfile.node-deps", "tag": "metabuilder/base-node-deps:latest" },
|
||||
"pip-deps": { "dockerfile": "Dockerfile.pip-deps", "tag": "metabuilder/base-pip-deps:latest" },
|
||||
"android-sdk": { "dockerfile": "Dockerfile.android-sdk", "tag": "metabuilder/base-android-sdk:latest", "context": "base-images" },
|
||||
"devcontainer": { "dockerfile": "Dockerfile.devcontainer", "tag": "metabuilder/devcontainer:latest" }
|
||||
},
|
||||
|
||||
"nexus_images": {
|
||||
"base": [
|
||||
{ "local": "metabuilder/base-apt:latest", "name": "base-apt", "size": "2.8GB" },
|
||||
{ "local": "metabuilder/base-node-deps:latest", "name": "base-node-deps", "size": "5.5GB" },
|
||||
{ "local": "metabuilder/base-pip-deps:latest", "name": "base-pip-deps", "size": "1.4GB" },
|
||||
{ "local": "metabuilder/base-android-sdk:latest", "name": "base-android-sdk", "size": "6.1GB" }
|
||||
],
|
||||
"apps": [
|
||||
{ "local": "deployment-dbal-init:latest", "name": "dbal-init", "size": "12MB" },
|
||||
{ "local": "deployment-storybook:latest", "name": "storybook", "size": "112MB" },
|
||||
{ "local": "deployment-nginx:latest", "name": "nginx", "size": "92MB" },
|
||||
{ "local": "deployment-nginx-stream:latest", "name": "nginx-stream", "size": "92MB" },
|
||||
{ "local": "deployment-pastebin-backend:latest", "name": "pastebin-backend", "size": "236MB" },
|
||||
{ "local": "deployment-emailclient-app:latest", "name": "emailclient", "size": "350MB" },
|
||||
{ "local": "deployment-email-service:latest", "name": "email-service", "size": "388MB" },
|
||||
{ "local": "deployment-exploded-diagrams:latest", "name": "exploded-diagrams", "size": "315MB" },
|
||||
{ "local": "deployment-pastebin:latest", "name": "pastebin", "size": "382MB" },
|
||||
{ "local": "deployment-frontend-app:latest", "name": "frontend-app", "size": "361MB" },
|
||||
{ "local": "deployment-workflowui:latest", "name": "workflowui", "size": "542MB" },
|
||||
{ "local": "deployment-postgres-dashboard:latest", "name": "postgres-dashboard", "size": "508MB" },
|
||||
{ "local": "deployment-smtp-relay:latest", "name": "smtp-relay", "size": "302MB" },
|
||||
{ "local": "deployment-dbal:latest", "name": "dbal", "size": "3.0GB" },
|
||||
{ "local": "deployment-codegen:latest", "name": "codegen", "size": "5.6GB" }
|
||||
],
|
||||
"heavy": [
|
||||
{ "local": "metabuilder/base-conan-deps:latest", "name": "base-conan-deps", "size": "32GB" },
|
||||
{ "local": "metabuilder/devcontainer:latest", "name": "devcontainer", "size": "41GB" }
|
||||
],
|
||||
"heavy_apps": [
|
||||
{ "local": "deployment-media-daemon:latest", "name": "media-daemon", "size": "3.5GB" }
|
||||
]
|
||||
},
|
||||
|
||||
"npm_patches": {
|
||||
"registry": ["minimatch@10.2.4", "tar@7.5.11"],
|
||||
"local": [
|
||||
{
|
||||
"name": "@esbuild-kit/core-utils",
|
||||
"version": "3.3.3-metabuilder.0",
|
||||
"tarball": "esbuild-kit-core-utils-3.3.3-metabuilder.0.tgz"
|
||||
}
|
||||
]
|
||||
},
|
||||
|
||||
"external_images": {
|
||||
"core": [
|
||||
"postgres:15-alpine", "redis:7-alpine",
|
||||
"docker.elastic.co/elasticsearch/elasticsearch:8.11.0",
|
||||
"mysql:8.0", "mongo:7.0", "phpmyadmin:latest",
|
||||
"mongo-express:latest", "redis/redisinsight:latest",
|
||||
"docker.elastic.co/kibana/kibana:8.11.0",
|
||||
"boky/postfix:latest", "nginx:alpine"
|
||||
],
|
||||
"monitoring": [
|
||||
"prom/prometheus:latest", "grafana/grafana:latest",
|
||||
"grafana/loki:latest", "grafana/promtail:latest",
|
||||
"prom/node-exporter:latest", "prometheuscommunity/postgres-exporter:latest",
|
||||
"oliver006/redis_exporter:latest", "gcr.io/cadvisor/cadvisor:latest",
|
||||
"prom/alertmanager:latest"
|
||||
],
|
||||
"media": ["libretime/icecast:2.4.4"],
|
||||
"build_bases": ["node:20-alpine", "node:24-alpine", "python:3.11-slim", "python:3.12-slim", "alpine:3.19"]
|
||||
}
|
||||
},
|
||||
|
||||
"commands": {
|
||||
"build": {
|
||||
"help": "Build Docker images",
|
||||
"subcommands": {
|
||||
"base": {
|
||||
"help": "Build base Docker images",
|
||||
"module": "cli.build_base",
|
||||
"arguments": [
|
||||
{ "name": "--force", "action": "store_true", "help": "Rebuild even if images exist" },
|
||||
{ "name": "--list", "action": "store_true", "help": "List available base images" },
|
||||
{ "name": "images", "nargs": "*", "help": "Images to build (default: all)" }
|
||||
]
|
||||
},
|
||||
"apps": {
|
||||
"help": "Build application Docker images",
|
||||
"module": "cli.build_apps",
|
||||
"arguments": [
|
||||
{ "name": "--force", "action": "store_true", "help": "Rebuild even if images exist" },
|
||||
{ "name": "--sequential", "action": "store_true", "help": "Build sequentially (less RAM)" },
|
||||
{ "name": "apps", "nargs": "*", "help": "Apps to build (default: all)" }
|
||||
]
|
||||
},
|
||||
"testcontainers": {
|
||||
"help": "Build testcontainers Conan packages",
|
||||
"module": "cli.build_testcontainers",
|
||||
"arguments": [
|
||||
{ "name": "--skip-native", "action": "store_true", "help": "Skip C shared library" },
|
||||
{ "name": "--skip-sidecar", "action": "store_true", "help": "Skip Go sidecar" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"deploy": {
|
||||
"help": "Build + deploy app(s)",
|
||||
"module": "cli.deploy",
|
||||
"arguments": [
|
||||
{ "name": "apps", "nargs": "*", "help": "Apps to deploy" },
|
||||
{ "name": "--all", "action": "store_true", "help": "Deploy all apps" },
|
||||
{ "name": "--no-cache", "action": "store_true", "help": "Build without Docker cache" }
|
||||
]
|
||||
},
|
||||
"stack": {
|
||||
"help": "Manage the full MetaBuilder stack",
|
||||
"module": "cli.stack",
|
||||
"arguments": [
|
||||
{ "name": "command", "nargs": "?", "default": "up", "help": "Stack command (default: up)" },
|
||||
{ "name": "--monitoring", "action": "store_true", "help": "Include monitoring services" },
|
||||
{ "name": "--media", "action": "store_true", "help": "Include media services" },
|
||||
{ "name": "--all", "action": "store_true", "dest": "all_profiles", "help": "Include all profiles" }
|
||||
]
|
||||
},
|
||||
"release": {
|
||||
"help": "Bump version, commit, push, and deploy",
|
||||
"module": "cli.release",
|
||||
"arguments": [
|
||||
{ "name": "app", "help": "App to release" },
|
||||
{ "name": "bump", "nargs": "?", "default": "patch", "help": "patch, minor, major, or x.y.z" }
|
||||
]
|
||||
},
|
||||
"nexus": {
|
||||
"help": "Nexus registry management",
|
||||
"subcommands": {
|
||||
"init": {
|
||||
"help": "Initialize Nexus repositories",
|
||||
"module": "cli.nexus_init",
|
||||
"arguments": [
|
||||
{ "name": "--ci", "action": "store_true", "help": "Lightweight CI init (npm repos only)" }
|
||||
]
|
||||
},
|
||||
"push": {
|
||||
"help": "Push images to Nexus",
|
||||
"module": "cli.nexus_push",
|
||||
"arguments": [
|
||||
{ "name": "--tag", "help": "Image tag (default: current git branch)" },
|
||||
{ "name": "--src", "default": "ghcr.io", "help": "Source registry" },
|
||||
{ "name": "--pull", "action": "store_true", "help": "Pull from remote first" }
|
||||
]
|
||||
},
|
||||
"populate": {
|
||||
"help": "Push all images to Nexus with :main + :latest tags",
|
||||
"module": "cli.nexus_populate",
|
||||
"arguments": [
|
||||
{ "name": "--skip-heavy", "action": "store_true", "help": "Skip conan-deps, devcontainer, media-daemon" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"npm": {
|
||||
"help": "npm patch management",
|
||||
"subcommands": {
|
||||
"publish-patches": {
|
||||
"help": "Publish patched npm packages to local registry",
|
||||
"module": "cli.npm_patches",
|
||||
"arguments": [
|
||||
{ "name": "--nexus", "action": "store_true", "help": "Force Nexus on :8091" },
|
||||
{ "name": "--verdaccio", "action": "store_true", "help": "Force Verdaccio on :4873" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"artifactory": {
|
||||
"help": "Artifactory management",
|
||||
"subcommands": {
|
||||
"init": {
|
||||
"help": "Initialize Artifactory CE Conan repositories",
|
||||
"module": "cli.artifactory_init",
|
||||
"arguments": []
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
80
deployment/cli/deploy.py
Normal file
80
deployment/cli/deploy.py
Normal file
@@ -0,0 +1,80 @@
|
||||
"""Build + deploy one or more apps with health check polling."""
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from cli.helpers import (
|
||||
COMPOSE_FILE, GREEN, RED, YELLOW, BLUE, NC,
|
||||
docker_compose, log_err, log_warn, resolve_services, run,
|
||||
)
|
||||
|
||||
|
||||
def run_cmd(args: argparse.Namespace, config: dict) -> int:
    """Build, recreate, and health-check the selected compose services.

    Resolves the requested app names (or every app with --all) to compose
    services, builds them (optionally with --no-cache), force-recreates
    the containers, then polls docker inspect for up to ~60s per service
    until its health status settles.

    Returns 0 only when every service reports healthy.
    """
    all_apps = config["definitions"]["all_apps"]
    targets = list(all_apps) if args.all else args.apps
    if not targets:
        log_err("Specify app(s) to deploy, or use --all")
        print(f"Available: {', '.join(all_apps)}")
        return 1

    services = resolve_services(targets, config)
    if services is None:
        return 1

    banner = f"{BLUE}{'=' * 43}{NC}"
    print(f"\n{banner}")
    print(f"{BLUE} Deploy: {' '.join(targets)}{NC}")
    print(f"{banner}\n")

    # Step 1: Build
    print(f"{YELLOW}[1/3] Building...{NC}")
    extra = ["--no-cache"] if args.no_cache else []
    if run(docker_compose("build", *extra, *services)).returncode != 0:
        log_err("Build failed")
        return 1

    # Step 2: Deploy
    print(f"\n{YELLOW}[2/3] Deploying...{NC}")
    if run(docker_compose("up", "-d", "--force-recreate", *services)).returncode != 0:
        log_err("Deploy failed")
        return 1

    # Step 3: Health check
    print(f"\n{YELLOW}[3/3] Waiting for health checks...{NC}")
    all_healthy = True
    for svc in services:
        container = f"metabuilder-{svc}"
        sys.stdout.write(f" {svc}: ")
        sys.stdout.flush()

        # Poll every 2s, at most 30 times (~60s), until a terminal state.
        status = "unknown"
        for _ in range(30):
            probe = subprocess.run(
                ["docker", "inspect", "--format", "{{.State.Health.Status}}", container],
                capture_output=True, text=True,
            )
            status = probe.stdout.strip() if probe.returncode == 0 else "missing"
            if status in ("healthy", "unhealthy"):
                break
            time.sleep(2)

        if status == "healthy":
            print(f"{GREEN}healthy{NC}")
        elif status == "unhealthy":
            print(f"{RED}unhealthy{NC}")
            all_healthy = False
        else:
            print(f"{YELLOW}timeout (status: {status}){NC}")
            all_healthy = False

    print()
    if all_healthy:
        print(f"{GREEN}All services deployed and healthy{NC}")
    else:
        log_warn(f"Some services not healthy — check: docker compose -f {COMPOSE_FILE} ps")
    return 0 if all_healthy else 1


run = run_cmd
|
||||
145
deployment/cli/helpers.py
Normal file
145
deployment/cli/helpers.py
Normal file
@@ -0,0 +1,145 @@
|
||||
"""Shared helpers for all CLI command modules."""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
# ── Paths ────────────────────────────────────────────────────────────────────
|
||||
|
||||
SCRIPT_DIR = Path(__file__).resolve().parent.parent # deployment/
|
||||
PROJECT_ROOT = SCRIPT_DIR.parent
|
||||
BASE_DIR = SCRIPT_DIR / "base-images"
|
||||
COMPOSE_FILE = SCRIPT_DIR / "docker-compose.stack.yml"
|
||||
|
||||
# ── Colors ───────────────────────────────────────────────────────────────────
|
||||
|
||||
RED = "\033[0;31m"
|
||||
GREEN = "\033[0;32m"
|
||||
YELLOW = "\033[1;33m"
|
||||
BLUE = "\033[0;34m"
|
||||
CYAN = "\033[0;36m"
|
||||
NC = "\033[0m"
|
||||
|
||||
|
||||
def log_info(msg: str) -> None:
    """Print an informational message under the blue [deploy] tag."""
    print(BLUE + "[deploy]" + NC + " " + msg)


def log_ok(msg: str) -> None:
    """Print a success message under the green [deploy] tag."""
    print(GREEN + "[deploy]" + NC + " " + msg)


def log_warn(msg: str) -> None:
    """Print a warning under the yellow [deploy] tag."""
    print(YELLOW + "[deploy]" + NC + " " + msg)


def log_err(msg: str) -> None:
    """Print an error under the red [deploy] tag."""
    print(RED + "[deploy]" + NC + " " + msg)
|
||||
|
||||
|
||||
# ── Command runners ─────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def run(cmd: list[str], **kwargs) -> subprocess.CompletedProcess:
    """Echo the command line, then execute it with subprocess.run."""
    shown = " ".join(cmd)
    print(" $ " + shown, flush=True)
    return subprocess.run(cmd, **kwargs)


def run_check(cmd: list[str], **kwargs) -> subprocess.CompletedProcess:
    """Like run(), but raise CalledProcessError on a non-zero exit."""
    return run(cmd, check=True, **kwargs)
|
||||
|
||||
|
||||
# ── Docker helpers ──────────────────────────────────────────────────────────
|
||||
|
||||
|
||||
def docker_image_exists(tag: str) -> bool:
    """Return True when `docker image inspect` succeeds for the tag."""
    probe = subprocess.run(
        ["docker", "image", "inspect", tag],
        capture_output=True,
    )
    return probe.returncode == 0
|
||||
|
||||
|
||||
def docker_compose(*args: str) -> list[str]:
    """Build a docker compose invocation pinned to the stack compose file."""
    base = ["docker", "compose", "-f", str(COMPOSE_FILE)]
    return base + list(args)
|
||||
|
||||
|
||||
def curl_status(url: str, auth: str | None = None, timeout: int = 5) -> int:
    """Return HTTP status code for a URL, or 0 on connection error."""
    # -o /dev/null discards the body; -w prints only the status code.
    cmd = ["curl", "-s", "-o", os.devnull, "-w", "%{http_code}",
           "--connect-timeout", str(timeout)]
    if auth:
        cmd.extend(["-u", auth])
    cmd.append(url)
    proc = subprocess.run(cmd, capture_output=True, text=True)
    try:
        return int(proc.stdout.strip())
    except (ValueError, AttributeError):
        # Unparseable output (e.g. curl itself failed) counts as "no answer".
        return 0
|
||||
|
||||
|
||||
def pull_with_retry(image: str, max_attempts: int = 5) -> bool:
    """docker pull with exponential back-off; True on success."""
    delay = 5  # seconds; doubles after each failed attempt
    for attempt in range(1, max_attempts + 1):
        if run(["docker", "pull", image]).returncode == 0:
            return True
        if attempt < max_attempts:
            log_warn(f"Pull failed (attempt {attempt}/{max_attempts}), retrying in {delay}s...")
            time.sleep(delay)
            delay *= 2
    log_err(f"Failed to pull {image} after {max_attempts} attempts")
    return False
|
||||
|
||||
|
||||
def build_with_retry(tag: str, dockerfile: str, context: str, max_attempts: int = 5) -> bool:
    """Build a Docker image with retry on failure; True on success."""
    from datetime import datetime

    # Also apply a date-stamped tag, e.g. repo/name:20260424.
    repo = tag.rsplit(":", 1)[0]
    date_tag = f"{repo}:{datetime.now().strftime('%Y%m%d')}"

    log_info(f"Building {tag} ...")
    for attempt in range(1, max_attempts + 1):
        proc = run([
            "docker", "build", "--network=host",
            "--file", dockerfile,
            "--tag", tag, "--tag", date_tag,
            context,
        ])
        if proc.returncode == 0:
            log_ok(f"{tag} built successfully")
            return True
        if attempt < max_attempts:
            # Linear back-off: 15s, 30s, 45s, ...
            wait = attempt * 15
            log_warn(f"Build failed (attempt {attempt}/{max_attempts}), retrying in {wait}s ...")
            time.sleep(wait)

    log_err(f"Failed to build {tag} after {max_attempts} attempts")
    return False
|
||||
|
||||
|
||||
def resolve_services(targets: list[str], config: dict) -> list[str] | None:
|
||||
"""Map friendly app names to compose service names. Returns None on error."""
|
||||
svc_map = config["definitions"]["service_map"]
|
||||
services = []
|
||||
for t in targets:
|
||||
svc = svc_map.get(t)
|
||||
if not svc:
|
||||
log_err(f"Unknown app: {t}")
|
||||
print(f"Available: {', '.join(config['definitions']['all_apps'])}")
|
||||
return None
|
||||
services.append(svc)
|
||||
return services
|
||||
|
||||
|
||||
def docker_image_size(tag: str) -> str:
    """Return the size of a local Docker image as "X.Y GB", or "?" if unknown."""
    inspect = subprocess.run(
        ["docker", "image", "inspect", tag, "--format", "{{.Size}}"],
        capture_output=True, text=True,
    )
    try:
        size_bytes = int(inspect.stdout.strip())
    except ValueError:
        # Image missing or inspect failed — stdout is empty / non-numeric.
        return "?"
    return f"{size_bytes / 1073741824:.1f} GB"
|
||||
68
deployment/cli/loader.py
Normal file
68
deployment/cli/loader.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""Load CLI structure from commands.json and dispatch to handler modules."""
|
||||
|
||||
import argparse
|
||||
import importlib
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
CONFIG_PATH = Path(__file__).parent / "commands.json"
|
||||
|
||||
|
||||
def _load_config() -> dict:
    """Parse commands.json and return the CLI definition tree."""
    return json.loads(CONFIG_PATH.read_text())
|
||||
|
||||
|
||||
def _add_arguments(parser: argparse.ArgumentParser, arguments: list[dict]) -> None:
    """Add arguments from JSON definitions to an argparse parser.

    Each definition is a dict with a "name" key (a positional name or a
    "--flag"); every other key is forwarded verbatim as an
    ``add_argument`` keyword (e.g. "action", "nargs", "help", "default").
    """
    for arg_def in arguments:
        # argparse itself distinguishes optionals (leading "-") from
        # positionals, so the original if/else on the name — whose two
        # branches were identical — is unnecessary.
        kwargs = {k: v for k, v in arg_def.items() if k != "name"}
        parser.add_argument(arg_def["name"], **kwargs)
|
||||
|
||||
|
||||
def _build_subcommands(
    parent_sub: argparse._SubParsersAction,
    commands: dict,
) -> None:
    """Recursively attach subcommand parsers described by the JSON config."""
    for name, definition in commands.items():
        sub_parser = parent_sub.add_parser(name, help=definition.get("help", ""))

        # Handler module for dispatch(), if this command has one.
        if "module" in definition:
            sub_parser.set_defaults(_module=definition["module"])

        if "arguments" in definition:
            _add_arguments(sub_parser, definition["arguments"])

        # Nested groups get their own subparser set (dest "<name>_cmd").
        if "subcommands" in definition:
            nested = sub_parser.add_subparsers(dest=f"{name}_cmd")
            _build_subcommands(nested, definition["subcommands"])
|
||||
|
||||
|
||||
def build_parser() -> tuple[argparse.ArgumentParser, dict]:
    """Construct the top-level argparse parser from commands.json.

    Returns the parser together with the loaded config dict so callers can
    consult its "definitions" section after parsing.
    """
    config = _load_config()
    program = config["program"]

    parser = argparse.ArgumentParser(
        prog=program["prog"],
        description=program["description"],
    )
    subparsers = parser.add_subparsers(dest="command", help="Command group")
    _build_subcommands(subparsers, config["commands"])

    return parser, config
|
||||
|
||||
|
||||
def dispatch(args: argparse.Namespace, config: dict) -> int:
    """Import and invoke the handler module recorded on the parsed args.

    Commands without a handler (bare groups) are a no-op returning 0.
    """
    target = getattr(args, "_module", None)
    if not target:
        return 0

    handler = importlib.import_module(target)
    return handler.run(args, config)
|
||||
133
deployment/cli/nexus_init.py
Normal file
133
deployment/cli/nexus_init.py
Normal file
@@ -0,0 +1,133 @@
|
||||
"""Initialize Nexus repositories (Docker + npm, or npm-only for CI)."""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
from cli.helpers import curl_status, log_err, run
|
||||
|
||||
|
||||
def run_cmd(args: argparse.Namespace, config: dict) -> int:
    """Initialise Nexus: admin password, anonymous access, and repositories.

    In CI mode (``args.ci``) only the npm token realm and npm repositories
    are configured; otherwise the Docker token realm and a Docker hosted
    registry are set up as well. Returns 0 on success, 1 on any failed call.
    """
    # BUG FIX: the module-level name ``run`` is rebound to this function at
    # the bottom of the file (so cli.loader.dispatch finds ``module.run``),
    # which made the bare ``run(...)`` calls below recurse into run_cmd
    # itself at call time. Re-import the subprocess helper locally so it
    # shadows that rebinding.
    from cli.helpers import run

    nexus_url = os.environ.get("NEXUS_URL", "http://localhost:8091" if args.ci else "http://nexus:8081")
    new_pass = os.environ.get("NEXUS_ADMIN_NEW_PASS", "nexus")
    docker_port = os.environ.get("DOCKER_REPO_PORT", "5000")
    # First-boot wizard password file; path differs between CI and in-container runs.
    pass_file = "/tmp/nexus-data/admin.password" if args.ci else "/nexus-data/admin.password"
    prefix = "nexus-ci-init" if args.ci else "nexus-init"

    def nlog(msg: str) -> None:
        # Prefixed progress logging for this command only.
        print(f"[{prefix}] {msg}")

    auth = f"admin:{new_pass}"

    # Resolve admin password: already rotated, first boot (wizard file), or fail.
    status = curl_status(f"{nexus_url}/service/rest/v1/status", auth)
    if status == 200:
        nlog(f"Already initialised with password '{new_pass}'")
    elif os.path.exists(pass_file):
        with open(pass_file) as f:
            init_pass = f.read().strip()
        nlog("First run: changing admin password...")
        result = subprocess.run([
            "curl", "-s", "-o", os.devnull, "-w", "%{http_code}", "-X", "PUT",
            f"{nexus_url}/service/rest/v1/security/users/admin/change-password",
            "-u", f"admin:{init_pass}", "-H", "Content-Type: text/plain", "-d", new_pass,
        ], capture_output=True, text=True)
        if result.stdout.strip() == "204":
            nlog(f"Admin password set to '{new_pass}'")
        else:
            nlog(f"ERROR: password change returned HTTP {result.stdout.strip()}")
            return 1
    else:
        nlog("ERROR: cannot authenticate — is NEXUS_ADMIN_NEW_PASS correct?")
        return 1

    # Allow anonymous (credential-less) reads.
    run(["curl", "-sf", "-X", "PUT", f"{nexus_url}/service/rest/v1/security/anonymous",
         "-u", auth, "-H", "Content-Type: application/json",
         "-d", '{"enabled":true,"userId":"anonymous","realmName":"NexusAuthorizingRealm"}'])
    nlog("Anonymous access enabled")

    if not args.ci:
        # Docker + npm token realms
        run(["curl", "-sf", "-X", "PUT", f"{nexus_url}/service/rest/v1/security/realms/active",
             "-u", auth, "-H", "Content-Type: application/json",
             "-d", '["NexusAuthenticatingRealm","DockerToken","NpmToken"]'])
        nlog("Docker + npm Bearer Token realms enabled")

        # Docker hosted repo
        docker_repo = json.dumps({
            "name": "local", "online": True,
            "storage": {"blobStoreName": "default", "strictContentTypeValidation": True, "writePolicy": "allow"},
            "docker": {"v1Enabled": False, "forceBasicAuth": False, "httpPort": int(docker_port)},
        })
        result = subprocess.run([
            "curl", "-s", "-o", os.devnull, "-w", "%{http_code}", "-X", "POST",
            f"{nexus_url}/service/rest/v1/repositories/docker/hosted",
            "-u", auth, "-H", "Content-Type: application/json", "-d", docker_repo,
        ], capture_output=True, text=True)
        code = result.stdout.strip()
        # 201 = created, 400 = already exists (idempotent re-run).
        if code == "201":
            nlog(f"Docker hosted repo 'local' created on port {docker_port}")
        elif code == "400":
            nlog("Docker repo 'local' already exists, skipping")
        else:
            nlog(f"ERROR: Docker repo creation returned HTTP {code}")
            return 1
    else:
        # CI: npm token realm only
        run(["curl", "-sf", "-X", "PUT", f"{nexus_url}/service/rest/v1/security/realms/active",
             "-u", auth, "-H", "Content-Type: application/json",
             "-d", '["NexusAuthenticatingRealm","NpmToken"]'])

    # npm repos (hosted, proxy, group)
    npm_repos = [
        ("npm/hosted", "npm-hosted", {
            "name": "npm-hosted", "online": True,
            "storage": {"blobStoreName": "default", "strictContentTypeValidation": True,
                        # CI allows republishing the same version; prod is write-once.
                        "writePolicy": "allow" if args.ci else "allow_once"},
        }),
        ("npm/proxy", "npm-proxy", {
            "name": "npm-proxy", "online": True,
            "storage": {"blobStoreName": "default", "strictContentTypeValidation": True},
            "proxy": {"remoteUrl": "https://registry.npmjs.org", "contentMaxAge": 1440, "metadataMaxAge": 1440},
            "httpClient": {"blocked": False, "autoBlock": True},
            "negativeCache": {"enabled": True, "timeToLive": 1440},
        }),
        ("npm/group", "npm-group", {
            "name": "npm-group", "online": True,
            "storage": {"blobStoreName": "default", "strictContentTypeValidation": True},
            "group": {"memberNames": ["npm-hosted", "npm-proxy"]},
        }),
    ]

    for repo_type, label, body in npm_repos:
        result = subprocess.run([
            "curl", "-s", "-o", os.devnull, "-w", "%{http_code}", "-X", "POST",
            f"{nexus_url}/service/rest/v1/repositories/{repo_type}",
            "-u", auth, "-H", "Content-Type: application/json", "-d", json.dumps(body),
        ], capture_output=True, text=True)
        code = result.stdout.strip()
        if code == "201":
            nlog(f"{label} repo created")
        elif code == "400":
            nlog(f"{label} repo already exists, skipping")
        else:
            nlog(f"ERROR creating {label}: HTTP {code}")
            return 1

    if args.ci:
        nlog("Nexus CI init complete")
    else:
        nlog("")
        nlog("=" * 46)
        nlog(" Nexus ready!")
        nlog(f" Registry : localhost:{docker_port}")
        nlog(f" Web UI : http://localhost:8091")
        nlog(f" Login : admin / {new_pass}")
        nlog(f" npm group: {nexus_url}/repository/npm-group/")
        nlog("=" * 46)

    return 0
|
||||
|
||||
|
||||
# Entry point looked up by cli.loader.dispatch (module attribute "run").
# NOTE(review): this rebinds the module-level name ``run``, shadowing the
# ``run`` helper imported from cli.helpers — bare ``run(...)`` calls inside
# run_cmd resolve the global at call time and must not rely on the helper.
run = run_cmd
|
||||
68
deployment/cli/nexus_populate.py
Normal file
68
deployment/cli/nexus_populate.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""Push all locally-built images to Nexus with :main + :latest tags."""
|
||||
|
||||
import argparse
|
||||
from cli.helpers import (
|
||||
BLUE, GREEN, NC,
|
||||
docker_image_exists, log_err, log_info, log_ok, log_warn, run,
|
||||
)
|
||||
|
||||
|
||||
def run_cmd(args: argparse.Namespace, config: dict) -> int:
    """Tag and push every locally-built image to Nexus as :main and :latest.

    Images listed in config["definitions"]["nexus_images"] that are missing
    locally are skipped; "heavy" images are skipped entirely when
    ``--skip-heavy`` is set. Returns 1 if any push failed, else 0.
    """
    # BUG FIX: module-level ``run`` is rebound to this function at the bottom
    # of the file (for cli.loader.dispatch), so the bare ``run(...)`` calls
    # below recursed into run_cmd. Re-import the helper locally to shadow it.
    from cli.helpers import run

    nexus = "localhost:5050"
    slug = "johndoe6345789/metabuilder-small"
    nexus_user, nexus_pass = "admin", "nexus"

    log_info(f"Logging in to {nexus}...")
    # Password via stdin so it never appears in the process list.
    run(["docker", "login", nexus, "-u", nexus_user, "--password-stdin"],
        input=nexus_pass.encode())

    images_def = config["definitions"]["nexus_images"]

    pushed = skipped = failed = 0

    def push_image(src: str, name: str, size: str) -> None:
        # Tag *src* into the Nexus namespace and push both tags, updating
        # the enclosing counters.
        nonlocal pushed, skipped, failed
        if not docker_image_exists(src):
            log_warn(f"SKIP {name} — {src} not found locally")
            skipped += 1
            return

        dst_main = f"{nexus}/{slug}/{name}:main"
        dst_latest = f"{nexus}/{slug}/{name}:latest"

        log_info(f"Pushing {name} ({size})...")
        run(["docker", "tag", src, dst_main])
        run(["docker", "tag", src, dst_latest])

        r1 = run(["docker", "push", dst_main])
        r2 = run(["docker", "push", dst_latest])
        if r1.returncode == 0 and r2.returncode == 0:
            log_ok(f" {name} -> :main + :latest")
            pushed += 1
        else:
            log_err(f" {name} FAILED")
            failed += 1

    print(f"\n{BLUE}Registry : {nexus}{NC}")
    print(f"{BLUE}Slug : {slug}{NC}")
    print(f"{BLUE}Skip heavy: {args.skip_heavy}{NC}\n")

    for entry in images_def["base"] + images_def["apps"]:
        push_image(entry["local"], entry["name"], entry["size"])

    if args.skip_heavy:
        log_warn("Skipping heavy images (--skip-heavy set):")
        for entry in images_def["heavy"] + images_def["heavy_apps"]:
            log_warn(f" {entry['name']} ({entry['size']})")
    else:
        log_info("--- Heavy images (this will take a while) ---")
        for entry in images_def["heavy_apps"] + images_def["heavy"]:
            push_image(entry["local"], entry["name"], entry["size"])

    print(f"\n{GREEN}{'=' * 46}{NC}")
    print(f"{GREEN} Done. pushed={pushed} skipped={skipped} failed={failed}{NC}")
    print(f"{GREEN}{'=' * 46}{NC}")
    return 1 if failed else 0
|
||||
|
||||
|
||||
# Entry point looked up by cli.loader.dispatch (module attribute "run").
# NOTE(review): rebinds module-level ``run``, shadowing the cli.helpers
# ``run`` import — calls inside run_cmd must not rely on that import.
run = run_cmd
|
||||
85
deployment/cli/nexus_push.py
Normal file
85
deployment/cli/nexus_push.py
Normal file
@@ -0,0 +1,85 @@
|
||||
"""Push locally-built images to local Nexus registry for act CI runner."""
|
||||
|
||||
import argparse
|
||||
import re
|
||||
import subprocess
|
||||
from cli.helpers import (
|
||||
PROJECT_ROOT, GREEN, YELLOW, RED, NC,
|
||||
docker_image_exists, log_info, run,
|
||||
)
|
||||
|
||||
|
||||
def run_cmd(args: argparse.Namespace, config: dict) -> int:
    """Mirror base + app images into the local Nexus registry for act CI.

    Source registry and tag come from ``--src`` / ``--tag`` (tag defaults to
    the current git branch, then "main"); ``--pull`` fetches missing images
    from the source registry first. Returns 1 if any push failed, else 0.
    """
    # BUG FIX: module-level ``run`` is rebound to this function at the bottom
    # of the file (for cli.loader.dispatch), so the bare ``run(...)`` calls
    # below recursed into run_cmd. Re-import the helper locally to shadow it.
    from cli.helpers import run

    local_registry = "localhost:5050"
    nexus_user, nexus_pass = "admin", "nexus"

    # Derive repo slug from the git origin URL; fall back to the default slug.
    result = subprocess.run(
        ["git", "-C", str(PROJECT_ROOT), "remote", "get-url", "origin"],
        capture_output=True, text=True,
    )
    slug = "johndoe6345789/metabuilder-small"
    if result.returncode == 0:
        m = re.search(r"github\.com[:/]([^/]+/[^/.]+)", result.stdout.strip())
        if m:
            slug = m.group(1).lower()

    source_registry = args.src

    if args.tag:
        tag = args.tag
    else:
        # Default tag = current branch name, or "main" outside a work tree.
        result = subprocess.run(
            ["git", "-C", str(PROJECT_ROOT), "rev-parse", "--abbrev-ref", "HEAD"],
            capture_output=True, text=True,
        )
        tag = result.stdout.strip() if result.returncode == 0 else "main"

    print(f"{YELLOW}Registry:{NC} {local_registry}")
    print(f"{YELLOW}Slug:{NC} {slug}")
    print(f"{YELLOW}Tag:{NC} {tag}\n")

    log_info(f"Logging in to {local_registry}...")
    # Password via stdin so it never appears in the process list.
    run(["docker", "login", local_registry, "-u", nexus_user, "--password-stdin"],
        input=nexus_pass.encode())

    base_images = ["base-apt", "base-node-deps", "base-pip-deps", "base-conan-deps",
                   "base-android-sdk", "devcontainer"]
    app_images = ["pastebin", "workflowui", "codegen", "postgres-dashboard",
                  "emailclient", "exploded-diagrams", "storybook"]

    pushed = skipped = failed = 0
    for image in base_images + app_images:
        src = f"{source_registry}/{slug}/{image}:{tag}"
        dst = f"{local_registry}/{slug}/{image}:{tag}"

        if args.pull:
            print(f" {YELLOW}pulling{NC} {src}...")
            # Only pull when the source tag is not already present locally.
            if not docker_image_exists(src):
                result = run(["docker", "pull", src])
                if result.returncode != 0:
                    print(f" {YELLOW}skip{NC} {image} (not found in {source_registry})")
                    skipped += 1
                    continue

        if not docker_image_exists(src) and not docker_image_exists(dst):
            print(f" {YELLOW}skip{NC} {image} (not found locally)")
            skipped += 1
            continue

        if docker_image_exists(src):
            run(["docker", "tag", src, dst])

        print(f" {GREEN}push{NC} {dst}")
        result = run(["docker", "push", dst])
        if result.returncode == 0:
            pushed += 1
        else:
            print(f" {RED}FAILED{NC} {image}")
            failed += 1

    print(f"\n{GREEN}Done.{NC} pushed={pushed} skipped={skipped} failed={failed}")
    return 1 if failed else 0
|
||||
|
||||
|
||||
# Entry point looked up by cli.loader.dispatch (module attribute "run").
# NOTE(review): rebinds module-level ``run``, shadowing the cli.helpers
# ``run`` import — calls inside run_cmd must not rely on that import.
run = run_cmd
|
||||
111
deployment/cli/npm_patches.py
Normal file
111
deployment/cli/npm_patches.py
Normal file
@@ -0,0 +1,111 @@
|
||||
"""Publish patched npm packages to a local registry (Nexus or Verdaccio)."""
|
||||
|
||||
import argparse
|
||||
import base64
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from cli.helpers import (
|
||||
SCRIPT_DIR, GREEN, NC,
|
||||
curl_status, log_err, log_info, log_ok, log_warn, run,
|
||||
)
|
||||
|
||||
|
||||
def run_cmd(args: argparse.Namespace, config: dict) -> int:
    """Publish patched npm packages to Nexus (npm-hosted) or Verdaccio.

    Registry selection: the --nexus / --verdaccio flags win; otherwise Nexus
    is used when reachable, with Verdaccio as the fallback. Local tarballs in
    deployment/npm-patches are published first, then registry packages are
    re-packed and re-published under the "patched" dist-tag. Returns 1 on
    hard failures (unreachable registry, missing tarball, failed download).
    """
    # BUG FIX: module-level ``run`` is rebound to this function at the bottom
    # of the file (for cli.loader.dispatch), so the bare ``run(...)`` calls
    # below recursed into run_cmd. Re-import the helper locally to shadow it.
    from cli.helpers import run

    nexus_url = os.environ.get("NEXUS_URL", "http://localhost:8091")
    nexus_user = os.environ.get("NEXUS_USER", "admin")
    nexus_pass = os.environ.get("NEXUS_PASS", "nexus")
    verdaccio_url = os.environ.get("VERDACCIO_URL", "http://localhost:4873")

    use_nexus = args.nexus
    use_verdaccio = args.verdaccio

    # Auto-detect when neither flag was given: prefer Nexus if it responds.
    if not use_nexus and not use_verdaccio:
        if curl_status(f"{nexus_url}/service/rest/v1/status", f"{nexus_user}:{nexus_pass}") == 200:
            use_nexus = True
        else:
            use_verdaccio = True

    patches_def = config["definitions"]["npm_patches"]
    patches_dir = SCRIPT_DIR / "npm-patches"

    with tempfile.TemporaryDirectory() as work_dir:
        npmrc_path = Path(work_dir) / ".npmrc"

        if use_nexus:
            npm_hosted = f"{nexus_url}/repository/npm-hosted/"
            log_info(f"Using Nexus at {nexus_url}...")
            http = curl_status(f"{nexus_url}/service/rest/v1/status", f"{nexus_user}:{nexus_pass}")
            if http != 200:
                log_err(f"Cannot reach Nexus (HTTP {http}). Is it running?")
                return 1
            # npm's registry-scoped _auth wants base64("user:pass").
            nexus_auth = base64.b64encode(f"{nexus_user}:{nexus_pass}".encode()).decode()
            host_part = npm_hosted.split("://", 1)[1]
            npmrc_path.write_text(f"//{host_part}:_auth={nexus_auth}\n")
            publish_args = ["--userconfig", str(npmrc_path)]
        else:
            log_info(f"Using Verdaccio at {verdaccio_url}...")
            http = curl_status(f"{verdaccio_url}/-/ping")
            if http != 200:
                log_err(f"Cannot reach Verdaccio (HTTP {http}). Start with: npx verdaccio --config deployment/verdaccio.yaml")
                return 1
            host_part = verdaccio_url.split("://", 1)[1]
            npmrc_path.write_text(f"registry={verdaccio_url}/\n//{host_part}/:_authToken=\n")
            publish_args = ["--registry", verdaccio_url, "--userconfig", str(npmrc_path)]

        published = skipped = 0

        # Local patches: pre-built tarballs checked into deployment/npm-patches.
        for patch in patches_def["local"]:
            pkg_name = patch["name"]
            pkg_version = patch["version"]
            tarball_name = patch["tarball"]
            log_info(f"Processing local patch {pkg_name}@{pkg_version}...")
            tarball = patches_dir / tarball_name
            if not tarball.exists():
                log_err(f"Patched tarball not found: {tarball}")
                return 1
            result = run(["npm", "publish", str(tarball), *publish_args, "--tag", "patched"])
            if result.returncode == 0:
                log_ok(f"Published {pkg_name}@{pkg_version}")
                published += 1
            else:
                # Publishing an existing version is non-fatal (idempotent re-run).
                log_warn(f"{pkg_name}@{pkg_version} already exists or publish failed, skipping")
                skipped += 1

        # Registry patches: download from the public registry, republish locally.
        for pkg_spec in patches_def["registry"]:
            pkg_name, pkg_version = pkg_spec.rsplit("@", 1)
            log_info(f"Processing {pkg_name}@{pkg_version}...")

            result = subprocess.run(
                ["npm", "pack", pkg_spec],
                capture_output=True, text=True, cwd=work_dir,
            )
            if result.returncode != 0:
                log_err(f"Failed to download {pkg_spec}")
                return 1
            # Last stdout line is the generated tarball filename.
            tarball = result.stdout.strip().split("\n")[-1]
            tarball_path = Path(work_dir) / tarball

            result = run(["npm", "publish", str(tarball_path), *publish_args, "--tag", "patched"])
            if result.returncode == 0:
                log_ok(f"Published {pkg_name}@{pkg_version}")
                published += 1
            else:
                log_warn(f"{pkg_name}@{pkg_version} already exists or publish failed, skipping")
                skipped += 1

            tarball_path.unlink(missing_ok=True)

    print(f"\n{GREEN}Done. published={published} skipped={skipped}{NC}")
    if use_nexus:
        print(f"Nexus npm-group: {nexus_url}/repository/npm-group/")
    else:
        print(f"Verdaccio registry: {verdaccio_url}")
    return 0
|
||||
|
||||
|
||||
# Entry point looked up by cli.loader.dispatch (module attribute "run").
# NOTE(review): rebinds module-level ``run``, shadowing the cli.helpers
# ``run`` import — calls inside run_cmd must not rely on that import.
run = run_cmd
|
||||
70
deployment/cli/release.py
Normal file
70
deployment/cli/release.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""Bump version, commit, push, and deploy an app."""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from cli.helpers import (
|
||||
PROJECT_ROOT, CYAN, GREEN, YELLOW, NC,
|
||||
docker_compose, log_err, log_ok, run_check,
|
||||
)
|
||||
|
||||
|
||||
def run(args: argparse.Namespace, config: dict) -> int:
    """Release an app: bump its package.json version, commit, push, deploy.

    ``args.bump`` is "patch" / "minor" / "major" or an explicit "x.y.z".
    Side effects: rewrites package.json, creates a git commit, pushes to
    origin/main, and rebuilds + restarts the app's compose service.
    Returns 0 on success, 1 when the package or bump type is invalid
    (run_check raises on git/compose failures).
    """
    app = args.app
    bump = args.bump

    # Find package.json — apps live under frontends/<app>/ or <app>/ directly.
    pkg_path = None
    for candidate in [
        PROJECT_ROOT / "frontends" / app / "package.json",
        PROJECT_ROOT / app / "package.json",
    ]:
        if candidate.exists():
            pkg_path = candidate
            break

    if not pkg_path:
        log_err(f"Cannot find package.json for '{app}'")
        return 1

    with open(pkg_path) as f:
        pkg = json.load(f)
    current = pkg["version"]

    # Compute next version: an explicit x.y.z wins, else bump the current one.
    if re.match(r"^\d+\.\d+\.\d+$", bump):
        next_ver = bump
    else:
        major, minor, patch = (int(x) for x in current.split("."))
        if bump == "major":
            next_ver = f"{major + 1}.0.0"
        elif bump == "minor":
            next_ver = f"{major}.{minor + 1}.0"
        elif bump == "patch":
            next_ver = f"{major}.{minor}.{patch + 1}"
        else:
            log_err(f"Unknown bump type '{bump}'. Use patch, minor, major, or x.y.z")
            return 1

    print(f"{CYAN}Releasing {app}: {YELLOW}{current}{CYAN} -> {GREEN}{next_ver}{NC}")

    # Update package.json (rewritten with 2-space indent + trailing newline).
    pkg["version"] = next_ver
    with open(pkg_path, "w") as f:
        json.dump(pkg, f, indent=2)
        f.write("\n")

    # Commit and push — NOTE(review): pushes straight to origin/main.
    os.chdir(PROJECT_ROOT)
    run_check(["git", "add", str(pkg_path)])
    run_check(["git", "commit", "-m",
               f"chore: bump {app} to v{next_ver}\n\n"
               f"Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>"])
    run_check(["git", "push", "origin", "main"])

    print(f"{CYAN}Building and deploying {app}...{NC}")
    run_check(docker_compose("up", "-d", "--build", app))

    log_ok(f"{app} v{next_ver} deployed")
    return 0
|
||||
130
deployment/cli/stack.py
Normal file
130
deployment/cli/stack.py
Normal file
@@ -0,0 +1,130 @@
|
||||
"""Manage the full MetaBuilder stack (up, down, build, restart, logs, ps, clean)."""
|
||||
|
||||
import argparse
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from cli.helpers import (
|
||||
GREEN, YELLOW, BLUE, RED, NC,
|
||||
docker_compose, log_info, log_ok, log_warn, log_err, pull_with_retry, run,
|
||||
)
|
||||
|
||||
|
||||
def _pull_external_images(profiles: list[str], config: dict) -> None:
    """Pre-pull external images so compose up doesn't block on downloads."""
    ext = config["definitions"]["external_images"]
    images = list(ext["core"])

    # *profiles* looks like ["--profile", "monitoring", "--profile", "media"];
    # collect the names that follow each "--profile" token.
    active = [profiles[i + 1] for i, tok in enumerate(profiles) if tok == "--profile"]
    if "monitoring" in active:
        images += ext["monitoring"]
    if "media" in active:
        images += ext["media"]

    log_info(f"Pre-pulling {len(images)} external images...")
    failures = 0
    total = len(images)
    for idx, image in enumerate(images, 1):
        print(f" [{idx}/{total}] {image}")
        if not pull_with_retry(image):
            failures += 1

    if failures:
        log_warn(f"{failures} image(s) failed to pull. Stack may be incomplete.")
    else:
        log_ok("All images ready.")
|
||||
|
||||
|
||||
def _wait_for_healthy(profiles: list[str], args: argparse.Namespace) -> None:
    """Poll `docker compose ps` until the expected number of services report healthy.

    Polls every 2s for up to 120s, printing a progress line; on success prints
    the portal URL and quick commands, on timeout prints a hint and returns.
    """
    # NOTE(review): expected healthy-service counts are hardcoded here
    # (23 core, +9 monitoring, +3 media) — they must be kept in sync with
    # the compose file by hand.
    core_count = 23
    profile_info = "core"
    if args.monitoring or args.all_profiles:
        core_count += 9
        profile_info += " + monitoring"
    if args.media or args.all_profiles:
        core_count += 3
        profile_info += " + media"

    print(f"{YELLOW}Waiting for services ({profile_info})...{NC}")
    max_wait = 120
    for elapsed in range(0, max_wait, 2):
        result = subprocess.run(
            docker_compose(*profiles, "ps", "--format", "json"),
            capture_output=True, text=True,
        )
        # Cheap health check: count occurrences of "healthy" in the JSON output
        # rather than parsing it.
        healthy = result.stdout.count('"healthy"')
        if healthy >= core_count:
            print(f"\n{GREEN}All {core_count} services healthy!{NC}")
            print(f"\nPortal: {BLUE}http://localhost{NC}\n")
            print("Quick commands:")
            print(" python3 deployment.py stack logs")
            print(" python3 deployment.py stack down")
            return
        # Overwrite the same terminal line with the running tally.
        sys.stdout.write(f"\r Services healthy: {healthy}/{core_count} ({elapsed}s)")
        sys.stdout.flush()
        time.sleep(2)

    print(f"\n{YELLOW}Timeout waiting for all services.{NC}")
    print(" python3 deployment.py stack ps")
|
||||
|
||||
|
||||
def run_cmd(args: argparse.Namespace, config: dict) -> int:
    """Entry point for `stack`: up/start, down/stop, build, restart, logs, ps/status, clean."""
    # BUG FIX: module-level ``run`` is rebound to this function at the bottom
    # of the file (for cli.loader.dispatch), so the bare ``run(...)`` calls
    # below recursed into run_cmd. Re-import the helper locally to shadow it.
    from cli.helpers import run

    profiles: list[str] = []
    if args.monitoring or args.all_profiles:
        profiles += ["--profile", "monitoring"]
    if args.media or args.all_profiles:
        profiles += ["--profile", "media"]

    # NOTE(review): relies on the stack subcommand's own "command" argument
    # overwriting the top-level parser's dest of the same name — confirm
    # against commands.json.
    command = args.command or "up"

    # Bail out early if the docker compose plugin is unavailable.
    if subprocess.run(["docker", "compose", "version"], capture_output=True).returncode != 0:
        log_err("docker compose not found")
        return 1

    if command in ("down", "stop"):
        log_info("Stopping MetaBuilder stack...")
        run(docker_compose(*profiles, "down"))
        log_ok("Stack stopped")
        return 0

    if command == "restart":
        run(docker_compose(*profiles, "restart"))
        log_ok("Stack restarted")
        return 0

    if command == "logs":
        run(docker_compose(*profiles, "logs", "-f"))
        return 0

    if command in ("ps", "status"):
        run(docker_compose(*profiles, "ps"))
        return 0

    if command == "clean":
        # Destructive: requires the literal answer "yes".
        answer = input(f"{RED}This will remove all containers and volumes! Are you sure? (yes/no): {NC}")
        if answer.strip() == "yes":
            run(docker_compose(*profiles, "down", "-v"))
            log_ok("Stack cleaned")
        return 0

    if command == "build":
        log_info("Building MetaBuilder stack...")
        _pull_external_images(profiles, config)
        run(docker_compose(*profiles, "up", "-d", "--build"))
        log_ok("Stack built and started")
        return 0

    if command in ("up", "start"):
        log_info("Starting MetaBuilder stack...")
        _pull_external_images(profiles, config)
        run(docker_compose(*profiles, "up", "-d"))
        print(f"\n{GREEN}Stack started!{NC}\n")
        _wait_for_healthy(profiles, args)
        return 0

    log_err(f"Unknown command: {command}")
    return 1
|
||||
|
||||
|
||||
# Entry point looked up by cli.loader.dispatch (module attribute "run").
# NOTE(review): rebinds module-level ``run``, shadowing the cli.helpers
# ``run`` import — calls inside run_cmd must not rely on that import.
run = run_cmd
|
||||
48
deployment/deployment.py
Normal file
48
deployment/deployment.py
Normal file
@@ -0,0 +1,48 @@
|
||||
#!/usr/bin/env python3
|
||||
"""MetaBuilder Deployment CLI — JSON-powered, modular command system.
|
||||
|
||||
All command definitions live in cli/commands.json.
|
||||
Each command dispatches to a Python module in cli/*.py.
|
||||
|
||||
Usage:
|
||||
python3 deployment.py --help
|
||||
python3 deployment.py build base [--force] [apt] [node-deps] ...
|
||||
python3 deployment.py build apps [--force] [--sequential] [codegen] ...
|
||||
python3 deployment.py build testcontainers [--skip-native] [--skip-sidecar]
|
||||
python3 deployment.py deploy <app> [--all] [--no-cache]
|
||||
python3 deployment.py stack up|down|build|logs|ps|clean [--monitoring] [--media]
|
||||
python3 deployment.py release <app> [patch|minor|major|x.y.z]
|
||||
python3 deployment.py nexus init|push|populate
|
||||
python3 deployment.py npm publish-patches [--nexus] [--verdaccio]
|
||||
python3 deployment.py artifactory init
|
||||
"""
|
||||
|
||||
import sys
|
||||
from cli.loader import build_parser, dispatch
|
||||
|
||||
|
||||
def main() -> int:
    """Parse CLI arguments (driven by commands.json) and dispatch the handler."""
    parser, config = build_parser()
    args = parser.parse_args()

    # Bare invocation: show top-level help.
    if not args.command:
        parser.print_help()
        return 0

    # Special case: `build base --list` prints the known base images.
    wants_base_list = (
        args.command == "build"
        and getattr(args, "build_type", None) == "base"
        and getattr(args, "list", False)
    )
    if wants_base_list:
        for name, img in config["definitions"]["base_images"].items():
            print(f" {name} -> {img['tag']}")
        return 0

    if not getattr(args, "_module", None):
        # No handler module on this command — show the group's own help.
        parser.parse_args([args.command, "--help"])
        return 0

    return dispatch(args, config)


if __name__ == "__main__":
    sys.exit(main())
|
||||
@@ -1,7 +1,7 @@
|
||||
Flask==3.1.3
|
||||
Flask-CORS==6.0.0
|
||||
pyjwt==2.10.1
|
||||
rocksdict==0.3.23
|
||||
rocksdict==0.3.29
|
||||
werkzeug==3.1.6
|
||||
jsonschema==4.20.0
|
||||
bcrypt==4.1.2
|
||||
|
||||
Reference in New Issue
Block a user