ROADMAP.md

This commit is contained in:
2026-01-10 02:55:18 +00:00
parent 8dfea6c736
commit 3454025381
769 changed files with 54287 additions and 61447 deletions

0
python/__init__.py Normal file
View File

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env python3
"""Convert the bundled XM tracker file to an OGG so the demo can play music."""
from __future__ import annotations
import argparse
import shlex
import subprocess
from pathlib import Path
import imageio_ffmpeg
def parse_args() -> argparse.Namespace:
    """Build and evaluate the command-line interface for the converter."""
    script_dir = Path(__file__).parent
    cli = argparse.ArgumentParser(description="Convert scripts/modmusic.xm into OGG.")
    cli.add_argument(
        "--input",
        type=Path,
        default=script_dir / "modmusic.xm",
        help="Tracker file to render (default: scripts/modmusic.xm).",
    )
    cli.add_argument(
        "--output",
        type=Path,
        default=script_dir / "modmusic.ogg",
        help="Path for the rendered OGG (default next to scripts/modmusic.xm).",
    )
    cli.add_argument(
        "--bitrate",
        default="192k",
        help="FFmpeg audio bitrate (default: 192k).",
    )
    return cli.parse_args()
def main() -> None:
    """Render the XM module to OGG by shelling out to the bundled ffmpeg."""
    args = parse_args()
    if not args.input.exists():
        raise SystemExit(f"Error: XM source {args.input} is missing")
    args.output.parent.mkdir(parents=True, exist_ok=True)
    # imageio-ffmpeg ships a static ffmpeg binary, so no system install is needed.
    command = [
        imageio_ffmpeg.get_ffmpeg_exe(),
        "-y",
        "-i",
        str(args.input),
        "-b:a",
        args.bitrate,
        str(args.output),
    ]
    print("Executing:", " ".join(shlex.quote(part) for part in command))
    subprocess.run(command, check=True)
if __name__ == "__main__":
    main()

1986
python/dev_commands.py Executable file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,359 @@
#!/usr/bin/env python3
"""Create the demo's shared `.ogg` audio library via ``pedalboard`` + ``soundfile``."""
from __future__ import annotations
import argparse
import logging
import shutil
from pathlib import Path
from typing import Optional
logger = logging.getLogger(__name__)
try:
import numpy as np
import soundfile as sf
from pedalboard import Chorus, Delay, Distortion, Pedalboard, Reverb
except ImportError as exc: # pragma: no cover - requires extra dependencies
raise SystemExit(
"Missing pedalboard or soundfile. Install them with "
"`python -m pip install pedalboard soundfile numpy`. "
f"ImportError: {exc}"
) from exc
try:
from piper import PiperVoice
except ImportError:
PiperVoice = None # type: ignore[assignment]
try:
from piper.download_voices import download_voice
except ImportError:
download_voice = None # type: ignore[assignment]
SAMPLE_RATE = 44100
TTS_PHRASES = [
"Level 1",
"Level 2",
"Game Over",
"Continue",
"Power Up",
]
DEFAULT_PIPER_VOICE_NAME = "en_US-lessac-medium"
DEFAULT_PIPER_VOICE_DIR = (
Path(__file__).resolve().parent / "assets" / "audio" / "tts" / "voices"
)
def _num_samples(duration: float) -> int:
return max(1, int(round(duration * SAMPLE_RATE)))
def _apply_envelope(
    signal: np.ndarray,
    attack: float,
    decay: float,
    sustain_level: float,
    release: float,
) -> np.ndarray:
    """Shape *signal* with a linear ADSR (attack/decay/sustain/release) envelope.

    Segment lengths are derived from the times (in seconds) and clamped, in
    order, so their sum can never exceed the signal length; whatever remains
    after attack, decay and release is held at ``sustain_level``.

    Returns a new enveloped array; *signal* is not modified in place.
    """
    total = signal.shape[0]
    attack_samples = min(total, int(round(attack * SAMPLE_RATE)))
    decay_samples = min(total - attack_samples, int(round(decay * SAMPLE_RATE)))
    release_samples = min(
        total - attack_samples - decay_samples, int(round(release * SAMPLE_RATE))
    )
    # The clamping above guarantees sustain_samples >= 0 and that the four
    # segments exactly tile the signal.
    sustain_samples = total - (attack_samples + decay_samples + release_samples)
    env = np.empty(total, dtype=np.float32)
    idx = 0
    if attack_samples > 0:
        # endpoint=False keeps each segment's final value distinct from the
        # next segment's starting value.
        env[idx : idx + attack_samples] = np.linspace(0.0, 1.0, attack_samples, False)
        idx += attack_samples
    if decay_samples > 0:
        env[idx : idx + decay_samples] = np.linspace(
            1.0, sustain_level, decay_samples, False
        )
        idx += decay_samples
    if sustain_samples > 0:
        env[idx : idx + sustain_samples] = sustain_level
        idx += sustain_samples
    if release_samples > 0:
        env[-release_samples:] = np.linspace(
            sustain_level, 0.0, release_samples, False
        )
    # NOTE: two dead branches were removed here. A back-fill whose condition
    # reduced to `sustain_samples < 0` (impossible after clamping), and an
    # all-zero-segments fallback that could only fire on an empty signal,
    # where assigning env[:] is a no-op anyway.
    return signal * env
def _download_piper_voice(voice_name: str, download_dir: Path) -> None:
    """Download the named Piper voice into *download_dir* when the helper exists."""
    if download_voice is not None:
        download_dir.mkdir(parents=True, exist_ok=True)
        logger.info("Downloading Piper voice %s into %s", voice_name, download_dir)
        download_voice(voice_name, download_dir)
    else:
        # piper's optional download helper failed to import at module load.
        logger.warning(
            "Automatic voice download requires `piper.download_voices`; install piper-tts to enable it."
        )
def _sine_glide(duration: float, start_freq: float, end_freq: float) -> np.ndarray:
    """Sine sweep whose frequency moves linearly from *start_freq* to *end_freq*.

    NOTE(review): the phase is approximated pointwise as 2*pi*f(t)*t rather
    than integrating frequency over time, matching the original behaviour.
    """
    count = _num_samples(duration)
    times = np.linspace(0.0, duration, count, False)
    freqs = np.linspace(start_freq, end_freq, count)
    return np.sin(2.0 * np.pi * freqs * times).astype(np.float32)
def _pink_noise(duration: float) -> np.ndarray:
    """Low-level Gaussian noise bed.

    NOTE(review): despite the name, this is white (flat-spectrum) noise,
    not 1/f pink noise — kept as-is to preserve the existing sound set.
    """
    count = _num_samples(duration)
    return np.random.normal(0.0, 0.15, count).astype(np.float32)
def _menu_select(duration: float) -> tuple[np.ndarray, list]:
    """Short ascending ping used for menu confirmation."""
    tone = _sine_glide(duration, 520, 960)
    tone = _apply_envelope(tone, 0.01, 0.12, 0.6, 0.15)
    return tone, [Chorus(rate_hz=1.1, depth=0.18, mix=0.6)]
def _power_up(duration: float) -> tuple[np.ndarray, list]:
    """Rising riser with a noisy shimmer and a gritty, reverberant tail."""
    tone = _sine_glide(duration, 270, 940) + 0.25 * _pink_noise(duration)
    tone = _apply_envelope(tone, 0.02, 0.26, 0.45, 0.2)
    chain = [
        Distortion(drive_db=14.0),
        Reverb(room_size=0.45, wet_level=0.25, dry_level=0.9),
    ]
    return tone, chain
def _level_up(duration: float) -> tuple[np.ndarray, list]:
    """Bright two-voice burst with chorus and a short echo."""
    root = _sine_glide(duration, 420, 660)
    overtone = 0.45 * _sine_glide(duration, 660, 840)
    # Normalise by the summed weights (1.0 + 0.45) to keep the mix in range.
    blended = (root + overtone) / 1.45
    blended = _apply_envelope(blended, 0.01, 0.18, 0.55, 0.25)
    chain = [
        Chorus(rate_hz=0.95, depth=0.24, mix=0.55),
        Delay(delay_seconds=0.18, feedback=0.25, mix=0.35),
    ]
    return blended, chain
def _swish(duration: float) -> tuple[np.ndarray, list]:
    """Noisy whoosh used for transitions."""
    noise = _apply_envelope(_pink_noise(duration), 0.1, 0.3, 0.2, 0.3)
    chain = [
        Reverb(room_size=0.7, wet_level=0.5, dry_level=0.4),
        Chorus(rate_hz=0.4, depth=0.2, mix=0.45),
    ]
    return noise, chain
def _thud(duration: float) -> tuple[np.ndarray, list]:
    """Low-frequency impact with a distorted, reverberant body."""
    body = 0.7 * _sine_glide(duration, 80, 120) + 0.3 * _pink_noise(duration)
    # Decay scales with duration; sustain level 0 lets the hit die away.
    body = _apply_envelope(body, 0.01, duration * 0.4, 0.0, 0.3)
    chain = [
        Distortion(drive_db=10.0),
        Reverb(room_size=0.85, wet_level=0.55, dry_level=0.3),
    ]
    return body, chain
# Catalogue of procedural effects rendered by _generate_sfx:
# (file stem, duration in seconds, human-readable description,
#  builder returning (signal, pedalboard effect list)).
SFX_DEFINITIONS = [
    ("menu_select", 0.65, "short ascending ping", _menu_select),
    ("power_up", 1.1, "riser with harmonic shimmer", _power_up),
    ("level_up", 0.9, "bright burst", _level_up),
    ("swish", 0.7, "noisy transition", _swish),
    ("thud", 1.0, "low impact", _thud),
]
def _render_ogg(path: Path, duration: float, builder) -> None:
    """Build a signal via *builder*, run its effect chain, and write it as OGG/Vorbis."""
    raw, effect_chain = builder(duration)
    if effect_chain:
        raw = Pedalboard(effect_chain)(raw, SAMPLE_RATE)
    # Hard-clip to [-1, 1] so the Vorbis encoder never sees out-of-range samples.
    clipped = np.clip(raw, -1.0, 1.0).astype(np.float32)
    sf.write(str(path), clipped, SAMPLE_RATE, format="OGG", subtype="VORBIS")
def _slugify(text: str) -> str:
return "".join(ch if ch.isalnum() else "_" for ch in text).strip("_").lower()
def _configure_logging(verbose: bool) -> None:
level = logging.DEBUG if verbose else logging.INFO
logging.basicConfig(level=level, format="%(levelname)s: %(message)s")
def _generate_sfx(output_dir: Path, force: bool) -> None:
    """Render every effect in SFX_DEFINITIONS into ``<output_dir>/sfx``.

    Existing files are kept unless *force* is set, in which case the whole
    folder is deleted and rebuilt from scratch.
    """
    target_dir = output_dir / "sfx"
    if force and target_dir.exists():
        logger.debug("Removing existing SFX folder %s", target_dir)
        shutil.rmtree(target_dir)
    target_dir.mkdir(parents=True, exist_ok=True)
    for name, duration, description, builder in SFX_DEFINITIONS:
        target = target_dir / f"{name}.ogg"
        if target.exists() and not force:
            # Lazy %-formatting (not f-strings) keeps these calls consistent
            # with the module's other log statements and defers interpolation.
            logger.info("Skipping existing sound: %s (%s)", target.name, description)
            continue
        logger.info("Rendering SFX: %s -> %s", name, target.name)
        _render_ogg(target, duration, builder)
def _load_piper_voice(model_path: Path, config_path: Optional[Path]) -> Optional["PiperVoice"]:
    """Load a Piper voice, returning None (with a warning) when prerequisites are missing."""
    if PiperVoice is None:
        logger.warning("piper-tts is not installed; skipping voice generation.")
        return None
    if not model_path.exists():
        logger.warning("Piper voice model not found at %s; skip TTS.", model_path)
        return None
    # Piper convention: the config sits next to the model as "<model>.json".
    config_file = config_path if config_path is not None else Path(f"{model_path}.json")
    if not config_file.exists():
        logger.warning(
            "Piper voice config not found at %s; skip TTS generation.", config_file
        )
        return None
    return PiperVoice.load(str(model_path), config_path=str(config_file))
def _synthesize_phrase_to_ogg(voice: "PiperVoice", phrase: str, path: Path) -> None:
    """Run Piper TTS for *phrase* and store the result as an OGG/Vorbis file."""
    pieces = [chunk.audio_float_array for chunk in voice.synthesize(phrase)]
    if not pieces:
        logger.warning("Piper generated no audio for phrase '%s'", phrase)
        return
    # Stitch the streamed chunks together and clamp into the valid range.
    waveform = np.clip(np.concatenate(pieces), -1.0, 1.0).astype(np.float32)
    sf.write(
        str(path),
        waveform,
        voice.config.sample_rate,
        format="OGG",
        subtype="VORBIS",
    )
def _generate_tts(
    output_dir: Path,
    force: bool,
    voice_model: Path,
    voice_config: Optional[Path],
) -> None:
    """Synthesize every TTS_PHRASES entry into ``<output_dir>/tts`` as OGG files.

    Does nothing when the Piper voice cannot be loaded. Existing files are
    kept unless *force* is set, in which case the folder is rebuilt.
    """
    voice = _load_piper_voice(voice_model, voice_config)
    if voice is None:
        return
    tts_dir = output_dir / "tts"
    if force and tts_dir.exists():
        logger.debug("Removing existing TTS folder %s", tts_dir)
        shutil.rmtree(tts_dir)
    tts_dir.mkdir(parents=True, exist_ok=True)
    for phrase in TTS_PHRASES:
        slug = _slugify(phrase)
        target = tts_dir / f"{slug}.ogg"
        if target.exists() and not force:
            # Lazy %-formatting (not f-strings) keeps these calls consistent
            # with the module's other log statements and defers interpolation.
            logger.info("Skipping existing voice: %s (%s)", target.name, phrase)
            continue
        logger.info("Rendering voice: %s -> %s", phrase, target.name)
        _synthesize_phrase_to_ogg(voice, phrase, target)
def main() -> None:
    """CLI entry point: regenerate the demo's SFX and TTS OGG assets.

    Side effects: configures logging, creates the output and voice folders,
    optionally downloads a Piper voice, then renders SFX and/or TTS files.
    """
    parser = argparse.ArgumentParser(
        description="Regenerate the OGG sound library for the demo."
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=Path(__file__).resolve().parent / "assets" / "audio",
        help="Where to store generated OGG files.",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Rebuild every asset even if a file already exists.",
    )
    parser.add_argument(
        "--skip-tts",
        action="store_true",
        help="Do not regenerate the text-to-speech phrases.",
    )
    parser.add_argument(
        "--skip-sfx",
        action="store_true",
        help="Do not regenerate the procedural sound effects.",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Enable debug logging while generating audio assets.",
    )
    parser.add_argument(
        "--piper-voice",
        default=DEFAULT_PIPER_VOICE_NAME,
        help="Piper voice identifier like 'en_US-lessac-medium'.",
    )
    parser.add_argument(
        "--piper-voice-model",
        type=Path,
        help="Path to the Piper ONNX voice model (defaults to <download-dir>/<voice>.onnx).",
    )
    parser.add_argument(
        "--piper-voice-config",
        type=Path,
        help="Path to the Piper voice config JSON (defaults to <model>.json).",
    )
    parser.add_argument(
        "--download-voice",
        action="store_true",
        help="Automatically download the Piper voice before generating TTS.",
    )
    args = parser.parse_args()
    _configure_logging(args.verbose)
    logger.debug("Output directory: %s", args.output_dir)
    # An explicit --piper-voice-model wins; otherwise the model is expected
    # at <voices-dir>/<voice>.onnx under the default download location.
    voice_model_directory = (
        args.piper_voice_model.parent if args.piper_voice_model else DEFAULT_PIPER_VOICE_DIR
    )
    voice_model = args.piper_voice_model or (
        voice_model_directory / f"{args.piper_voice}.onnx"
    )
    # None means "<model>.json" (resolved later in _load_piper_voice).
    voice_config = args.piper_voice_config
    voice_model.parent.mkdir(parents=True, exist_ok=True)
    args.output_dir.mkdir(parents=True, exist_ok=True)
    if args.download_voice:
        _download_piper_voice(args.piper_voice, voice_model.parent)
    if not args.skip_sfx:
        _generate_sfx(args.output_dir, args.force)
    if not args.skip_tts:
        _generate_tts(
            args.output_dir,
            args.force,
            voice_model,
            voice_config,
        )
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env python3
"""Produce a cube STL with CadQuery for the Lua scene to load."""
from __future__ import annotations
import argparse
from pathlib import Path
import cadquery as cq
from cadquery import exporters
def parse_args() -> argparse.Namespace:
    """Parse the command-line options for the cube generator."""
    cli = argparse.ArgumentParser(description="Generate a simple cube STL.")
    cli.add_argument(
        "--size",
        type=float,
        default=2.0,
        help="Edge length of the cube in model units (default: 2.0 to match Lua cube bounds).",
    )
    cli.add_argument(
        "-o",
        "--output",
        type=Path,
        default=Path(__file__).parent / "models" / "cube.stl",
        help="Path to write the ASCII STL file.",
    )
    return cli.parse_args()
def main() -> None:
    """Build the cube with CadQuery and export it as an ASCII STL."""
    args = parse_args()
    args.output.parent.mkdir(parents=True, exist_ok=True)
    solid = cq.Workplane("XY").box(args.size, args.size, args.size)
    exporters.export(
        solid,
        str(args.output),
        exportType=exporters.ExportTypes.STL,
        opt={"ascii": True},
    )
    print(f"Wrote cube STL to {args.output}")
if __name__ == "__main__":
    main()

239
python/package_lint.py Normal file
View File

@@ -0,0 +1,239 @@
#!/usr/bin/env python3
"""
Lightweight package validator that walks the `packages/` tree for all `package.json` files,
checks their npm-style schema, validates referenced assets/workflows/shaders/scenes, and logs
missing folders and schema violations.
"""
from __future__ import annotations
import argparse
import json
import logging
import sys
from pathlib import Path
from typing import Callable, Iterable, Optional, Sequence
# Folder names a package conventionally ships.
# NOTE(review): COMMON_FOLDERS appears unused in this module — confirm
# against other callers before removing.
COMMON_FOLDERS = ("assets", "scene", "shaders", "workflows")
# package.json keys that must be present for a package to validate cleanly.
REQUIRED_FIELDS = ("name", "version", "description", "workflows", "defaultWorkflow")
# Maps a package.json list field to the on-disk folder its entries live in.
FIELD_TO_FOLDER = {
    "assets": "assets",
    "scene": "scene",
    "shaders": "shaders",
    "workflows": "workflows",
}
logger = logging.getLogger("package_lint")
try:
from jsonschema import Draft7Validator
except ImportError:
Draft7Validator = None
def load_json(path: Path) -> dict:
    """Read *path* as UTF-8 text and return the decoded JSON object."""
    logger.debug("Reading JSON from %s", path)
    raw_text = path.read_text(encoding="utf-8")
    return json.loads(raw_text)
def check_paths(
    root: Path,
    entries: Iterable[str],
    key: str,
    on_exist: Optional[Callable[[Path, str], None]] = None,
) -> Sequence[str]:
    """Collect the *entries* that do not exist under *root*.

    Non-string entries are reported as missing with a repr marker. Each entry
    that does exist is optionally handed to *on_exist* along with its
    relative name.
    """
    missing = []
    for entry in entries:
        if not isinstance(entry, str):
            missing.append(f"{entry!r} (not a string)")
            continue
        resolved = root / entry
        logger.debug("Checking %s entry %s", key, resolved)
        if resolved.exists():
            if on_exist is not None:
                on_exist(resolved, entry)
        else:
            missing.append(str(entry))
    return missing
def validate_workflow_schema(workflow_path: Path, validator) -> list[str]:
    """Return human-readable schema problems for one workflow JSON file.

    Unparseable JSON yields a single "invalid JSON" entry; otherwise every
    validator error is reported with its JSON-pointer-like location.
    """
    try:
        document = load_json(workflow_path)
    except json.JSONDecodeError as exc:
        return [f"invalid JSON: {exc}"]
    findings: list[str] = []
    ordered = sorted(validator.iter_errors(document), key=lambda e: tuple(e.absolute_path))
    for error in ordered:
        location = "/".join(str(part) for part in error.absolute_path) or "<root>"
        findings.append(f"schema violation at {location}: {error.message}")
    return findings
def validate_package(
    pkg_root: Path,
    pkg_data: dict,
    registry_names: Sequence[str],
    available_dirs: Sequence[str],
    workflow_schema_validator: Optional["Draft7Validator"] = None,
) -> tuple[list[str], list[str]]:
    """Validate one package's metadata and referenced files.

    Returns ``(errors, warnings)``: errors are structural/schema violations,
    warnings cover missing-but-tolerated files and unknown dependencies.
    """
    errors: list[str] = []
    warnings: list[str] = []
    logger.debug("Validating %s", pkg_root)
    for field in REQUIRED_FIELDS:
        if field not in pkg_data:
            errors.append(f"missing required field `{field}`")
    workflows = pkg_data.get("workflows")
    default_workflow = pkg_data.get("defaultWorkflow")
    if workflows and isinstance(workflows, list):
        if default_workflow and default_workflow not in workflows:
            errors.append("`defaultWorkflow` is not present in `workflows` array")
    # schema-like validations on the list-valued fields
    for key in ("workflows", "assets", "scene", "shaders"):
        value = pkg_data.get(key)
        if value is None:
            continue
        if not isinstance(value, list):
            errors.append(f"`{key}` must be an array if present")
            continue
        on_exist: Optional[Callable[[Path, str], None]] = None
        if key == "workflows" and workflow_schema_validator:
            def on_exist(candidate: Path, rel: str) -> None:
                # Invoked for each workflow file that exists on disk.
                schema_issues = validate_workflow_schema(candidate, workflow_schema_validator)
                for issue in schema_issues:
                    errors.append(f"workflow `{rel}`: {issue}")
        missing = check_paths(pkg_root, value, key, on_exist=on_exist)
        if missing:
            warnings.append(f"{key} entries not found: {missing}")
    # dependencies validation
    # BUG FIX: an explicit `"dependencies": null` (or any falsy non-list such
    # as 0) previously slipped past `if deps and not isinstance(...)` and
    # crashed with TypeError in the for-loop below; normalise falsy to [].
    deps = pkg_data.get("dependencies") or []
    if not isinstance(deps, list):
        errors.append("`dependencies` must be an array")
    else:
        known_names = set(registry_names)
        known_names.update(available_dirs)
        for dep in deps:
            if dep not in known_names:
                warnings.append(f"dependency `{dep}` is not known in registry")
    # common folder existence
    for field, folder in FIELD_TO_FOLDER.items():
        entries = pkg_data.get(field) or []
        if entries and not (pkg_root / folder).exists():
            warnings.append(f"common folder `{folder}` referenced but missing")
    return errors, warnings
def main() -> int:
    """Lint every package directory under --packages-root.

    Exit codes: 0 = no errors (warnings allowed), 1 = validation errors
    found, 2 = packages root missing, 5 = explicitly given schema file
    missing, 6 = schema file is invalid JSON, 7 = schema failed to compile.
    """
    parser = argparse.ArgumentParser(description="Validate package metadata and assets.")
    parser.add_argument(
        "--packages-root",
        type=Path,
        default=Path("packages"),
        help="Root folder containing package directories",
    )
    parser.add_argument(
        "--workflow-schema",
        type=Path,
        help="Optional workflow JSON schema (default: config/schema/workflow_v1.schema.json when available)",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Enable debug logging for tracing validation steps",
    )
    args = parser.parse_args()
    logging.basicConfig(
        format="%(levelname)s: %(message)s",
        level=logging.DEBUG if args.verbose else logging.INFO,
    )
    if not args.packages_root.exists():
        logger.error("packages root %s does not exist", args.packages_root)
        return 2
    # Fall back to the repo's default schema only when it actually exists;
    # an explicitly supplied path that is missing is a hard error below.
    schema_candidate = args.workflow_schema
    default_schema = Path("config/schema/workflow_v1.schema.json")
    if schema_candidate is None and default_schema.exists():
        schema_candidate = default_schema
    workflow_validator: Optional["Draft7Validator"] = None
    if schema_candidate:
        if not schema_candidate.exists():
            logger.error("specified workflow schema %s not found", schema_candidate)
            return 5
        try:
            workflow_schema = load_json(schema_candidate)
        except json.JSONDecodeError as exc:
            logger.error("invalid JSON schema %s: %s", schema_candidate, exc)
            return 6
        if Draft7Validator is None:
            # jsonschema is an optional dependency; without it everything
            # else is still linted, only workflow schema checks are skipped.
            logger.warning("jsonschema dependency not installed; skipping workflow schema validation")
        else:
            try:
                workflow_validator = Draft7Validator(workflow_schema)
            except Exception as exc:
                logger.error("failed to compile workflow schema %s: %s", schema_candidate, exc)
                return 7
    # Only direct children that contain a package.json count as packages.
    package_dirs = [
        child
        for child in sorted(args.packages_root.iterdir())
        if child.is_dir() and (child / "package.json").exists()
    ]
    if not package_dirs:
        logger.warning("no package directories with package.json found under %s", args.packages_root)
    # First pass: parse every package.json so the dependency check in the
    # second pass can see the complete set of registry names.
    loaded_packages: list[tuple[Path, dict]] = []
    summary_errors = 0
    summary_warnings = 0
    for pkg_root in package_dirs:
        pkg_json_file = pkg_root / "package.json"
        try:
            pkg_data = load_json(pkg_json_file)
        except json.JSONDecodeError as exc:
            logger.error("invalid JSON in %s: %s", pkg_json_file, exc)
            summary_errors += 1
            continue
        loaded_packages.append((pkg_root, pkg_data))
    registry_names = [
        pkg_data.get("name")
        for _, pkg_data in loaded_packages
        if isinstance(pkg_data.get("name"), str)
    ]
    available_dirs = [entry.name for entry in args.packages_root.iterdir() if entry.is_dir()]
    # Second pass: validate each package against the collected registry.
    for pkg_root, pkg_data in loaded_packages:
        pkg_json_file = pkg_root / "package.json"
        errors, warnings = validate_package(
            pkg_root,
            pkg_data,
            registry_names,
            available_dirs,
            workflow_validator,
        )
        for err in errors:
            logger.error("%s: %s", pkg_json_file, err)
        for warn in warnings:
            logger.warning("%s: %s", pkg_json_file, warn)
        summary_errors += len(errors)
        summary_warnings += len(warnings)
    logger.info("lint complete: %d errors, %d warnings", summary_errors, summary_warnings)
    return 1 if summary_errors else 0
if __name__ == "__main__":
    sys.exit(main())