From fdc0880f2b3aaaaec4da38bb286d75ca39d3ba9b Mon Sep 17 00:00:00 2001 From: jremitz Date: Mon, 23 Mar 2026 21:25:22 -0500 Subject: [PATCH 1/4] =?UTF-8?q?Release=20v0.0.33=20=E2=80=94=20smart=20zoo?= =?UTF-8?q?m,=20branding,=20speed=20segments,=20roster=20lookup,=20iterati?= =?UTF-8?q?ons?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Major feature release: - Smart zoom tracking via vision plugins (--smart flag) - Branding overlay on rendered shorts (configurable, --no-branding to disable) - Cross-fade transitions between iterations - Speed segments for variable speed within clips - Player roster lookup via --player-numbers flag - Game finish output relocation to prevent collisions - Plugin doctor checks, tournament context, landscape/portrait hook data Co-Authored-By: Claude --- CHANGELOG.md | 54 +- docs/cli/render.md | 68 +- docs/guide/configuration.md | 125 ++ reeln/__init__.py | 2 +- reeln/cli.py | 25 +- reeln/commands/game.py | 29 +- reeln/commands/render.py | 617 +++++++- reeln/core/config.py | 34 +- reeln/core/debug.py | 60 +- reeln/core/ffmpeg.py | 120 +- reeln/core/finish.py | 57 +- reeln/core/highlights.py | 61 +- reeln/core/iterations.py | 122 +- reeln/core/overlay.py | 37 +- reeln/core/profiles.py | 78 +- reeln/core/prompts.py | 19 +- reeln/core/renderer.py | 76 +- reeln/core/shorts.py | 511 +++++- reeln/core/teams.py | 102 +- reeln/core/templates.py | 2 + reeln/core/zoom.py | 257 +++ reeln/core/zoom_debug.py | 201 +++ reeln/data/templates/goal_overlay.ass | 8 +- reeln/models/branding.py | 20 + reeln/models/config.py | 2 + reeln/models/game.py | 18 + reeln/models/profile.py | 44 + reeln/models/short.py | 11 + reeln/models/team.py | 9 + reeln/models/zoom.py | 41 + reeln/plugins/hooks.py | 1 + reeln/plugins/loader.py | 43 +- registry/plugins.json | 6 +- tests/integration/conftest.py | 1 - tests/integration/test_game_lifecycle.py | 14 +- tests/unit/commands/test_cli.py | 39 + tests/unit/commands/test_game.py | 
106 +- tests/unit/commands/test_plugins_cmd.py | 12 +- tests/unit/commands/test_render.py | 1815 +++++++++++++++++++++- tests/unit/core/test_config.py | 84 + tests/unit/core/test_debug.py | 65 +- tests/unit/core/test_ffmpeg.py | 161 +- tests/unit/core/test_finish.py | 137 +- tests/unit/core/test_highlights.py | 260 +++- tests/unit/core/test_iterations.py | 621 +++++++- tests/unit/core/test_overlay.py | 64 +- tests/unit/core/test_plugin_config.py | 8 +- tests/unit/core/test_profiles.py | 88 +- tests/unit/core/test_prompts.py | 70 + tests/unit/core/test_prune.py | 41 + tests/unit/core/test_renderer.py | 164 ++ tests/unit/core/test_shorts.py | 873 ++++++++++- tests/unit/core/test_teams.py | 240 ++- tests/unit/core/test_templates.py | 16 + tests/unit/core/test_zoom.py | 519 +++++++ tests/unit/core/test_zoom_debug.py | 489 ++++++ tests/unit/models/test_branding.py | 63 + tests/unit/models/test_game.py | 195 +++ tests/unit/models/test_profile.py | 147 +- tests/unit/models/test_short.py | 39 + tests/unit/models/test_team.py | 19 + tests/unit/models/test_zoom.py | 134 ++ tests/unit/plugins/test_hooks.py | 3 +- tests/unit/plugins/test_loader.py | 181 ++- 64 files changed, 9176 insertions(+), 352 deletions(-) create mode 100644 reeln/core/zoom.py create mode 100644 reeln/core/zoom_debug.py create mode 100644 reeln/models/branding.py create mode 100644 reeln/models/zoom.py create mode 100644 tests/unit/core/test_zoom.py create mode 100644 tests/unit/core/test_zoom_debug.py create mode 100644 tests/unit/models/test_branding.py create mode 100644 tests/unit/models/test_zoom.py diff --git a/CHANGELOG.md b/CHANGELOG.md index d12e919..4d877e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,59 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/), and this project adheres to [Semantic Versioning](https://semver.org/). 
-## [Unreleased] +## [0.0.33] - 2026-03-23 + +### Added +- Branding overlay on rendered shorts: shows "reeln v{version} by https://streamn.dad" with a black-bordered white text at the top of the video for the first ~5 seconds with a smooth fade-out — enabled by default, configurable via `branding` config section, disable with `--no-branding` CLI flag +- `BrandingConfig` model (`enabled`, `template`, `duration`) for per-user branding customization +- Bundled `branding.ass` ASS template with `\fad(300,800)` animation and black outline for visibility over any background +- `--no-branding` flag on `render short` and `render preview` commands +- Branding renders only on the first iteration in multi-iteration mode +- Cross-fade transitions between iterations: uses ffmpeg `xfade` + `acrossfade` filters for smooth 0.5s fade transitions instead of hard cuts, with automatic fallback to concat demuxer if xfade fails +- Smart zoom support in the iteration pipeline: `--smart --iterate` now extracts frames once upfront and passes the zoom path through to each iteration's `plan_short()` call +- `speed_segments` in render profiles for variable speed within a single clip — e.g., normal speed → slow motion → normal speed, using the proven split/trim/concat ffmpeg pattern +- `--player-numbers` (`-n`) flag on `render short`, `render preview`, and `render apply` for roster-based player lookup: accepts comma-separated jersey numbers (e.g., `--player-numbers 48,24,2`), looks up names from the team roster CSV, and populates goal scorer and assist overlays automatically +- `--event-type` flag on render commands for scoring team resolution: `HOME_GOAL`/`AWAY_GOAL` determines which team's roster to look up +- `RosterEntry` data model and `load_roster()` / `lookup_players()` / `resolve_scoring_team()` core functions for roster management +- `GameInfo` now persists `level`, `home_slug`, and `away_slug` when `game init --level` is used, enabling roster lookup during rendering +- 
`build_overlay_context()` accepts optional `scoring_team` parameter to override the default (home team) +- Smart target zoom (`--crop smart`): extracts frames from clips, emits `ON_FRAMES_EXTRACTED` hook for vision plugins (e.g. reeln-plugin-openai) to detect action targets, then builds dynamic ffmpeg crop expressions that smoothly pan across detected targets +- `ZoomPoint`, `ZoomPath`, and `ExtractedFrames` data models for smart zoom contracts +- `ON_FRAMES_EXTRACTED` lifecycle hook for plugins to analyze extracted video frames +- `extract_frames()` method on the Renderer protocol and FFmpegRenderer for frame extraction +- `build_piecewise_lerp()` and `build_smart_crop_filter()` for dynamic ffmpeg crop expressions +- `--zoom-frames` option on `render short` and `render preview` (default 5, range 1-20) +- Zoom debug output: `debug/zoom/zoom_path.json` and frame symlinks when `--debug` is used with smart crop +- Smart pad mode (`--crop smart_pad`): follows action vertically like smart zoom but keeps black bars instead of filling the entire frame — falls back to static pad when no vision plugin provides data +- `build_smart_pad_filter()` for dynamic vertical pad positioning based on zoom path center_y +- Debug crosshair annotations: extracted frames in `debug/zoom/` now include annotated copies with green crop box and red crosshair overlays showing detected center points +- `--scale` option on `render short` and `render preview` (0.5-3.0, default 1.0): zooms in by scaling up the intermediate frame before crop/pad — works with all crop modes including smart tracking +- `--smart` flag on `render short` and `render preview`: enables smart tracking via vision plugin as an orthogonal option, composable with `--crop pad|crop` and `--scale` +- `build_overflow_crop_filter()` for pad + scale > 1.0: crops overflow after scale-up before padding +- Automatic fallback from smart crop to center crop (or smart_pad to static pad) when no vision plugin provides zoom data +- 
`--no-enforce-hooks` global CLI flag to temporarily disable registry-based hook enforcement for plugins +- `game finish` now relocates segment and highlights outputs from the shared output directory into `game_dir/outputs/`, preventing file collisions across multiple games per day +- `game init` now blocks with a clear error if an unfinished game exists — run `reeln game finish` first +- `GameState` tracks `segment_outputs` and `highlights_output` for file relocation +- `find_unfinished_games()` helper scans for active game directories +- `relocate_outputs()` helper moves output files into the game directory +- `reeln doctor` now collects and runs health checks from plugins that implement `doctor_checks()` +- `doctor` capability added to plugin duck-type detection +- `--tournament` CLI flag on `game init` for optional tournament name/context — flows through to plugins via hook context and overlay templates +- `tournament` and `level` fields now included in template context (`build_base_context()`), available as `{{tournament}}` and `{{level}}` in ASS subtitle templates + +### Changed +- Scale, framing (crop/pad), and smart tracking are now orthogonal axes — any combination works without dedicated enum values +- Short/preview renders now output to a `shorts/` subdirectory by default (e.g., `period-2/shorts/clip_short.mp4`) to prevent segment merges from picking up rendered files + +### Deprecated +- `--crop smart` — use `--crop crop --smart` instead (still works, shows deprecation warning) +- `--crop smart_pad` — use `--crop pad --smart` instead (still works, shows deprecation warning) + +### Fixed +- `team_level` in overlay context now uses the actual team level (e.g., "2016", "bantam") instead of the sport name — previously showed "hockey" instead of the level +- Segment merge and highlights merge output extension now matches input files instead of being hardcoded to `.mkv` +- Highlights merge now discovers segment files with any video extension (`.mp4`, `.mkv`, 
`.mov`, etc.), not just `.mkv` ## [0.0.32] - 2026-03-15 diff --git a/docs/cli/render.md b/docs/cli/render.md index d7ac39b..dd400c3 100644 --- a/docs/cli/render.md +++ b/docs/cli/render.md @@ -21,6 +21,9 @@ reeln render short [CLIP] [OPTIONS] | `--crop`, `-c` | Crop mode: `pad` (fit with bars) or `crop` (fill and trim). Default: `pad` | | `--anchor`, `-a` | Crop anchor: `center`, `top`, `bottom`, `left`, `right`, or custom `x,y` (0.0–1.0). Default: `center` | | `--pad-color` | Pad bar color (default: `black`) | +| `--scale` | Content scale, 0.5–3.0 (default: `1.0`). Values > 1.0 zoom in on the source. | +| `--smart` | Enable smart tracking via vision plugin (requires an `ON_FRAMES_EXTRACTED` handler). | +| `--zoom-frames` | Number of frames to extract for smart zoom analysis, 1–20 (default: `5`). | | `--speed` | Playback speed, 0.5–2.0 (default: `1.0`) | | `--lut` | LUT file for color grading (`.cube` or `.3dl`) | | `--subtitle` | ASS subtitle overlay file (`.ass`) | @@ -29,8 +32,11 @@ reeln render short [CLIP] [OPTIONS] | `--render-profile`, `-r` | Named render profile from config | | `--player` | Player name for overlay (populates `{{player}}` / `{{goal_scorer_text}}` in subtitle templates) | | `--assists` | Assists, comma-separated (populates `{{goal_assist_1}}` / `{{goal_assist_2}}` in subtitle templates) | +| `--player-numbers`, `-n` | Jersey numbers: `scorer[,assist1[,assist2]]`. Looked up from team roster CSV. | +| `--event-type` | Event type for scoring team resolution (`HOME_GOAL`, `AWAY_GOAL`). Used with `--player-numbers`. 
| | `--iterate` | Multi-iteration mode — apply iteration profiles from config | | `--debug` | Write debug artifacts (ffmpeg commands, metadata) to `{game_dir}/debug/` | +| `--no-branding` | Disable the default branding overlay | | `--profile` | Named config profile | | `--config` | Explicit config file path | | `--dry-run` | Show render plan without executing | @@ -41,6 +47,23 @@ When `--iterate` is provided, reeln looks up the iteration profile list for the When `--player` and/or `--assists` are provided, they populate the overlay template context — useful for rendering goal overlays without going through the game event tagging system. These flags override any player/assists data from linked game events. They require a `--render-profile` with a `subtitle_template` to take effect. +#### Player number roster lookup + +When `--player-numbers` is provided (e.g. `--player-numbers 48,24,2`), reeln looks up player names from the team roster CSV: + +1. The first number is the **goal scorer**, remaining numbers are **assists** +2. The **scoring team** is determined from `--event-type`: `HOME_GOAL`/`home_goal` → home team, `AWAY_GOAL`/`away_goal` → away team, anything else defaults to home +3. The team profile is loaded using `level` and team slug from `game.json` +4. Player names are formatted as `#48 Smith` (number + last name) +5. Unknown numbers fall back to `#48` with a warning + +Requirements: +- A game directory (`--game-dir` or auto-detected) +- The game must have been initialized with `--level` (to store team profile references) +- The team profile must have a `roster_path` pointing to a valid CSV file + +If `--player` or `--assists` are also provided, they take precedence over the roster lookup. + Builds an ffmpeg filter graph to reframe the input clip as a vertical short suitable for social media platforms (YouTube Shorts, Instagram Reels, TikTok). 
#### Auto-discovery @@ -64,9 +87,50 @@ Encoding parameters (codec, preset, CRF, audio codec, audio bitrate) flow from t - **`pad`** — Fits the entire source frame into the target dimensions with letterbox/pillarbox bars. Nothing is cropped. The `--pad-color` option controls bar color. - **`crop`** — Fills the target dimensions by cropping the source. The `--anchor` option controls which region of the source is kept. +#### Smart tracking + +Rendering has three orthogonal axes that compose independently: + +- **Framing** (`--crop pad|crop`) — how the source fits the target dimensions +- **Scale** (`--scale`) — content zoom level +- **Tracking** (`--smart`) — dynamic crop/pan following the action + +When `--smart` is enabled, reeln extracts frames from the clip and emits the `ON_FRAMES_EXTRACTED` hook. A vision plugin (e.g. `reeln-plugin-openai`) analyzes the frames and returns a zoom path — a sequence of (timestamp, center_x, center_y) points describing where the action is. The render filter chain then dynamically adjusts the crop or pad position to follow the action. + +- **Smart crop** (`--crop crop --smart`) — fills the target by cropping, with the crop window tracking the action point. +- **Smart pad** (`--crop pad --smart`) — fits the source with pillarbox bars, panning horizontally to keep the action centered. Vertical position stays fixed (vertical panning is disorienting in pad mode). + +If no vision plugin handles `ON_FRAMES_EXTRACTED`, `--smart` falls back to static center positioning with a warning. + +:::{note} +The legacy crop modes `--crop smart` and `--crop smart_pad` still work but are deprecated. Use `--crop crop --smart` and `--crop pad --smart` instead. 
+::: + +```bash +# Smart crop — fills 9:16, tracking the action +reeln render short clip.mkv --crop crop --smart + +# Smart pad — fits with bars, panning horizontally +reeln render short clip.mkv --crop pad --smart + +# Smart crop with zoom +reeln render short clip.mkv --crop crop --smart --scale 1.5 + +# More frames for finer tracking +reeln render short clip.mkv --smart --zoom-frames 10 +``` + +#### Variable speed segments + +For variable speed within a single clip (e.g., normal → slow motion → normal), use `speed_segments` in a render profile. This is a profile-only feature — there is no CLI flag. See {doc}`/guide/configuration` for details. + +:::{note} +`speed_segments` cannot be combined with `--smart` tracking. Static crop/pad with speed segments works. +::: + #### Filter chain order -LUT (color grade) → speed (`setpts`) → scale → pad/crop → subtitle overlay. +LUT (color grade) → speed (`setpts`) → scale → overflow crop (pad + scale > 1.0) → crop/pad → final scale (crop only) → subtitle overlay. **Examples:** @@ -101,7 +165,7 @@ Generate a fast low-resolution preview of a clip. reeln render preview [CLIP] [OPTIONS] ``` -Accepts the same options as `render short` (including `--render-profile`, `--iterate`, and `--debug`). Produces a scaled-down, lower-quality version for quick review before committing to a full render. +Accepts the same options as `render short` (including `--render-profile`, `--iterate`, `--scale`, `--smart`, `--zoom-frames`, and `--debug`). Produces a scaled-down, lower-quality version for quick review before committing to a full render. Preview differences: - Uses `ultrafast` preset (vs `medium`) diff --git a/docs/guide/configuration.md b/docs/guide/configuration.md index 9f5b0ea..7da3b32 100644 --- a/docs/guide/configuration.md +++ b/docs/guide/configuration.md @@ -109,6 +109,8 @@ Named render profiles define reusable rendering parameter overrides. 
Add a `rend | `crop_mode` | string | `"pad"` or `"crop"` (short-form only) | | `anchor_x` | float | Crop anchor X position, 0.0–1.0 (short-form only) | | `anchor_y` | float | Crop anchor Y position, 0.0–1.0 (short-form only) | +| `scale` | float | Content scale, 0.5–3.0 (default: 1.0). Values > 1.0 zoom in. | +| `smart` | bool | Enable smart tracking via vision plugin | | `pad_color` | string | Pad bar color (short-form only) | | `codec` | string | Video codec override | | `preset` | string | Encoder preset override | @@ -118,6 +120,35 @@ Named render profiles define reusable rendering parameter overrides. Add a `rend All fields are optional — `null` or omitted means "inherit from base config". +#### Variable speed (speed_segments) + +For variable-speed rendering within a single clip, use `speed_segments` instead of the scalar `speed` field. Each segment defines a speed and a source-time boundary: + +```json +{ + "render_profiles": { + "slowmo-middle": { + "speed_segments": [ + {"until": 5.0, "speed": 1.0}, + {"until": 8.0, "speed": 0.5}, + {"speed": 1.0} + ] + } + } +} +``` + +This plays the first 5 seconds at normal speed, then 3 seconds at half speed, then the rest at normal speed. The last segment must omit `until` (it runs to the end of the clip). + +Rules: +- At least 2 segments required (use scalar `speed` for uniform speed) +- `until` values must be strictly increasing and positive +- Speeds must be in the range 0.25–4.0 +- `speed` and `speed_segments` are mutually exclusive — set one or the other, not both +- `speed_segments` cannot be combined with `--smart` tracking + +`speed_segments` is profile-only — there is no CLI flag for it. Configure it in a render profile and use `--render-profile` to apply it. + Profiles are used with `--render-profile` on `render short`, `render preview`, `render apply`, `game segment`, and `game highlights`. 
#### Builtin templates @@ -152,6 +183,37 @@ reeln render short clip.mkv --render-profile player-overlay \ reeln render short clip.mkv --iterate --game-dir . --event ``` +#### Smart tracking + +Smart tracking (`--smart` or `"smart": true` in a profile) requires a vision plugin that handles the `ON_FRAMES_EXTRACTED` hook. Without one, `--smart` falls back to static center positioning with a warning. + +The [reeln-plugin-openai](https://github.com/StreamnDad/reeln-plugin-openai) package provides smart zoom via OpenAI's vision API. Enable it in plugin settings: + +```json +{ + "plugins": { + "enabled": ["openai"], + "settings": { + "openai": { + "api_key": "sk-...", + "smart_zoom_enabled": true, + "smart_zoom_model": "gpt-4o" + } + } + } +} +``` + +When smart tracking is active, the render pipeline: + +1. Extracts frames from the clip (`--zoom-frames` controls how many, default 5) +2. Emits `ON_FRAMES_EXTRACTED` — the vision plugin analyzes frames and returns a zoom path +3. Builds dynamic ffmpeg expressions that follow the action throughout the clip + +Smart tracking composes with both crop modes: +- **crop + smart** — the crop window tracks the action point +- **pad + smart** — pillarbox bars pan horizontally to center the action (vertical position stays fixed) + ### Iterations section The `iterations` section maps event types to ordered lists of profile names. This is used for multi-iteration rendering where each event type gets a different sequence of render passes: @@ -183,6 +245,28 @@ reeln game highlights --iterate Each profile in the list is applied in order, and the iteration outputs are concatenated end-to-end into a single final file. For example, a goal event with profiles `["fullspeed", "slowmo", "goal-overlay"]` produces a video that plays the clip at full speed, then slow motion, then with the goal overlay — all stitched together automatically. 
+### Branding section + +The `branding` section controls the branding overlay shown at the start of rendered shorts: + +```json +{ + "branding": { + "enabled": true, + "template": "builtin:branding", + "duration": 5.0 + } +} +``` + +| Key | Default | Description | +|---|---|---| +| `enabled` | `true` | Whether to show branding overlay | +| `template` | `"builtin:branding"` | Template path — `"builtin:branding"` for the default, or a path to a custom `.ass` file | +| `duration` | `5.0` | How long the branding is visible in seconds | + +The builtin branding overlay displays "reeln v{version} by https://streamn.dad" in bold white text with a black outline at the top of the video, fading in over 300ms and fading out over 800ms. To disable branding entirely, set `enabled` to `false` or use `--no-branding` on the CLI. + ### Orchestration section The `orchestration` section controls the plugin pipeline behavior: @@ -347,6 +431,47 @@ When any command is run with `--debug`, pipeline debug artifacts are written to Debug artifacts are automatically removed by `game prune` (no `--all` flag needed). Open `debug/index.html` in a browser for a quick overview of all operations performed on a game. +## Team profiles and rosters + +Team profiles are stored as JSON files in the config directory under `teams/{level}/{slug}.json`. When you initialize a game with `--level`, the team level and slugs are persisted in `game.json`, enabling roster-based player lookup during rendering. + +### Setting up rosters for player number lookup + +1. **Create team profiles** with `roster_path` pointing to a CSV file: + +```json +{ + "team_name": "Eagles", + "short_name": "EGL", + "level": "bantam", + "roster_path": "/path/to/eagles_roster.csv", + "colors": ["#C8102E", "#000000"] +} +``` + +2. **Create the roster CSV** with `number`, `name`, and `position` columns: + +```csv +number,name,position +48,John Smith,C +24,Jane Doe,D +2,Bob Jones,RW +``` + +3. 
**Initialize games with `--level`** to persist team profile references: + +```bash +reeln game init eagles bears --level bantam --sport hockey +``` + +4. **Use `--player-numbers` during rendering** to look up players from the roster: + +```bash +reeln render short clip.mkv --player-numbers 48,24,2 --event-type HOME_GOAL -r overlay +``` + +This resolves to `#48 Smith` (scorer) with assists `#24 Doe` and `#2 Jones`. + ## Schema versioning Every config file includes a `config_version` field. When the schema changes, reeln provides migration functions to upgrade configs automatically. diff --git a/reeln/__init__.py b/reeln/__init__.py index 35854c6..fb11a43 100644 --- a/reeln/__init__.py +++ b/reeln/__init__.py @@ -2,4 +2,4 @@ from __future__ import annotations -__version__ = "0.0.32" +__version__ = "0.0.33" diff --git a/reeln/cli.py b/reeln/cli.py index 0bc6d45..4ef44ea 100644 --- a/reeln/cli.py +++ b/reeln/cli.py @@ -88,10 +88,20 @@ def main( envvar="REELN_LOG_LEVEL", help="Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL.", ), + no_enforce_hooks: bool = typer.Option( + False, + "--no-enforce-hooks", + help="Disable registry-based hook enforcement for plugins.", + ), ) -> None: """Platform-agnostic CLI toolkit for livestreamers.""" import logging + if no_enforce_hooks: + from reeln.plugins.loader import set_enforce_hooks_override + + set_enforce_hooks_override(disable=True) + numeric_level = getattr(logging, log_level.upper(), None) if not isinstance(numeric_level, int): typer.echo(f"Invalid log level: {log_level}", err=True) @@ -104,10 +114,21 @@ def doctor( profile: str | None = typer.Option(None, "--profile", help="Named config profile."), config: Path | None = typer.Option(None, "--config", help="Explicit config file path."), ) -> None: - """Run health checks: ffmpeg, config, permissions.""" + """Run health checks: ffmpeg, config, permissions, plugins.""" + from reeln.core.config import load_config from reeln.core.doctor import doctor_exit_code, format_results, 
run_doctor + from reeln.models.doctor import DoctorCheck + from reeln.plugins.loader import activate_plugins, collect_doctor_checks + + extra: list[DoctorCheck] = [] + try: + cfg = load_config(path=config, profile=profile) + loaded = activate_plugins(cfg.plugins) + extra = collect_doctor_checks(loaded) + except Exception: + pass # config/plugin errors are reported by run_doctor's own checks - results = run_doctor(config_path=config, profile=profile) + results = run_doctor(config_path=config, profile=profile, extra_checks=extra) lines = format_results(results) for line in lines: typer.echo(line) diff --git a/reeln/commands/game.py b/reeln/commands/game.py index ddcdfbc..3c2a3c0 100644 --- a/reeln/commands/game.py +++ b/reeln/commands/game.py @@ -13,6 +13,7 @@ from reeln.core.ffmpeg import discover_ffmpeg from reeln.core.highlights import init_game, merge_game_highlights, process_segment from reeln.core.prompts import collect_game_info_interactive +from reeln.core.teams import slugify from reeln.models.game import GameInfo from reeln.plugins.loader import activate_plugins @@ -139,6 +140,7 @@ def init( period_length: int = typer.Option(0, "--period-length", help="Period/segment length in minutes (0 = not set)."), description: str = typer.Option("", "--description", "-d", help="Broadcast description."), thumbnail: str = typer.Option("", "--thumbnail", help="Thumbnail image file path."), + tournament: str = typer.Option("", "--tournament", help="Tournament name (optional context for plugins)."), output_dir: Path | None = typer.Option(None, "--output-dir", "-o", help="Base output directory."), profile: str | None = typer.Option(None, "--profile", help="Named config profile."), config_path: Path | None = typer.Option(None, "--config", help="Explicit config file path."), @@ -168,9 +170,11 @@ def init( game_date=game_date, venue=None if venue == "" else venue, game_time=None if game_time == "" else game_time, + level=level, period_length=None if period_length == 0 else 
period_length, description=None if description == "" else description, thumbnail=None if thumbnail == "" else thumbnail, + tournament=None if tournament == "" else tournament, ) except PromptAborted: raise typer.Abort() from None @@ -190,6 +194,10 @@ def init( period_length=info["period_length"], description=info["description"], thumbnail=info["thumbnail"], + tournament=info["tournament"], + level=level or "", + home_slug=slugify(info["home"]) if level else "", + away_slug=slugify(info["away"]) if level else "", ) else: # Non-interactive mode — use CLI args directly @@ -197,14 +205,21 @@ def init( home_team = home away_team = away + resolved_level = "" + home_slug = "" + away_slug = "" + if level is not None: try: - from reeln.core.teams import load_team_profile, slugify + from reeln.core.teams import load_team_profile - home_profile = load_team_profile(level, slugify(home)) - away_profile = load_team_profile(level, slugify(away)) + home_slug = slugify(home) + away_slug = slugify(away) + home_profile = load_team_profile(level, home_slug) + away_profile = load_team_profile(level, away_slug) home_team = home_profile.team_name away_team = away_profile.team_name + resolved_level = level except ReelnError as exc: typer.echo(f"Error: {exc}", err=True) raise typer.Exit(code=1) from exc @@ -219,12 +234,14 @@ def init( period_length=period_length, description=description, thumbnail=thumbnail, + tournament=tournament, + level=resolved_level, + home_slug=home_slug, + away_slug=away_slug, ) try: - _, messages = init_game( - base, game_info, dry_run=dry_run, home_profile=home_profile, away_profile=away_profile - ) + _, messages = init_game(base, game_info, dry_run=dry_run, home_profile=home_profile, away_profile=away_profile) except ReelnError as exc: typer.echo(f"Error: {exc}", err=True) raise typer.Exit(code=1) from exc diff --git a/reeln/commands/render.py b/reeln/commands/render.py index 2e3eda7..fcc0057 100644 --- a/reeln/commands/render.py +++ b/reeln/commands/render.py 
@@ -4,6 +4,7 @@ from datetime import UTC, datetime from pathlib import Path +from typing import Any import typer @@ -82,8 +83,11 @@ def _find_latest_video(directory: Path, source_glob: str) -> Path: return max(candidates, key=lambda p: p.stat().st_mtime) -def _find_game_dir(output_dir: Path | None) -> Path | None: - """Try to find the latest game directory for render tracking. +def _find_game_dir(output_dir: Path | None, clip: Path | None = None) -> Path | None: + """Try to find the game directory for render tracking. + + When *clip* is provided, prefer the game dir that contains the clip. + Falls back to the most recently modified ``game.json`` otherwise. Returns ``None`` if no game directory can be found (no error). """ @@ -96,12 +100,27 @@ def _find_game_dir(output_dir: Path | None) -> Path | None: candidates = [f for f in output_dir.iterdir() if f.is_dir() and (f / "game.json").is_file()] if not candidates: return None + + # Prefer the game dir that contains the clip + if clip is not None: + resolved_clip = clip.resolve() + for candidate in candidates: + try: + if resolved_clip.is_relative_to(candidate.resolve()): + return candidate + except (ValueError, OSError): + continue + return max(candidates, key=lambda p: (p / "game.json").stat().st_mtime) def _default_output(input_path: Path, suffix: str) -> Path: - """Generate default output path from input path and suffix.""" - return input_path.parent / f"{input_path.stem}{suffix}.mp4" + """Generate default output path in a ``shorts/`` subdirectory. + + Renders go into ``/shorts/`` to keep the source directory clean + and prevent segment merges from picking up rendered files. 
+ """ + return input_path.parent / "shorts" / f"{input_path.stem}{suffix}.mp4" def _record_render( @@ -147,6 +166,75 @@ def _record_render( save_game_state(state, game_dir) +def _resolve_player_numbers( + player_numbers: str, + event_type: str | None, + game_dir: Path | None, + config_output_dir: Path | None, + clip: Path | None, +) -> tuple[str, str | None, str | None]: + """Resolve --player-numbers to (scorer_display, assists_csv, scoring_team_name). + + Loads game state, determines scoring team, loads roster, and looks up numbers. + Returns the scorer display string, a comma-separated assists string (or None), + and the scoring team name (or None). + """ + from reeln.core.highlights import load_game_state + from reeln.core.teams import load_roster, load_team_profile, lookup_players, resolve_scoring_team + + # 1. Find game directory + resolved_game_dir = game_dir or _find_game_dir(config_output_dir, clip) + if resolved_game_dir is None: + typer.echo( + "Error: --player-numbers requires a game directory (use --game-dir or run from a game workspace)", + err=True, + ) + raise typer.Exit(code=1) + + # 2. Load game state + try: + state = load_game_state(resolved_game_dir) + except ReelnError as exc: + typer.echo(f"Error: {exc}", err=True) + raise typer.Exit(code=1) from exc + + game_info = state.game_info + if not game_info.level or not game_info.home_slug: + typer.echo( + "Error: --player-numbers requires team profiles (game must be initialized with --level)", + err=True, + ) + raise typer.Exit(code=1) + + # 3. Determine scoring team + team_name, team_slug, level = resolve_scoring_team(event_type or "", game_info) + + # 4. 
Load team profile → roster + try: + team_profile = load_team_profile(level, team_slug) + except ReelnError: + typer.echo(f"Error: Team profile not found: {level}/{team_slug}", err=True) + raise typer.Exit(code=1) from None + + if not team_profile.roster_path: + typer.echo(f"Error: No roster file configured for team '{team_name}'", err=True) + raise typer.Exit(code=1) + + roster_path = Path(team_profile.roster_path) + try: + roster = load_roster(roster_path) + except ReelnError as exc: + typer.echo(f"Error: {exc}", err=True) + raise typer.Exit(code=1) from exc + + # 5. Look up numbers + numbers = [n.strip() for n in player_numbers.split(",") if n.strip()] + scorer, assist_list = lookup_players(roster, numbers, team_name) + + assists_csv = ", ".join(assist_list) if assist_list else None + return (scorer, assists_csv, team_name) + + def _do_short( clip: Path | None, output: Path | None, @@ -156,6 +244,8 @@ def _do_short( anchor: str, pad_color: str, speed: float, + scale: float, + smart: bool, lut: Path | None, subtitle: Path | None, game_dir: Path | None, @@ -170,6 +260,10 @@ def _do_short( debug: bool = False, player: str | None = None, assists: str | None = None, + zoom_frames: int | None = None, + player_numbers: str | None = None, + event_type: str | None = None, + no_branding: bool = False, ) -> None: """Shared implementation for short and preview commands.""" from reeln.core.ffmpeg import discover_ffmpeg @@ -184,6 +278,21 @@ def _do_short( activate_plugins(config.plugins) + # Resolve --player-numbers before anything else + _scoring_team_name: str | None = None + if player_numbers is not None: + scorer, assists_from_roster, _scoring_team_name = _resolve_player_numbers( + player_numbers, event_type, game_dir, config.paths.output_dir, clip + ) + # Explicit --player/--assists take precedence over roster lookup + if player is None: + player = scorer + if assists is None: + assists = assists_from_roster + # Auto-apply player-overlay profile when no explicit -r is 
given + if render_profile_name is None and not iterate and "player-overlay" in config.render_profiles: + render_profile_name = "player-overlay" + if clip is None: source_dir = config.paths.source_dir if source_dir is None: @@ -205,9 +314,34 @@ def _do_short( try: crop_mode = CropMode(crop) except ValueError: - typer.echo(f"Error: Unknown crop mode: {crop!r}. Use pad or crop.", err=True) + typer.echo(f"Error: Unknown crop mode: {crop!r}. Use pad, crop, smart, or smart_pad.", err=True) raise typer.Exit(code=1) from None + # Deprecated --crop smart / --crop smart_pad aliases + effective_smart = smart + if crop_mode == CropMode.SMART: + import warnings + + warnings.warn( + "--crop smart is deprecated, use --crop crop --smart instead", + DeprecationWarning, + stacklevel=1, + ) + typer.echo("Warning: --crop smart is deprecated. Use --crop crop --smart instead.", err=True) + effective_smart = True + elif crop_mode == CropMode.SMART_PAD: + import warnings + + warnings.warn( + "--crop smart_pad is deprecated, use --crop pad --smart instead", + DeprecationWarning, + stacklevel=1, + ) + typer.echo("Warning: --crop smart_pad is deprecated. 
Use --crop pad --smart instead.", err=True) + effective_smart = True + + resolved_zoom_frames = zoom_frames if zoom_frames is not None else 5 + short_config = ShortConfig( input=clip, output=out, @@ -216,6 +350,8 @@ def _do_short( crop_mode=crop_mode, anchor_x=anchor_x, anchor_y=anchor_y, + scale=scale, + smart=effective_smart, pad_color=pad_color, speed=speed, lut=lut, @@ -225,6 +361,7 @@ def _do_short( crf=config.video.crf, audio_codec=config.video.audio_codec, audio_bitrate=config.video.audio_bitrate, + smart_zoom_frames=resolved_zoom_frames, ) # Apply render profile overlay if specified @@ -243,7 +380,7 @@ def _do_short( if rp.subtitle_template is not None: game_event = None game_info = None - resolved_game_dir = game_dir or _find_game_dir(config.paths.output_dir) + resolved_game_dir = game_dir or _find_game_dir(config.paths.output_dir, clip) if resolved_game_dir is not None: try: from reeln.core.highlights import load_game_state @@ -251,9 +388,7 @@ def _do_short( state = load_game_state(resolved_game_dir) game_info = state.game_info if event_id is not None: - game_event = next( - (e for e in state.events if e.id == event_id), None - ) + game_event = next((e for e in state.events if e.id == event_id), None) except ReelnError: pass @@ -272,70 +407,224 @@ def _do_short( from reeln.core.overlay import build_overlay_context dur = _probe_dur(_disc(), clip) or 10.0 - ctx = build_overlay_context(ctx, duration=dur, event_metadata=event_meta) + ctx = build_overlay_context( + ctx, + duration=dur, + event_metadata=event_meta, + scoring_team=_scoring_team_name, + ) - rendered_subtitle = resolve_subtitle_for_profile( - rp, ctx, (output or _default_output(clip, "_short")).parent - ) + subtitle_dir = (output or _default_output(clip, "_short")).parent + subtitle_dir.mkdir(parents=True, exist_ok=True) + rendered_subtitle = resolve_subtitle_for_profile(rp, ctx, subtitle_dir) + + short_config = apply_profile_to_short(short_config, rp, rendered_subtitle=rendered_subtitle) - 
short_config = apply_profile_to_short( - short_config, rp, rendered_subtitle=rendered_subtitle + # Resolve branding overlay + branding_subtitle: Path | None = None + if not no_branding and config.branding.enabled: + from reeln.core.branding import resolve_branding + + branding_dir = (output or _default_output(clip, "_short")).parent + branding_dir.mkdir(parents=True, exist_ok=True) + try: + branding_subtitle = resolve_branding(config.branding, branding_dir) + except ReelnError as exc: + typer.echo(f"Warning: Failed to resolve branding, continuing without: {exc}", err=True) + if branding_subtitle is not None: + short_config = ShortConfig( + input=short_config.input, + output=short_config.output, + width=short_config.width, + height=short_config.height, + crop_mode=short_config.crop_mode, + anchor_x=short_config.anchor_x, + anchor_y=short_config.anchor_y, + scale=short_config.scale, + smart=short_config.smart, + pad_color=short_config.pad_color, + speed=short_config.speed, + lut=short_config.lut, + subtitle=short_config.subtitle, + codec=short_config.codec, + preset=short_config.preset, + crf=short_config.crf, + audio_codec=short_config.audio_codec, + audio_bitrate=short_config.audio_bitrate, + speed_segments=short_config.speed_segments, + smart_zoom_frames=short_config.smart_zoom_frames, + branding=branding_subtitle, ) - # Multi-iteration mode - if iterate: - from reeln.core.iterations import render_iterations - from reeln.core.profiles import profiles_for_event - from reeln.core.templates import build_base_context + # Smart zoom: extract frames before iterate or single render + import tempfile - game_event = None - game_info = None - resolved_game_dir = game_dir or _find_game_dir(config.paths.output_dir) - if resolved_game_dir is not None: + extracted_dir: Path | None = None + extracted_frames = None + zoom_path = None + plugin_debug_data: dict[str, object] | None = None + try: + if effective_smart: + from reeln.models.zoom import ZoomPath + from 
reeln.plugins.hooks import Hook, HookContext + from reeln.plugins.registry import get_registry + + try: + ffmpeg_path = discover_ffmpeg() + renderer = FFmpegRenderer(ffmpeg_path) + except ReelnError as exc: + typer.echo(f"Error: {exc}", err=True) + raise typer.Exit(code=1) from exc + + extracted_dir = Path(tempfile.mkdtemp(prefix="reeln_frames_")) + try: + extracted_frames = renderer.extract_frames( + clip, count=short_config.smart_zoom_frames, output_dir=extracted_dir + ) + frames = extracted_frames + except ReelnError as exc: + typer.echo(f"Error extracting frames: {exc}", err=True) + raise typer.Exit(code=1) from exc + + shared: dict[str, object] = {} + hook_ctx = HookContext( + hook=Hook.ON_FRAMES_EXTRACTED, + data={ + "frames": frames, + "input_path": clip, + "crop_mode": "smart", + }, + shared=shared, + ) + get_registry().emit(Hook.ON_FRAMES_EXTRACTED, hook_ctx) + + smart_zoom_data = shared.get("smart_zoom") + if isinstance(smart_zoom_data, dict): + zoom_path = smart_zoom_data.get("zoom_path") + debug_from_plugin = smart_zoom_data.get("debug") + if isinstance(debug_from_plugin, dict): + plugin_debug_data = debug_from_plugin + + if zoom_path is None or not isinstance(zoom_path, ZoomPath): + from reeln.core.shorts import _resolve_smart + + fallback_mode, _ = _resolve_smart(crop_mode, False) + typer.echo( + f"Warning: No smart zoom data from plugins, falling back to {fallback_mode.value}", + err=True, + ) + zoom_path = None + short_config = ShortConfig( + input=short_config.input, + output=short_config.output, + width=short_config.width, + height=short_config.height, + crop_mode=fallback_mode, + anchor_x=short_config.anchor_x, + anchor_y=short_config.anchor_y, + scale=short_config.scale, + smart=False, + pad_color=short_config.pad_color, + speed=short_config.speed, + lut=short_config.lut, + subtitle=short_config.subtitle, + codec=short_config.codec, + preset=short_config.preset, + crf=short_config.crf, + audio_codec=short_config.audio_codec, + 
audio_bitrate=short_config.audio_bitrate, + smart_zoom_frames=short_config.smart_zoom_frames, + ) + + source_fps = extracted_frames.fps if extracted_frames is not None else 30.0 + + # Load game state once — used by both iterate and single-render paths + # so POST_RENDER hooks receive game_info for metadata generation. + from reeln.models.game import GameEvent, GameInfo + + render_game_event: GameEvent | None = None + render_game_info: GameInfo | None = None + render_game_dir = game_dir or _find_game_dir(config.paths.output_dir, clip) + if render_game_dir is not None: try: from reeln.core.highlights import load_game_state - state = load_game_state(resolved_game_dir) - game_info = state.game_info + _state = load_game_state(render_game_dir) + render_game_info = _state.game_info if event_id is not None: - game_event = next((e for e in state.events if e.id == event_id), None) + render_game_event = next((e for e in _state.events if e.id == event_id), None) except ReelnError: pass - profile_list = profiles_for_event(config, game_event) - if profile_list: - iter_ctx: TemplateContext | None = build_base_context(game_info, game_event) if game_info else None - if player is not None and iter_ctx is not None: - iter_ctx = TemplateContext(variables={**iter_ctx.variables, "player": player}) - event_meta = dict(game_event.metadata) if game_event else None - if assists is not None: - event_meta = event_meta or {} - event_meta["assists"] = assists - try: - ffmpeg_path = discover_ffmpeg() - _, messages = render_iterations( - clip, - profile_list, - config, - ffmpeg_path, - out, - context=iter_ctx, - event_metadata=event_meta, - is_short=True, - short_config=short_config, - dry_run=dry_run, + # Multi-iteration mode + if iterate: + from reeln.core.iterations import render_iterations + from reeln.core.profiles import profiles_for_event + from reeln.core.templates import build_base_context + + profile_list = profiles_for_event(config, render_game_event) + if profile_list: + iter_ctx: 
TemplateContext | None = ( + build_base_context(render_game_info, render_game_event) + if render_game_info else None ) - except ReelnError as exc: - typer.echo(f"Error: {exc}", err=True) - raise typer.Exit(code=1) from exc - for msg in messages: - typer.echo(msg) - return - typer.echo("Warning: No iteration profiles configured, using single render", err=True) + if player is not None and iter_ctx is not None: + iter_ctx = TemplateContext(variables={**iter_ctx.variables, "player": player}) + event_meta = dict(render_game_event.metadata) if render_game_event else None + if assists is not None: + event_meta = event_meta or {} + event_meta["assists"] = assists + try: + ffmpeg_path = discover_ffmpeg() + _, messages = render_iterations( + clip, + profile_list, + config, + ffmpeg_path, + out, + context=iter_ctx, + event_metadata=event_meta, + is_short=True, + short_config=short_config, + zoom_path=zoom_path, + source_fps=source_fps, + dry_run=dry_run, + game_info=render_game_info, + game_event=render_game_event, + player=player, + assists=assists, + ) + except ReelnError as exc: + typer.echo(f"Error: {exc}", err=True) + raise typer.Exit(code=1) from exc + for msg in messages: + typer.echo(msg) + + # Debug output for iterate path + if debug: + resolved_gd = game_dir or _find_game_dir(config.paths.output_dir, clip) + if resolved_gd is not None and extracted_frames is not None: + from reeln.core.zoom_debug import write_zoom_debug + + write_zoom_debug( + resolved_gd, + extracted_frames, + zoom_path, + short_config.width, + short_config.height, + ffmpeg_path=ffmpeg_path, + plugin_debug=plugin_debug_data, + ) + typer.echo(f"Debug: {resolved_gd / 'debug'}") + + return + typer.echo("Warning: No iteration profiles configured, using single render", err=True) - try: try: - plan = plan_preview(short_config) if is_preview else plan_short(short_config) + if is_preview: + plan = plan_preview(short_config) + else: + plan = plan_short(short_config, zoom_path=zoom_path, 
source_fps=source_fps) except ReelnError as exc: typer.echo(f"Error: {exc}", err=True) raise typer.Exit(code=1) from exc @@ -344,6 +633,8 @@ def _do_short( typer.echo(f"Output: {out}") typer.echo(f"Size: {plan.width}x{plan.height}") typer.echo(f"Crop mode: {short_config.crop_mode.value}") + if short_config.scale != 1.0: + typer.echo(f"Scale: {short_config.scale}x") if short_config.speed != 1.0: typer.echo(f"Speed: {short_config.speed}x") if short_config.lut: @@ -352,19 +643,48 @@ def _do_short( typer.echo(f"Subtitle: {short_config.subtitle}") if render_profile_name is not None: typer.echo(f"Profile: {render_profile_name}") + if zoom_path is not None: + typer.echo(f"Smart zoom: {len(zoom_path.points)} target points") + if debug and plan.filter_complex is not None: + typer.echo(f"Filter complex: {plan.filter_complex}") if dry_run: typer.echo("Dry run — no files written") return + out.parent.mkdir(parents=True, exist_ok=True) + + # Emit hooks manually so POST_RENDER includes game_info for plugins + from reeln.plugins.hooks import Hook as _RHook + from reeln.plugins.hooks import HookContext as _RHookCtx + from reeln.plugins.registry import get_registry as _get_reg + + _get_reg().emit( + _RHook.PRE_RENDER, + _RHookCtx(hook=_RHook.PRE_RENDER, data={"plan": plan}), + ) try: ffmpeg_path = discover_ffmpeg() renderer = FFmpegRenderer(ffmpeg_path) - result = renderer.render(plan) + result = renderer.render(plan, emit_hooks=False) except ReelnError as exc: typer.echo(f"Error: {exc}", err=True) raise typer.Exit(code=1) from exc + _post_data: dict[str, Any] = {"plan": plan, "result": result} + if render_game_info is not None: + _post_data["game_info"] = render_game_info + if render_game_event is not None: + _post_data["game_event"] = render_game_event + if player is not None: + _post_data["player"] = player + if assists is not None: + _post_data["assists"] = assists + _get_reg().emit( + _RHook.POST_RENDER, + _RHookCtx(hook=_RHook.POST_RENDER, data=_post_data), + ) + if 
result.duration_seconds is not None: typer.echo(f"Duration: {result.duration_seconds:.1f}s") if result.file_size_bytes is not None: @@ -372,7 +692,7 @@ def _do_short( typer.echo(f"File size: {size_mb:.1f} MB") typer.echo("Render complete") - resolved_game_dir = game_dir or _find_game_dir(config.paths.output_dir) + resolved_game_dir = game_dir or _find_game_dir(config.paths.output_dir, clip) if resolved_game_dir is not None: try: _record_render( @@ -391,6 +711,16 @@ def _do_short( if debug and result.ffmpeg_command: from reeln.core.debug import build_debug_artifact, write_debug_artifact, write_debug_index + extra: dict[str, object] = { + "crop_mode": crop, + "size": f"{short_config.width}x{short_config.height}", + "speed": speed, + "scale": short_config.scale, + "smart": short_config.smart, + } + if zoom_path is not None: + extra["smart_zoom_points"] = len(zoom_path.points) + artifact = build_debug_artifact( "render_preview" if is_preview else "render_short", result.ffmpeg_command, @@ -398,14 +728,34 @@ def _do_short( out, resolved_game_dir, ffmpeg_path, - extra={"crop_mode": crop, "size": f"{short_config.width}x{short_config.height}", "speed": speed}, + extra=extra, ) write_debug_artifact(resolved_game_dir, artifact) write_debug_index(resolved_game_dir) + + if extracted_frames is not None: + from reeln.core.zoom_debug import write_zoom_debug + + write_zoom_debug( + resolved_game_dir, + extracted_frames, + zoom_path, + short_config.width, + short_config.height, + ffmpeg_path=ffmpeg_path, + plugin_debug=plugin_debug_data, + ) + typer.echo(f"Debug: {resolved_game_dir / 'debug'}") finally: if rendered_subtitle is not None: rendered_subtitle.unlink(missing_ok=True) + if branding_subtitle is not None: + branding_subtitle.unlink(missing_ok=True) + if extracted_dir is not None: + import shutil + + shutil.rmtree(extracted_dir, ignore_errors=True) # --------------------------------------------------------------------------- @@ -419,10 +769,12 @@ def short( output: Path | 
None = typer.Option(None, "--output", "-o", help="Output file path."), fmt: str | None = typer.Option(None, "--format", "-f", help="Output format: vertical, square."), size: str | None = typer.Option(None, "--size", help="Custom WxH (e.g., 1080x1920)."), - crop: str = typer.Option("pad", "--crop", "-c", help="Crop mode: pad, crop."), + crop: str = typer.Option("pad", "--crop", "-c", help="Crop mode: pad, crop, smart, smart_pad."), anchor: str = typer.Option("center", "--anchor", "-a", help="Crop anchor: center/top/bottom/left/right or x,y."), pad_color: str = typer.Option("black", "--pad-color", help="Pad bar color."), speed: float = typer.Option(1.0, "--speed", help="Playback speed (0.5-2.0)."), + scale: float = typer.Option(1.0, "--scale", help="Content scale (0.5-3.0). >1.0 zooms in."), + smart: bool = typer.Option(False, "--smart", help="Smart tracking via vision plugin."), lut: Path | None = typer.Option(None, "--lut", help="LUT file (.cube/.3dl)."), subtitle: Path | None = typer.Option(None, "--subtitle", help="ASS subtitle file."), game_dir: Path | None = typer.Option(None, "--game-dir", help="Game directory for render tracking."), @@ -430,10 +782,25 @@ def short( render_profile: str | None = typer.Option(None, "--render-profile", "-r", help="Named render profile from config."), player_name: str | None = typer.Option(None, "--player", help="Player name for overlay."), assists_str: str | None = typer.Option(None, "--assists", help="Assists, comma-separated."), + player_numbers_str: str | None = typer.Option( + None, + "--player-numbers", + "-n", + help="Jersey numbers: scorer[,assist1[,assist2]]. Looked up from team roster.", + ), + event_type: str | None = typer.Option( + None, + "--event-type", + help="Event type for scoring team resolution (HOME_GOAL, AWAY_GOAL).", + ), + zoom_frames: int | None = typer.Option( + None, "--zoom-frames", help="Number of frames to extract for smart zoom (1-20)." 
+ ), profile: str | None = typer.Option(None, "--profile", help="Named config profile."), config_path: Path | None = typer.Option(None, "--config", help="Explicit config file path."), iterate: bool = typer.Option(False, "--iterate", help="Multi-iteration mode using event type config."), debug_flag: bool = typer.Option(False, "--debug", help="Write debug artifacts to game debug directory."), + no_branding: bool = typer.Option(False, "--no-branding", help="Disable branding overlay."), dry_run: bool = typer.Option(False, "--dry-run", help="Show plan without executing."), ) -> None: """Render a 9:16 short from a clip.""" @@ -446,6 +813,8 @@ def short( anchor, pad_color, speed, + scale, + smart, lut, subtitle, game_dir, @@ -459,6 +828,10 @@ def short( debug=debug_flag, player=player_name, assists=assists_str, + zoom_frames=zoom_frames, + player_numbers=player_numbers_str, + event_type=event_type, + no_branding=no_branding, ) @@ -468,20 +841,37 @@ def preview( output: Path | None = typer.Option(None, "--output", "-o", help="Output file path."), fmt: str | None = typer.Option(None, "--format", "-f", help="Output format: vertical, square."), size: str | None = typer.Option(None, "--size", help="Custom WxH (e.g., 1080x1920)."), - crop: str = typer.Option("pad", "--crop", "-c", help="Crop mode: pad, crop."), + crop: str = typer.Option("pad", "--crop", "-c", help="Crop mode: pad, crop, smart, smart_pad."), anchor: str = typer.Option("center", "--anchor", "-a", help="Crop anchor: center/top/bottom/left/right or x,y."), pad_color: str = typer.Option("black", "--pad-color", help="Pad bar color."), speed: float = typer.Option(1.0, "--speed", help="Playback speed (0.5-2.0)."), + scale: float = typer.Option(1.0, "--scale", help="Content scale (0.5-3.0). 
>1.0 zooms in."), + smart: bool = typer.Option(False, "--smart", help="Smart tracking via vision plugin."), lut: Path | None = typer.Option(None, "--lut", help="LUT file (.cube/.3dl)."), subtitle: Path | None = typer.Option(None, "--subtitle", help="ASS subtitle file."), game_dir: Path | None = typer.Option(None, "--game-dir", help="Game directory for render tracking."), render_profile: str | None = typer.Option(None, "--render-profile", "-r", help="Named render profile from config."), player_name: str | None = typer.Option(None, "--player", help="Player name for overlay."), assists_str: str | None = typer.Option(None, "--assists", help="Assists, comma-separated."), + player_numbers_str: str | None = typer.Option( + None, + "--player-numbers", + "-n", + help="Jersey numbers: scorer[,assist1[,assist2]]. Looked up from team roster.", + ), + event_type: str | None = typer.Option( + None, + "--event-type", + help="Event type for scoring team resolution (HOME_GOAL, AWAY_GOAL).", + ), + zoom_frames: int | None = typer.Option( + None, "--zoom-frames", help="Number of frames to extract for smart zoom (1-20)." 
+ ), profile: str | None = typer.Option(None, "--profile", help="Named config profile."), config_path: Path | None = typer.Option(None, "--config", help="Explicit config file path."), iterate: bool = typer.Option(False, "--iterate", help="Multi-iteration mode using event type config."), debug_flag: bool = typer.Option(False, "--debug", help="Write debug artifacts to game debug directory."), + no_branding: bool = typer.Option(False, "--no-branding", help="Disable branding overlay."), dry_run: bool = typer.Option(False, "--dry-run", help="Show plan without executing."), ) -> None: """Fast low-res preview render.""" @@ -494,6 +884,8 @@ def preview( anchor, pad_color, speed, + scale, + smart, lut, subtitle, game_dir, @@ -506,6 +898,10 @@ def preview( debug=debug_flag, player=player_name, assists=assists_str, + zoom_frames=zoom_frames, + player_numbers=player_numbers_str, + event_type=event_type, + no_branding=no_branding, ) @@ -616,6 +1012,13 @@ def apply_profile( event: str | None = typer.Option(None, "--event", help="Event ID for template context."), player_name: str | None = typer.Option(None, "--player", help="Player name for overlay."), assists_str: str | None = typer.Option(None, "--assists", help="Assists, comma-separated."), + player_numbers_str: str | None = typer.Option( + None, + "--player-numbers", + "-n", + help="Jersey numbers: scorer[,assist1[,assist2]]. 
Looked up from team roster.", + ), + event_type: str | None = typer.Option(None, "--event-type", help="Event type for scoring team resolution."), profile: str | None = typer.Option(None, "--profile", help="Named config profile."), config_path: Path | None = typer.Option(None, "--config", help="Explicit config file path."), iterate: bool = typer.Option(False, "--iterate", help="Multi-iteration mode using event type config."), @@ -638,19 +1041,34 @@ def apply_profile( activate_plugins(config.plugins) + # Resolve --player-numbers before anything else + _scoring_team_name: str | None = None + if player_numbers_str is not None: + scorer, assists_from_roster, _scoring_team_name = _resolve_player_numbers( + player_numbers_str, event_type, game_dir, config.paths.output_dir, clip + ) + if player_name is None: + player_name = scorer + if assists_str is None: + assists_str = assists_from_roster + out = output or _default_output(clip, f"_{render_profile}") # Build template context for subtitle rendering - game_event = None - game_info = None - if game_dir is not None: + from reeln.models.game import GameEvent as _ApplyEvent + from reeln.models.game import GameInfo as _ApplyInfo + + apply_game_event: _ApplyEvent | None = None + apply_game_info: _ApplyInfo | None = None + resolved_game_dir = game_dir or _find_game_dir(config.paths.output_dir, clip) + if resolved_game_dir is not None: try: from reeln.core.highlights import load_game_state - state = load_game_state(game_dir) - game_info = state.game_info + state = load_game_state(resolved_game_dir) + apply_game_info = state.game_info if event is not None: - game_event = next((e for e in state.events if e.id == event), None) + apply_game_event = next((e for e in state.events if e.id == event), None) except ReelnError: pass # non-fatal: just skip context @@ -659,12 +1077,12 @@ def apply_profile( from reeln.core.iterations import render_iterations from reeln.core.profiles import profiles_for_event - profile_list = 
profiles_for_event(config, game_event) + profile_list = profiles_for_event(config, apply_game_event) if profile_list: - ctx = build_base_context(game_info, game_event) if game_info else None + ctx = build_base_context(apply_game_info, apply_game_event) if apply_game_info else None if player_name is not None and ctx is not None: ctx = TemplateContext(variables={**ctx.variables, "player": player_name}) - event_meta = dict(game_event.metadata) if game_event else None + event_meta = dict(apply_game_event.metadata) if apply_game_event else None if assists_str is not None: event_meta = event_meta or {} event_meta["assists"] = assists_str @@ -681,6 +1099,10 @@ def apply_profile( context=ctx, event_metadata=event_meta, dry_run=dry_run, + game_info=apply_game_info, + game_event=apply_game_event, + player=player_name, + assists=assists_str, ) except ReelnError as exc: typer.echo(f"Error: {exc}", err=True) @@ -699,11 +1121,14 @@ def apply_profile( rendered_subtitle: Path | None = None try: if rp.subtitle_template is not None: - ctx = build_base_context(game_info, game_event) if game_info else TemplateContext() + ctx = ( + build_base_context(apply_game_info, apply_game_event) + if apply_game_info else TemplateContext() + ) if player_name is not None: ctx = TemplateContext(variables={**ctx.variables, "player": player_name}) - event_meta = dict(game_event.metadata) if game_event else None + event_meta = dict(apply_game_event.metadata) if apply_game_event else None if assists_str is not None: event_meta = event_meta or {} event_meta["assists"] = assists_str @@ -714,7 +1139,13 @@ def apply_profile( from reeln.core.overlay import build_overlay_context dur = _probe_dur(_disc(), clip) or 10.0 - ctx = build_overlay_context(ctx, duration=dur, event_metadata=event_meta) + ctx = build_overlay_context( + ctx, + duration=dur, + event_metadata=event_meta, + scoring_team=_scoring_team_name, + ) + out.parent.mkdir(parents=True, exist_ok=True) rendered_subtitle = 
resolve_subtitle_for_profile(rp, ctx, out.parent) try: @@ -739,15 +1170,37 @@ def apply_profile( from reeln.core.ffmpeg import discover_ffmpeg from reeln.core.renderer import FFmpegRenderer - + from reeln.plugins.hooks import Hook as _ApplyHook + from reeln.plugins.hooks import HookContext as _ApplyHookCtx + from reeln.plugins.registry import get_registry as _apply_get_reg + + out.parent.mkdir(parents=True, exist_ok=True) + _apply_get_reg().emit( + _ApplyHook.PRE_RENDER, + _ApplyHookCtx(hook=_ApplyHook.PRE_RENDER, data={"plan": plan}), + ) try: ffmpeg_path = discover_ffmpeg() renderer = FFmpegRenderer(ffmpeg_path) - result = renderer.render(plan) + result = renderer.render(plan, emit_hooks=False) except ReelnError as exc: typer.echo(f"Error: {exc}", err=True) raise typer.Exit(code=1) from exc + _apply_post: dict[str, Any] = {"plan": plan, "result": result} + if apply_game_info is not None: + _apply_post["game_info"] = apply_game_info + if apply_game_event is not None: + _apply_post["game_event"] = apply_game_event + if player_name is not None: + _apply_post["player"] = player_name + if assists_str is not None: + _apply_post["assists"] = assists_str + _apply_get_reg().emit( + _ApplyHook.POST_RENDER, + _ApplyHookCtx(hook=_ApplyHook.POST_RENDER, data=_apply_post), + ) + if result.duration_seconds is not None: typer.echo(f"Duration: {result.duration_seconds:.1f}s") if result.file_size_bytes is not None: @@ -755,7 +1208,7 @@ def apply_profile( typer.echo(f"File size: {size_mb:.1f} MB") typer.echo("Render complete") - if debug_flag and game_dir is not None and result.ffmpeg_command: + if debug_flag and resolved_game_dir is not None and result.ffmpeg_command: from reeln.core.debug import build_debug_artifact, write_debug_artifact, write_debug_index artifact = build_debug_artifact( @@ -763,13 +1216,13 @@ def apply_profile( result.ffmpeg_command, [clip], out, - game_dir, + resolved_game_dir, ffmpeg_path, extra={"profile": render_profile}, ) - write_debug_artifact(game_dir, 
artifact) - write_debug_index(game_dir) - typer.echo(f"Debug: {game_dir / 'debug'}") + write_debug_artifact(resolved_game_dir, artifact) + write_debug_index(resolved_game_dir) + typer.echo(f"Debug: {resolved_game_dir / 'debug'}") finally: if rendered_subtitle is not None: rendered_subtitle.unlink(missing_ok=True) diff --git a/reeln/core/config.py b/reeln/core/config.py index dde7aa7..54d6b15 100644 --- a/reeln/core/config.py +++ b/reeln/core/config.py @@ -12,6 +12,7 @@ from reeln.core.errors import ConfigError from reeln.core.log import get_logger +from reeln.models.branding import BrandingConfig from reeln.models.config import AppConfig, PathConfig, PluginsConfig, VideoConfig from reeln.models.plugin import OrchestrationConfig from reeln.models.profile import ( @@ -146,6 +147,18 @@ def config_to_dict(config: AppConfig, *, full: bool = False) -> dict[str, Any]: }, } + has_branding = ( + not config.branding.enabled + or config.branding.template != "builtin:branding" + or config.branding.duration != 5.0 + ) + if full or has_branding: + d["branding"] = { + "enabled": config.branding.enabled, + "template": config.branding.template, + "duration": config.branding.duration, + } + if full or config.render_profiles: d["render_profiles"] = { name: render_profile_to_dict(profile) for name, profile in config.render_profiles.items() @@ -243,6 +256,16 @@ def dict_to_config(data: dict[str, Any]) -> AppConfig: enforce_hooks=bool(raw_plugins.get("enforce_hooks", True)), ) + # Branding + raw_branding = data.get("branding", {}) + branding_cfg = BrandingConfig() + if isinstance(raw_branding, dict): + branding_cfg = BrandingConfig( + enabled=bool(raw_branding.get("enabled", True)), + template=str(raw_branding.get("template", "builtin:branding")), + duration=float(raw_branding.get("duration", 5.0)), + ) + return AppConfig( config_version=int(data.get("config_version", CURRENT_CONFIG_VERSION)), sport=str(data.get("sport", "generic")), @@ -250,6 +273,7 @@ def dict_to_config(data: 
dict[str, Any]) -> AppConfig: paths=_dict_to_path_config(data.get("paths", {})), render_profiles=profiles, iterations=iterations, + branding=branding_cfg, orchestration=orchestration, plugins=plugins_cfg, ) @@ -346,6 +370,10 @@ def validate_config(data: dict[str, Any]) -> list[str]: if orchestration is not None and not isinstance(orchestration, dict): issues.append("'orchestration' section must be a dict") + branding = data.get("branding") + if branding is not None and not isinstance(branding, dict): + issues.append("'branding' section must be a dict") + plugins = data.get("plugins") if plugins is not None and not isinstance(plugins, dict): issues.append("'plugins' section must be a dict") @@ -422,8 +450,10 @@ def load_config( base = config_to_dict(default_config()) # Determine whether the user explicitly requested a specific config - explicit = path is not None or profile is not None or bool( - os.environ.get("REELN_CONFIG") or os.environ.get("REELN_PROFILE") + explicit = ( + path is not None + or profile is not None + or bool(os.environ.get("REELN_CONFIG") or os.environ.get("REELN_PROFILE")) ) file_path = resolve_config_path(path, profile) diff --git a/reeln/core/debug.py b/reeln/core/debug.py index 9281063..836e100 100644 --- a/reeln/core/debug.py +++ b/reeln/core/debug.py @@ -186,7 +186,7 @@ def collect_debug_artifacts(game_dir: Path) -> list[DebugArtifact]: log.debug("Skipping corrupt debug artifact: %s", f) continue - return sorted(artifacts, key=lambda a: a.timestamp) + return sorted(artifacts, key=lambda a: a.timestamp, reverse=True) # --------------------------------------------------------------------------- @@ -223,11 +223,67 @@ def write_debug_index(game_dir: Path) -> Path: "", "", "", - "

reeln Debug Index

", + "

" + "reeln" + "reeln Debug Index

", f"

Game directory: {html.escape(str(game_dir))}

", f"

Generated: {html.escape(datetime.now(tz=UTC).isoformat())}

", ] + # Zoom debug section (if zoom/ subdirectory exists) + zoom_dir = d / "zoom" + if zoom_dir.is_dir(): + lines.append("
") + lines.append("

Smart Zoom Debug

") + + # Plugin debug (prompts, model info) + plugin_debug_json = zoom_dir / "plugin_debug.json" + if plugin_debug_json.is_file(): + try: + plugin_data = json.loads(plugin_debug_json.read_text(encoding="utf-8")) + lines.append("

Plugin debug data:

") + lines.append(f"
{html.escape(json.dumps(plugin_data, indent=2))}
") + except (json.JSONDecodeError, OSError): + pass + + # Zoom path JSON link + zoom_json = zoom_dir / "zoom_path.json" + if zoom_json.is_file(): + lines.append("

Zoom path: zoom_path.json

") + + # Collect frame images + frames = sorted(zoom_dir.glob("frame_*.png")) + annotated = sorted(zoom_dir.glob("annotated_*.png")) + + if annotated: + lines.append( + "

Annotated frames (crosshair = detected center, green box = crop region):

" + ) + lines.append("
") + for img in annotated: + lines.append( + f"" + f"" + ) + lines.append("
") + + if frames: + lines.append("

Extracted frames:

") + lines.append("
") + for img in frames: + lines.append( + f"" + f"" + ) + lines.append("
") + + lines.append("
") + if not artifacts: lines.append("

No debug artifacts found.

") else: diff --git a/reeln/core/ffmpeg.py b/reeln/core/ffmpeg.py index 96536a3..bcf67b3 100644 --- a/reeln/core/ffmpeg.py +++ b/reeln/core/ffmpeg.py @@ -250,9 +250,11 @@ def _run_probe_float(cmd: list[str]) -> float | None: def list_codecs(ffmpeg_path: Path) -> list[str]: - """Return a list of encoding-capable codec names. + """Return a list of encoding-capable codec names and encoder names. - Parses ``ffmpeg -codecs`` output. Returns an empty list on error. + Parses ``ffmpeg -codecs`` output. Each encoding-capable line yields + the codec family name (e.g. ``h264``) **and** any specific encoder + names listed in ``(encoders: ...)``. Returns an empty list on error. """ proc = _run_probe([str(ffmpeg_path), "-codecs"]) if proc is None: @@ -263,13 +265,21 @@ def list_codecs(ffmpeg_path: Path) -> list[str]: stripped = line.strip() # Codec lines have 6-char flags then a space then the codec name. # Encoding-capable codecs have 'E' at position 1 (0-indexed). - # Example: "DEV.LS libx264 ..." + # Example: "DEV.LS h264 ... 
(encoders: libx264 libx264rgb h264_videotoolbox)" if len(stripped) < 8: continue flags = stripped[:6] if len(flags) >= 2 and flags[1] == "E": - codec_name = stripped[6:].split()[0] + rest = stripped[6:].strip() + codec_name = rest.split()[0] codecs.append(codec_name) + # Extract encoder names from "(encoders: name1 name2 ...)" + idx = rest.find("(encoders:") + if idx != -1: + close = rest.find(")", idx) + if close != -1: + encoder_str = rest[idx + len("(encoders:") : close].strip() + codecs.extend(encoder_str.split()) return codecs @@ -348,6 +358,77 @@ def build_concat_command( # --------------------------------------------------------------------------- +def build_xfade_command( + ffmpeg_path: Path, + files: list[Path], + durations: list[float], + output: Path, + *, + fade_duration: float = 0.5, + video_codec: str = "libx264", + crf: int = 18, + audio_codec: str = "aac", + audio_rate: int = 48000, +) -> list[str]: + """Build an ffmpeg command that concatenates files with cross-fade transitions. + + Uses the ``xfade`` video filter and ``acrossfade`` audio filter to + create smooth fade transitions between adjacent clips. + + *durations* must have the same length as *files* — each entry is the + duration of the corresponding input in seconds. + """ + if len(files) != len(durations): + raise FFmpegError( + f"files and durations must have the same length: {len(files)} vs {len(durations)}" + ) + if len(files) < 2: + raise FFmpegError("xfade requires at least 2 input files") + + cmd: list[str] = [str(ffmpeg_path), "-y"] + for f in files: + cmd.extend(["-i", str(f)]) + + n = len(files) + fade = min(fade_duration, min(durations) / 2) + + # Build xfade chain: [0:v][1:v]xfade=...=offset[xf0];[xf0][2:v]xfade=...[xf1]... 
+ v_parts: list[str] = [] + a_parts: list[str] = [] + + # Track the cumulative offset (accounting for fade overlap) + offset = durations[0] - fade + for i in range(1, n): + v_in = f"[{i - 1}:v]" if i == 1 else f"[xf{i - 2}]" + v_out = f"[xf{i - 1}]" if i < n - 1 else "[vout]" + v_parts.append( + f"{v_in}[{i}:v]xfade=transition=fade:duration={fade}:offset={offset:.6f}{v_out}" + ) + + a_in = f"[{i - 1}:a]" if i == 1 else f"[af{i - 2}]" + a_out = f"[af{i - 1}]" if i < n - 1 else "[aout]" + a_parts.append( + f"{a_in}[{i}:a]acrossfade=d={fade}:c1=tri:c2=tri{a_out}" + ) + + if i < n - 1: + offset += durations[i] - fade + + filter_complex = ";".join(v_parts + a_parts) + + cmd.extend([ + "-filter_complex", filter_complex, + "-map", "[vout]", + "-map", "[aout]", + "-c:v", video_codec, + "-crf", str(crf), + "-c:a", audio_codec, + "-ar", str(audio_rate), + str(output), + ]) + return cmd + + def write_concat_file(files: list[Path], output_dir: Path) -> Path: """Write an ffmpeg concat demuxer list to a temp file. @@ -392,6 +473,33 @@ def run_ffmpeg(cmd: list[str], *, timeout: int = 600) -> subprocess.CompletedPro return proc +def build_extract_frame_command( + ffmpeg_path: Path, + input_path: Path, + timestamp: float, + output_path: Path, +) -> list[str]: + """Build an ffmpeg command to extract a single frame at a given timestamp. + + Uses seek-then-decode for accurate frame extraction. + """ + return [ + str(ffmpeg_path), + "-y", + "-v", + "error", + "-ss", + f"{timestamp:.3f}", + "-i", + str(input_path), + "-frames:v", + "1", + "-update", + "1", + str(output_path), + ] + + def build_short_command(ffmpeg_path: Path, plan: RenderPlan) -> list[str]: """Build an ffmpeg command for short-form rendering with filter chains. 
@@ -407,6 +515,10 @@ def build_short_command(ffmpeg_path: Path, plan: RenderPlan) -> list[str]: ] if plan.filter_complex: cmd.extend(["-filter_complex", plan.filter_complex]) + # When audio is embedded in filter_complex (speed_segments), add + # explicit stream mapping so ffmpeg picks the correct output pads. + if "[vfinal]" in plan.filter_complex and "[afinal]" in plan.filter_complex: + cmd.extend(["-map", "[vfinal]", "-map", "[afinal]"]) if plan.audio_filter: cmd.extend(["-af", plan.audio_filter]) cmd.extend( diff --git a/reeln/core/finish.py b/reeln/core/finish.py index 910eac8..32d695a 100644 --- a/reeln/core/finish.py +++ b/reeln/core/finish.py @@ -3,6 +3,7 @@ from __future__ import annotations import logging +import shutil from datetime import UTC, datetime from pathlib import Path @@ -38,6 +39,7 @@ def finish_game( state.finished_at = datetime.now(UTC).isoformat() if not dry_run: + _relocated, reloc_messages = relocate_outputs(game_dir, state) save_game_state(state, game_dir) from reeln.plugins.hooks import Hook, HookContext @@ -48,17 +50,66 @@ def finish_game( get_registry().emit(Hook.ON_GAME_FINISH, ctx) # Second pass — plugins read what others wrote during FINISH - post_ctx = HookContext( - hook=Hook.ON_POST_GAME_FINISH, data=hook_data, shared=ctx.shared - ) + post_ctx = HookContext(hook=Hook.ON_POST_GAME_FINISH, data=hook_data, shared=ctx.shared) get_registry().emit(Hook.ON_POST_GAME_FINISH, post_ctx) log.info("Game finished: %s", game_dir) + else: + reloc_messages = [] messages = _build_summary(state) + messages.extend(reloc_messages) return state, messages +def relocate_outputs( + game_dir: Path, + state: GameState, + *, + dry_run: bool = False, +) -> tuple[list[Path], list[str]]: + """Move segment and highlights outputs into ``game_dir / outputs/``. + + Files are located in ``game_dir.parent`` (the shared output directory). + Missing files are skipped gracefully. + + Returns ``(relocated_paths, messages)``. 
+ """ + source_dir = game_dir.parent + outputs_dir = game_dir / "outputs" + + filenames: list[str] = list(state.segment_outputs) + if state.highlights_output: + filenames.append(state.highlights_output) + + if not filenames: + return [], [] + + relocated: list[Path] = [] + messages: list[str] = [] + + for name in filenames: + src = source_dir / name + if not src.is_file(): + log.debug("Skipping missing output: %s", src) + continue + + if not dry_run: + outputs_dir.mkdir(parents=True, exist_ok=True) + dst = outputs_dir / name + shutil.move(str(src), str(dst)) + relocated.append(dst) + messages.append(f"Relocated {name} → outputs/") + else: + relocated.append(src) + messages.append(f"Would relocate {name} → outputs/") + + if relocated: + log.info("Relocated %d output(s) to %s", len(relocated), outputs_dir) + + return relocated, messages + + def _build_summary(state: GameState) -> list[str]: """Build human-readable summary lines for a finished game.""" info = state.game_info diff --git a/reeln/core/highlights.py b/reeln/core/highlights.py index 2acf214..11383f8 100644 --- a/reeln/core/highlights.py +++ b/reeln/core/highlights.py @@ -109,6 +109,36 @@ def create_game_directory(base_dir: Path, game_info: GameInfo) -> Path: return game_dir +# --------------------------------------------------------------------------- +# Unfinished-game detection +# --------------------------------------------------------------------------- + + +def find_unfinished_games(base_dir: Path) -> list[Path]: + """Scan *base_dir* subdirectories for unfinished games. + + Returns a list of game directory paths that contain a ``game.json`` + with ``finished`` set to ``False``. 
+ """ + if not base_dir.is_dir(): + return [] + + unfinished: list[Path] = [] + for entry in sorted(base_dir.iterdir()): + if not entry.is_dir(): + continue + state_file = entry / _GAME_STATE_FILE + if not state_file.is_file(): + continue + try: + raw = json.loads(state_file.read_text(encoding="utf-8")) + except (json.JSONDecodeError, OSError): + continue + if not raw.get("finished", False): + unfinished.append(entry) + return unfinished + + # --------------------------------------------------------------------------- # Init orchestrator # --------------------------------------------------------------------------- @@ -132,6 +162,12 @@ def init_game( In dry-run mode, no files or directories are created. """ + # Block if unfinished games exist + unfinished = find_unfinished_games(base_dir) + if unfinished: + names = ", ".join(d.name for d in unfinished) + raise MediaError(f"Unfinished game(s) found: {names}. Run 'reeln game finish' before starting a new game.") + # Validate sport early alias = get_sport(game_info.sport) @@ -181,6 +217,8 @@ def init_game( state = load_game_state(game_dir) state.livestreams = dict(livestreams) save_game_state(state, game_dir) + for platform, url in livestreams.items(): + messages.append(f"Livestream ({platform}): {url}") messages.append(f"Created {_GAME_STATE_FILE}") log.info("Game initialized: %s", game_dir) @@ -400,7 +438,8 @@ def process_segment( extensions = {f.suffix.lower() for f in videos} copy = len(extensions) <= 1 - output_name = f"{alias}_{state.game_info.date}.mkv" + out_ext = next(iter(extensions)) if len(extensions) == 1 else ".mkv" + output_name = f"{alias}_{state.game_info.date}{out_ext}" output = game_dir.parent / output_name messages: list[str] = [] @@ -487,6 +526,8 @@ def process_segment( state.events.extend(new_events) if segment_number not in state.segments_processed: state.segments_processed.append(segment_number) + if output.name not in state.segment_outputs: + state.segment_outputs.append(output.name) 
save_game_state(state, game_dir) get_registry().emit( @@ -548,15 +589,19 @@ def merge_game_highlights( sport = info.sport alias_info = get_sport(sport) - # Find segment highlight files in order + # Find segment highlight files in order (any video extension) + from reeln.core.ffmpeg import _VIDEO_EXTENSIONS + segment_files: list[Path] = [] segments = make_segments(sport) for seg in segments: seg_alias = segment_dir_name(sport, seg.number) - pattern = f"{seg_alias}_{info.date}.mkv" - candidate = game_dir.parent / pattern - if candidate.is_file(): - segment_files.append(candidate) + prefix = f"{seg_alias}_{info.date}" + for ext in sorted(_VIDEO_EXTENSIONS): + candidate = game_dir.parent / f"{prefix}{ext}" + if candidate.is_file(): + segment_files.append(candidate) + break if not segment_files: raise MediaError(f"No segment highlight files found in {game_dir.parent}. Run 'reeln game segment' first.") @@ -565,7 +610,8 @@ def merge_game_highlights( extensions = {f.suffix.lower() for f in segment_files} copy = len(extensions) <= 1 - output_name = f"{info.home_team}_vs_{info.away_team}_{info.date}.mkv" + out_ext = next(iter(extensions)) if len(extensions) == 1 else ".mkv" + output_name = f"{info.home_team}_vs_{info.away_team}_{info.date}{out_ext}" output = game_dir.parent / output_name messages: list[str] = [] @@ -617,6 +663,7 @@ def merge_game_highlights( # Update game state state.highlighted = True + state.highlights_output = output.name save_game_state(state, game_dir) from reeln.plugins.hooks import Hook, HookContext diff --git a/reeln/core/iterations.py b/reeln/core/iterations.py index 927a750..c6b74c5 100644 --- a/reeln/core/iterations.py +++ b/reeln/core/iterations.py @@ -10,6 +10,8 @@ from reeln.core.errors import RenderError from reeln.core.ffmpeg import ( build_concat_command, + build_xfade_command, + probe_duration, run_ffmpeg, write_concat_file, ) @@ -26,6 +28,7 @@ from reeln.models.render_plan import IterationResult from reeln.models.short import ShortConfig 
from reeln.models.template import TemplateContext +from reeln.models.zoom import ZoomPath log: logging.Logger = get_logger(__name__) @@ -46,7 +49,13 @@ def render_iterations( event_metadata: dict[str, Any] | None = None, is_short: bool = False, short_config: ShortConfig | None = None, + zoom_path: ZoomPath | None = None, + source_fps: float = 30.0, dry_run: bool = False, + game_info: object | None = None, + game_event: object | None = None, + player: str | None = None, + assists: str | None = None, ) -> tuple[IterationResult, list[str]]: """Render *clip* through multiple profiles and concatenate the results. @@ -57,6 +66,10 @@ def render_iterations( 3. Plan a render (short-form or full-frame) 4. Execute via ``FFmpegRenderer`` + When *zoom_path* is provided (from smart zoom frame extraction), it is + passed through to ``plan_short()`` so smart crop/pad works within + iterations. + When more than one iteration produces output, the results are concatenated (stream copy) into the final *output*. A single iteration simply renames its temp file. @@ -86,15 +99,12 @@ def render_iterations( ) return result, messages - ctx = context or TemplateContext() + base_ctx = context or TemplateContext() - # Enrich context with overlay variables when event metadata is available + # Probe source duration once for overlay timing + source_dur: float | None = None if event_metadata is not None: - from reeln.core.ffmpeg import probe_duration - from reeln.core.overlay import build_overlay_context - - dur = probe_duration(ffmpeg_path, clip) or 10.0 - ctx = build_overlay_context(ctx, duration=dur, event_metadata=event_metadata) + source_dur = probe_duration(ffmpeg_path, clip) or 10.0 renderer = FFmpegRenderer(ffmpeg_path) @@ -105,6 +115,22 @@ def render_iterations( for i, profile in enumerate(profiles): temp_out = _iteration_temp(output, i) + # Build per-iteration overlay context — speed_segments change + # the effective output duration, so subtitle timing must match. 
+ ctx = base_ctx + if source_dur is not None and event_metadata is not None: + from reeln.core.overlay import build_overlay_context + from reeln.core.shorts import compute_speed_segments_duration + + iter_dur = source_dur + if profile.speed_segments is not None: + iter_dur = compute_speed_segments_duration( + profile.speed_segments, source_dur, + ) + ctx = build_overlay_context( + base_ctx, duration=iter_dur, event_metadata=event_metadata, + ) + # Resolve subtitle template rendered_subtitle: Path | None = None if profile.subtitle_template is not None: @@ -117,12 +143,26 @@ def render_iterations( modified = apply_profile_to_short(short_config, profile, rendered_subtitle=rendered_subtitle) modified = replace( modified, - input=clip if i == 0 else temp_outputs[-1], + input=clip, output=temp_out, ) - plan = plan_short(modified) + # Branding only on the first iteration + if i > 0: + modified = replace(modified, branding=None) + # When speed_segments and smart tracking combine, remap + # zoom path timestamps from source time to output time so + # the t-based ffmpeg expressions align with the stretched + # timeline. 
+ iter_zoom = zoom_path + if modified.speed_segments is not None and iter_zoom is not None: + from reeln.core.zoom import remap_zoom_path_for_speed_segments + + iter_zoom = remap_zoom_path_for_speed_segments( + iter_zoom, modified.speed_segments, + ) + plan = plan_short(modified, zoom_path=iter_zoom, source_fps=source_fps) else: - input_file = clip if i == 0 else temp_outputs[-1] + input_file = clip plan = plan_full_frame( input_file, temp_out, @@ -131,7 +171,7 @@ def render_iterations( rendered_subtitle=rendered_subtitle, ) - renderer.render(plan) + renderer.render(plan, emit_hooks=False) temp_outputs.append(temp_out) # Concatenate or rename @@ -139,21 +179,71 @@ def render_iterations( temp_outputs[0].rename(output) messages.append(f"Output: {output}") else: - concat_file = write_concat_file(temp_outputs, output.parent) + # Probe durations for xfade transitions + iter_durations: list[float] = [] + for tmp in temp_outputs: + dur = probe_duration(ffmpeg_path, tmp) + iter_durations.append(dur if dur is not None else 10.0) + try: - cmd = build_concat_command(ffmpeg_path, concat_file, output, copy=True) + cmd = build_xfade_command( + ffmpeg_path, + temp_outputs, + iter_durations, + output, + fade_duration=0.5, + ) run_ffmpeg(cmd) - finally: - concat_file.unlink(missing_ok=True) + except Exception: + # Fall back to concat demuxer if xfade fails + log.warning("xfade failed, falling back to concat demuxer") + concat_file = write_concat_file(temp_outputs, output.parent) + try: + cmd = build_concat_command(ffmpeg_path, concat_file, output, copy=False) + run_ffmpeg(cmd) + finally: + concat_file.unlink(missing_ok=True) messages.append(f"Concatenated {len(temp_outputs)} iterations") messages.append(f"Output: {output}") + # Emit POST_RENDER once for the final concatenated output + from reeln.plugins.hooks import Hook + from reeln.plugins.hooks import HookContext as PluginContext + from reeln.plugins.registry import get_registry + + final_duration = probe_duration(ffmpeg_path, 
output) + file_size: int | None = None + if output.is_file(): + file_size = output.stat().st_size + from reeln.models.render_plan import RenderResult + + final_result = RenderResult( + output=output, + duration_seconds=final_duration, + file_size_bytes=file_size, + ) + # Use the last iteration's plan for filter_complex detection + final_plan = plan + hook_data: dict[str, Any] = {"plan": final_plan, "result": final_result} + if game_info is not None: + hook_data["game_info"] = game_info + if game_event is not None: + hook_data["game_event"] = game_event + if player is not None: + hook_data["player"] = player + if assists is not None: + hook_data["assists"] = assists + get_registry().emit( + Hook.POST_RENDER, + PluginContext(hook=Hook.POST_RENDER, data=hook_data), + ) + messages.append("Iteration rendering complete") result = IterationResult( output=output, iteration_outputs=list(temp_outputs), profile_names=list(profile_names), - concat_copy=len(temp_outputs) > 1, + concat_copy=False, ) return result, messages diff --git a/reeln/core/overlay.py b/reeln/core/overlay.py index 5392227..d8e2c61 100644 --- a/reeln/core/overlay.py +++ b/reeln/core/overlay.py @@ -83,6 +83,7 @@ def build_overlay_context( home_colors: list[str] | None = None, away_colors: list[str] | None = None, y_offset: int = 0, + scoring_team: str | None = None, ) -> TemplateContext: """Enrich a template context with overlay-specific variables. @@ -98,23 +99,21 @@ def build_overlay_context( assist_1, assist_2 = _parse_assists(event_metadata) has_assists = bool(assist_1 or assist_2) - # Timing + # Timing — add 1s buffer past video duration so the overlay never + # disappears before the last frame (ffmpeg truncates to the shorter + # stream via shortest=1 or the video's own duration). 
+ end_time = duration + 1.0 scorer_start = format_ass_time(0.0) - scorer_end = format_ass_time(duration) + scorer_end = format_ass_time(end_time) assist_start = format_ass_time(0.0) - assist_end = format_ass_time(duration if has_assists else 0.0) + assist_end = format_ass_time(end_time if has_assists else 0.0) + box_end = format_ass_time(end_time) # Font sizing scorer_base = 46 if has_assists else 54 scorer_min = 32 if has_assists else 38 - goal_scorer_fs = str( - overlay_font_size(scorer_text, base=scorer_base, min_size=scorer_min, max_chars=24) - ) - goal_assist_fs = str( - overlay_font_size( - f"{assist_1} {assist_2}".strip(), base=24, min_size=18, max_chars=30 - ) - ) + goal_scorer_fs = str(overlay_font_size(scorer_text, base=scorer_base, min_size=scorer_min, max_chars=24)) + goal_assist_fs = str(overlay_font_size(f"{assist_1} {assist_2}".strip(), base=24, min_size=18, max_chars=30)) # Colors primary_rgb = _DEFAULT_PRIMARY @@ -139,12 +138,22 @@ def build_overlay_context( ass_team_text_color = rgb_to_ass(name_rgb, 0x40) ass_name_outline_color = rgb_to_ass(outline_rgb, 0) - # Team and level from base context - goal_scorer_team = base.get("home_team", "") - team_level = base.get("sport", "") + # Team and level from base context — uppercase for visual emphasis. + # When a tournament is configured, promote it to the title position + # and combine team/level into the secondary slot. 
+ tournament = base.get("tournament", "").strip() + team_name = (scoring_team if scoring_team is not None else base.get("home_team", "")).upper() + level = base.get("level", "").upper() + if tournament: + goal_scorer_team = tournament.upper() + team_level = f"{team_name}/{level}" if level else team_name + else: + goal_scorer_team = team_name + team_level = level # Layout coordinates (ported from old CLI) variables: dict[str, str] = { + "box_end": box_end, "goal_scorer_text": scorer_text, "goal_assist_1": assist_1, "goal_assist_2": assist_2, diff --git a/reeln/core/profiles.py b/reeln/core/profiles.py index b1cac6e..94cc664 100644 --- a/reeln/core/profiles.py +++ b/reeln/core/profiles.py @@ -13,7 +13,9 @@ build_audio_speed_filter, build_lut_filter, build_speed_filter, + build_speed_segments_filters, build_subtitle_filter, + validate_speed_segments, ) from reeln.core.templates import render_template_file from reeln.models.config import AppConfig @@ -82,8 +84,14 @@ def apply_profile_to_short( overrides["anchor_y"] = profile.anchor_y if profile.pad_color is not None: overrides["pad_color"] = profile.pad_color + if profile.scale is not None: + overrides["scale"] = profile.scale + if profile.smart is not None: + overrides["smart"] = profile.smart if profile.speed is not None: overrides["speed"] = profile.speed + if profile.speed_segments is not None: + overrides["speed_segments"] = profile.speed_segments if profile.lut is not None: overrides["lut"] = Path(profile.lut) if profile.codec is not None: @@ -113,15 +121,29 @@ def build_profile_filter_chain( ) -> tuple[str | None, str | None]: """Build filter_complex and audio_filter for full-frame rendering. - Only applies: LUT -> speed -> subtitle. No crop/scale. + Only applies: LUT -> speed (or speed_segments) -> subtitle. No crop/scale. Returns ``(filter_complex, audio_filter)`` — either may be ``None``. 
""" + has_speed = profile.speed is not None and profile.speed != 1.0 + has_segments = profile.speed_segments is not None + + if has_speed and has_segments: + raise RenderError("Cannot use both speed and speed_segments — they are mutually exclusive") + + if has_segments: + assert profile.speed_segments is not None + validate_speed_segments(profile.speed_segments) + return _build_profile_speed_segments_chain( + profile, rendered_subtitle=rendered_subtitle + ) + filters: list[str] = [] if profile.lut is not None: filters.append(build_lut_filter(Path(profile.lut))) - if profile.speed is not None and profile.speed != 1.0: + if has_speed: + assert profile.speed is not None filters.append(build_speed_filter(profile.speed)) if rendered_subtitle is not None: @@ -130,12 +152,54 @@ def build_profile_filter_chain( filter_complex = ",".join(filters) if filters else None audio_filter: str | None = None - if profile.speed is not None and profile.speed != 1.0: + if has_speed: + assert profile.speed is not None audio_filter = build_audio_speed_filter(profile.speed) return filter_complex, audio_filter +def _build_profile_speed_segments_chain( + profile: RenderProfile, + *, + rendered_subtitle: Path | None = None, +) -> tuple[str, None]: + """Build a full filter_complex for full-frame speed_segments rendering. + + Graph: [0:v]{pre} → split/trim/speed/concat → {post} + Audio goes through filter_complex too (audio_filter returns None). 
+ """ + assert profile.speed_segments is not None + + pre: list[str] = [] + if profile.lut is not None: + pre.append(build_lut_filter(Path(profile.lut))) + + post: list[str] = [] + if rendered_subtitle is not None: + post.append(build_subtitle_filter(rendered_subtitle)) + + video_segs, audio_segs = build_speed_segments_filters(profile.speed_segments) + + # Wire pre-filters + if pre: + video_graph = video_segs.replace("[_vsrc]", f"[0:v]{','.join(pre)},", 1) + else: + video_graph = video_segs.replace("[_vsrc]", "[0:v]", 1) + + # Wire post-filters + if post: + video_graph = video_graph.replace("[_vout]", f"[_vout];[_vout]{','.join(post)}") + else: + video_graph = video_graph.replace("[_vout]", "") + + # Wire audio + audio_graph = audio_segs.replace("[_asrc]", "[0:a]", 1) + audio_graph = audio_graph.replace("[_aout]", "") + + return f"{video_graph};{audio_graph}", None + + def plan_full_frame( input_path: Path, output: Path, @@ -151,6 +215,10 @@ def plan_full_frame( """ if profile.speed is not None and not 0.5 <= profile.speed <= 2.0: raise RenderError(f"Speed must be 0.5-2.0, got {profile.speed}") + if profile.speed is not None and profile.speed != 1.0 and profile.speed_segments is not None: + raise RenderError("Cannot use both speed and speed_segments — they are mutually exclusive") + if profile.speed_segments is not None: + validate_speed_segments(profile.speed_segments) filter_complex, audio_filter = build_profile_filter_chain(profile, rendered_subtitle=rendered_subtitle) @@ -187,9 +255,7 @@ def resolve_subtitle_for_profile( if profile.subtitle_template.startswith("builtin:"): from reeln.core.overlay import resolve_builtin_template - template_path = resolve_builtin_template( - profile.subtitle_template.removeprefix("builtin:") - ) + template_path = resolve_builtin_template(profile.subtitle_template.removeprefix("builtin:")) else: template_path = Path(profile.subtitle_template).expanduser() rendered = render_template_file(template_path, context) diff --git 
a/reeln/core/prompts.py b/reeln/core/prompts.py index 1e9115a..ae851d1 100644 --- a/reeln/core/prompts.py +++ b/reeln/core/prompts.py @@ -230,6 +230,20 @@ def prompt_thumbnail(preset: str | None = None) -> str: return answer +def prompt_tournament(preset: str | None = None) -> str: + """Prompt for a tournament name, or return *preset*. + + Tournament is optional — an empty answer is accepted (returns ``""``). + """ + if preset is not None: + return preset + questionary = _require_questionary() + answer: str | None = questionary.text("Tournament name (optional):").ask() + if answer is None: + return "" + return answer + + def prompt_period_length(preset: int | None = None) -> int: """Prompt for the period/segment length in minutes, or return *preset*. @@ -264,9 +278,11 @@ def collect_game_info_interactive( game_date: str | None = None, venue: str | None = None, game_time: str | None = None, + level: str | None = None, period_length: int | None = None, description: str | None = None, thumbnail: str | None = None, + tournament: str | None = None, ) -> dict[str, Any]: """Collect all game info fields, prompting only for missing values. 
@@ -288,7 +304,7 @@ def collect_game_info_interactive( away_profile: TeamProfile | None = None if home is None or away is None: - level = prompt_level() + level = prompt_level(preset=level) if home is None: home_profile = prompt_team(level, "home") result["home"] = home_profile.team_name @@ -312,5 +328,6 @@ def collect_game_info_interactive( result["period_length"] = prompt_period_length(preset=period_length) result["description"] = prompt_description(preset=description) result["thumbnail"] = prompt_thumbnail(preset=thumbnail) + result["tournament"] = prompt_tournament(preset=tournament) return result diff --git a/reeln/core/renderer.py b/reeln/core/renderer.py index 23268bc..b9d570e 100644 --- a/reeln/core/renderer.py +++ b/reeln/core/renderer.py @@ -7,13 +7,17 @@ from typing import Protocol from reeln.core.ffmpeg import ( + build_extract_frame_command, build_render_command, build_short_command, probe_duration, + probe_fps, + probe_resolution, run_ffmpeg, ) from reeln.core.log import get_logger from reeln.models.render_plan import RenderPlan, RenderResult +from reeln.models.zoom import ExtractedFrames log: logging.Logger = get_logger(__name__) @@ -25,6 +29,10 @@ def render(self, plan: RenderPlan) -> RenderResult: ... # pragma: no cover def preview(self, plan: RenderPlan) -> RenderResult: ... # pragma: no cover + def extract_frames( # pragma: no cover + self, input_path: Path, count: int, output_dir: Path + ) -> ExtractedFrames: ... + class FFmpegRenderer: """FFmpeg-based renderer.""" @@ -32,20 +40,25 @@ class FFmpegRenderer: def __init__(self, ffmpeg_path: Path) -> None: self.ffmpeg_path = ffmpeg_path - def render(self, plan: RenderPlan) -> RenderResult: + def render(self, plan: RenderPlan, *, emit_hooks: bool = True) -> RenderResult: """Render according to the plan. Dispatches to ``build_short_command`` when the plan has a filter chain, otherwise falls back to ``build_render_command``. 
+ + When *emit_hooks* is ``False``, ``PRE_RENDER`` and ``POST_RENDER`` + hooks are suppressed. Used by iteration rendering to avoid uploading + intermediate files — the final concatenated output is emitted once. """ from reeln.plugins.hooks import Hook, HookContext from reeln.plugins.registry import get_registry registry = get_registry() - registry.emit( - Hook.PRE_RENDER, - HookContext(hook=Hook.PRE_RENDER, data={"plan": plan}), - ) + if emit_hooks: + registry.emit( + Hook.PRE_RENDER, + HookContext(hook=Hook.PRE_RENDER, data={"plan": plan}), + ) if plan.filter_complex is not None: cmd = build_short_command(self.ffmpeg_path, plan) @@ -83,10 +96,11 @@ def render(self, plan: RenderPlan) -> RenderResult: ffmpeg_command=list(cmd), ) - registry.emit( - Hook.POST_RENDER, - HookContext(hook=Hook.POST_RENDER, data={"plan": plan, "result": result}), - ) + if emit_hooks: + registry.emit( + Hook.POST_RENDER, + HookContext(hook=Hook.POST_RENDER, data={"plan": plan, "result": result}), + ) log.info("Render complete: %s", plan.output) return result @@ -97,3 +111,47 @@ def preview(self, plan: RenderPlan) -> RenderResult: Delegates to ``render()`` — the plan already contains preview settings. """ return self.render(plan) + + def extract_frames(self, input_path: Path, count: int, output_dir: Path) -> ExtractedFrames: + """Extract evenly-spaced frames from a video for analysis. + + Probes duration, fps, and resolution, then extracts *count* frames + at evenly-spaced timestamps via single-frame ffmpeg seeks. 
+ """ + from reeln.core.errors import RenderError + + duration = probe_duration(self.ffmpeg_path, input_path) + if duration is None or duration <= 0: + raise RenderError(f"Cannot probe duration of {input_path}") + + resolution = probe_resolution(self.ffmpeg_path, input_path) + if resolution is None: + raise RenderError(f"Cannot probe resolution of {input_path}") + source_width, source_height = resolution + + fps = probe_fps(self.ffmpeg_path, input_path) or 30.0 + + # Calculate evenly-spaced timestamps (avoid exact start/end) + timestamps: tuple[float, ...] + if count == 1: + timestamps = (duration / 2.0,) + else: + step = duration / (count + 1) + timestamps = tuple(step * (i + 1) for i in range(count)) + + frame_paths: list[Path] = [] + for i, ts in enumerate(timestamps): + frame_path = output_dir / f"frame_{i:04d}.png" + cmd = build_extract_frame_command(self.ffmpeg_path, input_path, ts, frame_path) + run_ffmpeg(cmd) + frame_paths.append(frame_path) + + log.debug("Extracted %d frames from %s", len(frame_paths), input_path) + return ExtractedFrames( + frame_paths=tuple(frame_paths), + timestamps=timestamps, + source_width=source_width, + source_height=source_height, + duration=duration, + fps=fps, + ) diff --git a/reeln/core/shorts.py b/reeln/core/shorts.py index 613623c..b32e366 100644 --- a/reeln/core/shorts.py +++ b/reeln/core/shorts.py @@ -5,8 +5,10 @@ from pathlib import Path from reeln.core.errors import RenderError +from reeln.models.profile import SpeedSegment from reeln.models.render_plan import RenderPlan from reeln.models.short import CropMode, ShortConfig +from reeln.models.zoom import ZoomPath _VALID_LUT_SUFFIXES: set[str] = {".cube", ".3dl"} _VALID_SUBTITLE_SUFFIXES: set[str] = {".ass"} @@ -33,10 +35,53 @@ def validate_short_config(config: ShortConfig) -> None: raise RenderError(f"Anchor X must be 0.0-1.0, got {config.anchor_x}") if not 0.0 <= config.anchor_y <= 1.0: raise RenderError(f"Anchor Y must be 0.0-1.0, got {config.anchor_y}") + if not 0.5 <= 
config.scale <= 3.0: + raise RenderError(f"Scale must be 0.5-3.0, got {config.scale}") + if not 1 <= config.smart_zoom_frames <= 20: + raise RenderError(f"Smart zoom frames must be 1-20, got {config.smart_zoom_frames}") if config.lut is not None and config.lut.suffix.lower() not in _VALID_LUT_SUFFIXES: raise RenderError(f"LUT file must be .cube or .3dl, got {config.lut.suffix!r}") if config.subtitle is not None and config.subtitle.suffix.lower() not in _VALID_SUBTITLE_SUFFIXES: raise RenderError(f"Subtitle file must be .ass, got {config.subtitle.suffix!r}") + if config.branding is not None and config.branding.suffix.lower() not in _VALID_SUBTITLE_SUFFIXES: + raise RenderError(f"Branding file must be .ass, got {config.branding.suffix!r}") + if config.speed_segments is not None and config.speed != 1.0: + raise RenderError("Cannot use both speed and speed_segments — they are mutually exclusive") + if config.speed_segments is not None: + validate_speed_segments(config.speed_segments) + + +def validate_speed_segments(segments: tuple[SpeedSegment, ...]) -> None: + """Validate speed segment list. 
+ + Rules: + - At least 2 segments (otherwise use scalar ``speed``) + - All segments except the last must have ``until`` set + - Last segment must have ``until=None`` + - ``until`` values must be strictly increasing and positive + - All speeds must be in [0.25, 4.0] range + """ + if len(segments) < 2: + raise RenderError("speed_segments requires at least 2 segments (use scalar speed for uniform speed)") + for i, seg in enumerate(segments): + if not 0.25 <= seg.speed <= 4.0: + raise RenderError(f"speed_segments[{i}]: speed must be 0.25-4.0, got {seg.speed}") + for i, seg in enumerate(segments[:-1]): + if seg.until is None: + raise RenderError(f"speed_segments[{i}]: all segments except the last must have 'until' set") + if seg.until <= 0: + raise RenderError(f"speed_segments[{i}]: 'until' must be positive, got {seg.until}") + if segments[-1].until is not None: + raise RenderError("speed_segments: last segment must have until=None (runs to end of clip)") + prev_until = 0.0 + for i, seg in enumerate(segments[:-1]): + assert seg.until is not None + if seg.until <= prev_until: + raise RenderError( + f"speed_segments[{i}]: 'until' values must be strictly increasing, " + f"got {seg.until} after {prev_until}" + ) + prev_until = seg.until # --------------------------------------------------------------------------- @@ -44,15 +89,37 @@ def validate_short_config(config: ShortConfig) -> None: # --------------------------------------------------------------------------- -def build_scale_filter(*, crop_mode: CropMode, target_width: int, target_height: int) -> str: +def _round_even(value: int) -> int: + """Round *value* up to the nearest even integer.""" + return value + (value % 2) + + +def build_scale_filter(*, crop_mode: CropMode, target_width: int, target_height: int, scale: float = 1.0) -> str: """Build the initial scale filter for pad or crop mode. Pad mode scales to target width (content fits within frame). Crop mode scales to target height (content fills the frame). 
+ + When *scale* > 1.0 the intermediate dimensions are larger, producing + a zoom-in effect after subsequent crop/pad. """ if crop_mode == CropMode.PAD: - return f"scale={target_width}:-2:flags=lanczos" - return f"scale=-2:{target_height}:flags=lanczos" + w = _round_even(int(target_width * scale)) + return f"scale={w}:-2:flags=lanczos" + h = _round_even(int(target_height * scale)) + return f"scale=-2:{h}:flags=lanczos" + + +def build_overflow_crop_filter(*, target_width: int, target_height: int) -> str: + """Crop to target dimensions when the intermediate frame overflows. + + Used with pad + scale > 1.0: after scaling up and before padding, + crop any overflow back to target size (centered). + """ + return ( + f"crop=w='min(iw,{target_width})':h='min(ih,{target_height})':" + f"x='(iw-min(iw,{target_width}))/2':y='(ih-min(ih,{target_height}))/2'" + ) def build_pad_filter(*, target_width: int, target_height: int, pad_color: str) -> str: @@ -86,12 +153,138 @@ def build_audio_speed_filter(speed: float) -> str: def build_lut_filter(lut_path: Path) -> str: """Build a LUT color grading filter.""" - return f"lut3d='{lut_path}'" + return f"lut3d={_escape_filter_path(lut_path)}" + + +def _escape_filter_path(path: Path) -> str: + """Escape a file path for use in ffmpeg filter option values. + + ffmpeg's filter parser treats several characters as special inside + filter option values. Each must be backslash-escaped so the path + is treated literally — no wrapping single quotes needed. + + Escaped characters: ``\\``, ``:``, ``'``, ``[``, ``]``, ``;``, ``,``. 
+ """ + s = str(path) + # Order matters: escape backslash first to avoid double-escaping + s = s.replace("\\", "\\\\") + s = s.replace(":", "\\:") + s = s.replace("'", "\\'") + s = s.replace("[", "\\[") + s = s.replace("]", "\\]") + s = s.replace(";", "\\;") + s = s.replace(",", "\\,") + return s def build_subtitle_filter(subtitle_path: Path) -> str: - """Build an ASS subtitle overlay filter.""" - return f"ass='{subtitle_path}'" + """Build an ASS subtitle overlay filter. + + Uses the ``subtitles`` filter (not ``ass``) for broader ffmpeg + compatibility — homebrew and other builds sometimes omit the ``ass`` + filter alias. The explicit ``f=`` option key avoids positional + argument issues in multi-segment ``-filter_complex`` graphs. + """ + return f"subtitles=f={_escape_filter_path(subtitle_path)}" + + +# --------------------------------------------------------------------------- +# Variable-speed segment filters +# --------------------------------------------------------------------------- + + +def _build_atempo_chain(speed: float) -> str: + """Build chained atempo filters for speeds outside [0.5, 100.0]. + + ffmpeg's ``atempo`` filter accepts values in [0.5, 100.0]. For speeds + below 0.5, chain multiple ``atempo=0.5`` filters to reach the target. + """ + if 0.5 <= speed <= 100.0: + return f"atempo={speed}" + parts: list[str] = [] + remaining = speed + while remaining < 0.5: + parts.append("atempo=0.5") + remaining /= 0.5 + parts.append(f"atempo={remaining}") + return ",".join(parts) + + +def compute_speed_segments_duration( + segments: tuple[SpeedSegment, ...], + source_duration: float, +) -> float: + """Compute output duration after applying speed segments to a source clip. + + Each segment's source-time span is divided by its speed to get the output + duration. The last segment (``until=None``) runs to *source_duration*. 
+ """ + total = 0.0 + prev = 0.0 + for seg in segments: + end = seg.until if seg.until is not None else source_duration + span = max(0.0, end - prev) + total += span / seg.speed + prev = end + return total + + +def build_speed_segments_filters( + segments: tuple[SpeedSegment, ...], +) -> tuple[str, str]: + """Build video and audio filter graph fragments for variable-speed segments. + + Returns ``(video_fragment, audio_fragment)`` — both are semicolon-separated + filter_complex fragments with stream labels. + + Video pattern: + [_vsrc]split=N[v0]...[vN-1]; + [v0]trim=0:5,setpts=PTS-STARTPTS,setpts=PTS/speed[sv0]; ... + [sv0]...[svN-1]concat=n=N:v=1:a=0[_vout] + + Audio pattern: + [_asrc]asplit=N[a0]...[aN-1]; + [a0]atrim=0:5,asetpts=PTS-STARTPTS,atempo=speed[sa0]; ... + [sa0]...[saN-1]concat=n=N:v=0:a=1[_aout] + """ + n = len(segments) + + # Build time boundaries + boundaries: list[tuple[float | None, float | None]] = [] + prev = 0.0 + for seg in segments: + boundaries.append((prev, seg.until)) + prev = seg.until if seg.until is not None else prev + + # Video split + v_labels = [f"[v{i}]" for i in range(n)] + sv_labels = [f"[sv{i}]" for i in range(n)] + video_parts: list[str] = [f"[_vsrc]split={n}{''.join(v_labels)}"] + + for i, (seg, (start, end)) in enumerate(zip(segments, boundaries, strict=True)): + trim = f"trim={start}" if end is None else f"trim={start}:{end}" + chain = [trim, "setpts=PTS-STARTPTS"] + if seg.speed != 1.0: + chain.append(f"setpts=PTS/{seg.speed}") + video_parts.append(f"{v_labels[i]}{',' .join(chain)}{sv_labels[i]}") + + video_parts.append(f"{''.join(sv_labels)}concat=n={n}:v=1:a=0[_vout]") + + # Audio split + a_labels = [f"[a{i}]" for i in range(n)] + sa_labels = [f"[sa{i}]" for i in range(n)] + audio_parts: list[str] = [f"[_asrc]asplit={n}{''.join(a_labels)}"] + + for i, (seg, (start, end)) in enumerate(zip(segments, boundaries, strict=True)): + atrim = f"atrim={start}" if end is None else f"atrim={start}:{end}" + chain = [atrim, 
"asetpts=PTS-STARTPTS"] + if seg.speed != 1.0: + chain.append(_build_atempo_chain(seg.speed)) + audio_parts.append(f"{a_labels[i]}{',' .join(chain)}{sa_labels[i]}") + + audio_parts.append(f"{''.join(sa_labels)}concat=n={n}:v=0:a=1[_aout]") + + return ";".join(video_parts), ";".join(audio_parts) # --------------------------------------------------------------------------- @@ -99,13 +292,56 @@ def build_subtitle_filter(subtitle_path: Path) -> str: # --------------------------------------------------------------------------- -def build_filter_chain(config: ShortConfig) -> tuple[str, str | None]: +def _resolve_smart(crop_mode: CropMode, smart: bool) -> tuple[CropMode, bool]: + """Translate deprecated SMART/SMART_PAD enums to effective mode + smart flag. + + Returns ``(effective_crop_mode, is_smart)`` where *effective_crop_mode* is + always ``PAD`` or ``CROP``. + """ + if crop_mode == CropMode.SMART: + return CropMode.CROP, True + if crop_mode == CropMode.SMART_PAD: + return CropMode.PAD, True + return crop_mode, smart + + +def build_filter_chain( + config: ShortConfig, + *, + zoom_path: ZoomPath | None = None, + source_fps: float = 30.0, +) -> tuple[str, str | None]: """Assemble the full video filter chain and optional audio filter. - Filter ordering: LUT -> speed -> scale -> pad/crop+final_scale -> subtitle. + Filter ordering: LUT -> speed -> scale -> overflow_crop -> pad/crop -> final_scale -> subtitle. + + The chain is driven by two orthogonal axes: + - **Framing:** ``PAD`` or ``CROP`` (deprecated ``SMART``/``SMART_PAD`` are translated). + - **Scale:** multiplier applied to the initial scale dimensions (1.0 = no zoom). + + When *zoom_path* is provided and smart mode is active, dynamic crop/pad + expressions replace the static anchor-based alternatives. Returns ``(filter_complex_string, audio_filter_or_none)``. 
""" + from reeln.core.zoom import build_smart_crop_filter, build_smart_pad_graph + + effective_crop, is_smart = _resolve_smart(config.crop_mode, config.smart) + + if is_smart and zoom_path is None: + raise RenderError( + "Smart crop mode requires a zoom path from a vision plugin. " + "Ensure a plugin providing ON_FRAMES_EXTRACTED analysis is enabled." + ) + + # Variable-speed path: uses split/concat with stream labels + if config.speed_segments is not None: + return _build_speed_segments_chain( + config, + zoom_path=zoom_path if is_smart else None, + source_fps=source_fps, + ) + filters: list[str] = [] # 1. LUT (color grade source first) @@ -116,56 +352,285 @@ def build_filter_chain(config: ShortConfig) -> tuple[str, str | None]: if config.speed != 1.0: filters.append(build_speed_filter(config.speed)) - # 3. Scale + pad/crop + # 3. Scale (with scale factor) + # Smart pad scales by height (like crop) so the video is wider than + # the target — this gives the overlay horizontal room to pan. + scale_mode = CropMode.CROP if (effective_crop == CropMode.PAD and is_smart) else effective_crop filters.append( build_scale_filter( - crop_mode=config.crop_mode, + crop_mode=scale_mode, target_width=config.width, target_height=config.height, + scale=config.scale, ) ) - if config.crop_mode == CropMode.PAD: + + # 4. Overflow crop (pad + scale > 1.0 only, not smart pad) + if effective_crop == CropMode.PAD and not is_smart and config.scale > 1.0: filters.append( - build_pad_filter( + build_overflow_crop_filter( target_width=config.width, target_height=config.height, - pad_color=config.pad_color, ) ) - else: + + # 5. Crop or pad (static or smart) + if effective_crop == CropMode.PAD and is_smart: + # Smart pad uses overlay on a colour background because ffmpeg's + # pad filter does not support the ``t`` variable in expressions. + # build_smart_pad_graph returns a complete filter_complex string + # with stream labels, so we return it directly instead of joining. 
+ assert zoom_path is not None # guarded above + post_filters: list[str] = [] + if config.subtitle is not None: + post_filters.append(build_subtitle_filter(config.subtitle)) + if config.branding is not None: + post_filters.append(build_subtitle_filter(config.branding)) + + audio_filter: str | None = None + if config.speed != 1.0: + audio_filter = build_audio_speed_filter(config.speed) + + filter_complex = build_smart_pad_graph( + pre_filters=filters, + zoom_path=zoom_path, + target_width=config.width, + target_height=config.height, + pad_color=config.pad_color, + post_filters=post_filters or None, + source_fps=source_fps, + ) + return filter_complex, audio_filter + + if effective_crop == CropMode.PAD: filters.append( - build_crop_filter( + build_pad_filter( target_width=config.width, target_height=config.height, - anchor_x=config.anchor_x, - anchor_y=config.anchor_y, + pad_color=config.pad_color, ) ) + else: + if is_smart: + assert zoom_path is not None # guarded above + filters.append(build_smart_crop_filter(zoom_path, config.width, config.height)) + else: + filters.append( + build_crop_filter( + target_width=config.width, + target_height=config.height, + anchor_x=config.anchor_x, + anchor_y=config.anchor_y, + ) + ) + # 6. Final scale (crop mode only — ensures exact output dimensions) filters.append(build_final_scale_filter(target_width=config.width, target_height=config.height)) - # 4. Subtitle (render at output resolution) + # 7. Subtitle (render at output resolution) if config.subtitle is not None: filters.append(build_subtitle_filter(config.subtitle)) + # 8. 
Branding (after subtitle) + if config.branding is not None: + filters.append(build_subtitle_filter(config.branding)) + filter_complex = ",".join(filters) # Audio filter - audio_filter: str | None = None + audio_filter = None if config.speed != 1.0: audio_filter = build_audio_speed_filter(config.speed) return filter_complex, audio_filter +def _build_speed_segments_chain( + config: ShortConfig, + *, + zoom_path: ZoomPath | None = None, + source_fps: float = 30.0, +) -> tuple[str, str | None]: + """Build a full filter_complex for speed_segments rendering. + + When speed_segments are active, video and audio both go through + ``-filter_complex`` (audio_filter returns ``None``). + + When *zoom_path* is provided (smart pad mode), the post-concat video + uses ``overlay`` on a colour background with ``t``-based panning + expressions — the same approach as ``build_smart_pad_graph`` but + wired after the speed-segments concat instead of directly from + ``[0:v]``. + + Graph structure (static): + [0:v]{pre},split=N... → trim/speed → concat → {post} [vfinal] + [0:a]asplit=N... → atrim/atempo → concat [afinal] + + Graph structure (smart pad): + [0:v]{pre},split=N... → trim/speed → concat → scale [_fg] + color=...[_bg]; [_bg][_fg]overlay=...[vfinal] (or with subtitle) + [0:a]asplit=N... → atrim/atempo → concat [afinal] + """ + from reeln.core.zoom import build_smart_pad_filter + + assert config.speed_segments is not None + effective_crop, is_smart = _resolve_smart(config.crop_mode, config.smart) + use_smart_pad = effective_crop == CropMode.PAD and is_smart and zoom_path is not None + + # Pre-speed filters (applied to source before split) + pre: list[str] = [] + if config.lut is not None: + pre.append(build_lut_filter(config.lut)) + + # Post-speed filters (applied after concat) + post: list[str] = [] + + # Scale — PAD mode uses height-based scaling (same as smart pad) so + # landscape sources fill the frame vertically. 
After scale, overflow + # crop clips the horizontal excess to target_width. + if effective_crop == CropMode.PAD: + post.append( + build_scale_filter( + crop_mode=CropMode.CROP, + target_width=config.width, + target_height=config.height, + scale=config.scale, + ) + ) + if not use_smart_pad: + post.append( + build_overflow_crop_filter( + target_width=config.width, + target_height=config.height, + ) + ) + else: + post.append( + build_scale_filter( + crop_mode=effective_crop, + target_width=config.width, + target_height=config.height, + scale=config.scale, + ) + ) + + if use_smart_pad: + # Smart pad after speed_segments: overlay on colour background. + # Post-filters so far contain only scale (height-based). + # We label the scaled output [_fg], generate a colour source [_bg], + # and overlay with t-based expressions. + assert zoom_path is not None + + video_segs, audio_segs = build_speed_segments_filters(config.speed_segments) + + # Wire pre-filters into the video source label + if pre: + video_graph = video_segs.replace("[_vsrc]", f"[0:v]{','.join(pre)},", 1) + else: + video_graph = video_segs.replace("[_vsrc]", "[0:v]", 1) + + # Wire post-filters (scale) after concat, label [_fg] + video_graph = video_graph.replace( + "[_vout]", f"[_vout];[_vout]{','.join(post)}[_fg]", + ) + + # Colour background and overlay + from reeln.core.zoom import _fps_to_fraction + + fps_frac = _fps_to_fraction(source_fps) + overlay_expr = build_smart_pad_filter( + zoom_path, config.width, config.height, config.pad_color, + ) + + color_part = f"color=c={config.pad_color}:s={config.width}x{config.height}:r={fps_frac}[_bg]" + overlay_part = f"[_bg][_fg]{overlay_expr}" + + post_overlay: list[str] = [] + if config.subtitle is not None: + post_overlay.append(build_subtitle_filter(config.subtitle)) + if config.branding is not None: + post_overlay.append(build_subtitle_filter(config.branding)) + + if post_overlay: + overlay_part = 
f"{overlay_part}[_ov];[_ov]format=yuv420p,{','.join(post_overlay)}[vfinal]" + else: + overlay_part = f"{overlay_part}[vfinal]" + + video_graph = f"{video_graph};{color_part};{overlay_part}" + + # Audio + audio_graph = audio_segs.replace("[_asrc]", "[0:a]", 1) + audio_graph = audio_graph.replace("[_aout]", "[afinal]") + + return f"{video_graph};{audio_graph}", None + + # Crop or pad (static) + if effective_crop == CropMode.PAD: + post.append( + build_pad_filter( + target_width=config.width, + target_height=config.height, + pad_color=config.pad_color, + ) + ) + else: + if is_smart and zoom_path is not None: + from reeln.core.zoom import build_smart_crop_filter + + post.append(build_smart_crop_filter(zoom_path, config.width, config.height)) + else: + post.append( + build_crop_filter( + target_width=config.width, + target_height=config.height, + anchor_x=config.anchor_x, + anchor_y=config.anchor_y, + ) + ) + post.append(build_final_scale_filter(target_width=config.width, target_height=config.height)) + + # Subtitle + if config.subtitle is not None: + post.append(build_subtitle_filter(config.subtitle)) + + # Branding (after subtitle) + if config.branding is not None: + post.append(build_subtitle_filter(config.branding)) + + video_segs, audio_segs = build_speed_segments_filters(config.speed_segments) + + # Wire pre-filters into the video source label + if pre: + # Replace [_vsrc] with [0:v]pre_filters + video_graph = video_segs.replace("[_vsrc]", f"[0:v]{','.join(pre)},", 1) + else: + video_graph = video_segs.replace("[_vsrc]", "[0:v]", 1) + + # Wire post-filters after the concat output label + # post always has at least scale + crop/pad + video_graph = video_graph.replace("[_vout]", f"[_vout];[_vout]{','.join(post)}[vfinal]") + + # Wire audio source label — keep [afinal] for explicit mapping + audio_graph = audio_segs.replace("[_asrc]", "[0:a]", 1) + audio_graph = audio_graph.replace("[_aout]", "[afinal]") + + filter_complex = f"{video_graph};{audio_graph}" + return 
filter_complex, None + + # --------------------------------------------------------------------------- # Plan builders # --------------------------------------------------------------------------- -def plan_short(config: ShortConfig) -> RenderPlan: +def plan_short( + config: ShortConfig, + *, + zoom_path: ZoomPath | None = None, + source_fps: float = 30.0, +) -> RenderPlan: """Create a RenderPlan for a short-form render.""" validate_short_config(config) - filter_complex, audio_filter = build_filter_chain(config) + filter_complex, audio_filter = build_filter_chain(config, zoom_path=zoom_path, source_fps=source_fps) return RenderPlan( inputs=[config.input], output=config.output, @@ -200,8 +665,11 @@ def plan_preview(config: ShortConfig) -> RenderPlan: crop_mode=config.crop_mode, anchor_x=config.anchor_x, anchor_y=config.anchor_y, + scale=config.scale, + smart=config.smart, pad_color=config.pad_color, speed=config.speed, + speed_segments=config.speed_segments, lut=config.lut, subtitle=config.subtitle, codec=config.codec, @@ -209,6 +677,7 @@ def plan_preview(config: ShortConfig) -> RenderPlan: crf=28, audio_codec=config.audio_codec, audio_bitrate=config.audio_bitrate, + branding=config.branding, ) filter_complex, audio_filter = build_filter_chain(preview) return RenderPlan( diff --git a/reeln/core/teams.py b/reeln/core/teams.py index 5ba8d7a..8270acf 100644 --- a/reeln/core/teams.py +++ b/reeln/core/teams.py @@ -1,15 +1,21 @@ -"""Team profile management: load, save, list, delete.""" +"""Team profile management: load, save, list, delete, roster lookup.""" from __future__ import annotations +import csv +import io import json +import logging import re import tempfile from pathlib import Path -from reeln.core.config import config_dir +from reeln.core.config import _config_base_dir from reeln.core.errors import ConfigError -from reeln.models.team import TeamProfile, dict_to_team_profile, team_profile_to_dict +from reeln.models.game import GameInfo +from reeln.models.team 
import RosterEntry, TeamProfile, dict_to_team_profile, team_profile_to_dict + +logger = logging.getLogger(__name__) def slugify(name: str) -> str: @@ -23,7 +29,7 @@ def slugify(name: str) -> str: def _teams_base_dir() -> Path: """Return the base directory for team profile storage.""" - return config_dir() / "teams" + return _config_base_dir() / "teams" def load_team_profile(level: str, slug: str) -> TeamProfile: @@ -100,3 +106,91 @@ def delete_team_profile(level: str, slug: str) -> bool: return False path.unlink() return True + + +# --------------------------------------------------------------------------- +# Roster loading and player lookup +# --------------------------------------------------------------------------- + + +def load_roster(roster_path: Path) -> dict[str, RosterEntry]: + """Load a roster CSV and return a dict keyed by jersey number. + + CSV format: ``number,name,position`` (with header row). + Raises ``ConfigError`` if the file is missing, unreadable, or malformed. + """ + if not roster_path.is_file(): + raise ConfigError(f"Roster file not found: {roster_path}") + try: + text = roster_path.read_text(encoding="utf-8") + except OSError as exc: + raise ConfigError(f"Failed to read roster file {roster_path}: {exc}") from exc + + reader = csv.DictReader(io.StringIO(text)) + if reader.fieldnames is None: + raise ConfigError(f"Roster CSV is empty or has no header: {roster_path}") + + required = {"number", "name", "position"} + missing = required - {f.strip().lower() for f in reader.fieldnames} + if missing: + raise ConfigError(f"Roster CSV missing required columns {sorted(missing)}: {roster_path}") + + roster: dict[str, RosterEntry] = {} + for row in reader: + # Normalize field names to lowercase and strip whitespace + cleaned = {k.strip().lower(): v.strip() for k, v in row.items() if k is not None} + number = cleaned.get("number", "").strip() + if not number: + continue + roster[number] = RosterEntry( + number=number, + name=cleaned.get("name", ""), + 
position=cleaned.get("position", ""), + ) + return roster + + +def lookup_players( + roster: dict[str, RosterEntry], + numbers: list[str], + team_name: str, +) -> tuple[str, list[str]]: + """Look up player names by jersey numbers from a roster. + + First number is the primary player (goal scorer), rest are assists. + Returns ``(scorer_display, [assist_displays])``. + + Format: ``"#48 LastName"`` — falls back to ``"#48"`` if number not in roster. + Warns on missing numbers but does not error. + """ + if not numbers: + return ("", []) + + def _format(num: str) -> str: + entry = roster.get(num) + if entry is None: + logger.warning("Warning: #%s not found in %s roster, using '#%s'", num, team_name, num) + return f"#{num}" + return f"#{num} {entry.name.strip()}" + + scorer = _format(numbers[0]) + assists = [_format(n) for n in numbers[1:]] + return (scorer, assists) + + +def resolve_scoring_team( + event_type: str, + game_info: GameInfo, +) -> tuple[str, str, str]: + """Determine which team scored from the event type. + + Returns ``(team_name, team_slug, level)`` for the scoring team. + Uses prefix matching: ``home_*``/``HOME_*`` → home team, + ``away_*``/``AWAY_*`` → away team. + Defaults to home team when event type doesn't match either pattern. 
+ """ + lower = event_type.lower() + if lower.startswith("away_"): + return (game_info.away_team, game_info.away_slug, game_info.level) + # home_* or anything else defaults to home + return (game_info.home_team, game_info.home_slug, game_info.level) diff --git a/reeln/core/templates.py b/reeln/core/templates.py index db916a2..02c347d 100644 --- a/reeln/core/templates.py +++ b/reeln/core/templates.py @@ -90,6 +90,8 @@ def build_base_context( "game_number": str(game_info.game_number), "game_time": game_info.game_time, "period_length": str(game_info.period_length), + "level": game_info.level, + "tournament": game_info.tournament, } if event is not None: variables["event_type"] = event.event_type diff --git a/reeln/core/zoom.py b/reeln/core/zoom.py new file mode 100644 index 0000000..5462b82 --- /dev/null +++ b/reeln/core/zoom.py @@ -0,0 +1,257 @@ +"""Piecewise linear interpolation and smart crop filter builders for zoom paths.""" + +from __future__ import annotations + +from dataclasses import replace +from fractions import Fraction + +from reeln.models.profile import SpeedSegment +from reeln.models.zoom import ZoomPath + +# ffmpeg's expression parser has a hard limit on total expression complexity. +# With pre-computed A*t+B coefficients, 8 segments is safe; beyond that the +# parser fails with "Missing ')' or too many args". +_MAX_LERP_SEGMENTS = 8 + + +def _downsample( + values: list[tuple[float, float]], + max_points: int, +) -> list[tuple[float, float]]: + """Reduce *values* to at most *max_points* by selecting evenly spaced entries. + + Always preserves the first and last points so interpolation covers the + full time range. Returns the original list unchanged when it is already + within the limit. 
+ """ + if len(values) <= max_points: + return values + + # Always keep first and last; distribute remaining slots evenly + result: list[tuple[float, float]] = [values[0]] + inner_slots = max_points - 2 + step = (len(values) - 1) / (inner_slots + 1) + for i in range(1, inner_slots + 1): + idx = round(i * step) + result.append(values[idx]) + result.append(values[-1]) + return result + + +def build_piecewise_lerp( + values: list[tuple[float, float]], + total_duration: float, + time_expr: str = "t", +) -> str: + """Build a flat ffmpeg expression for piecewise linear interpolation. + + *values* is a list of ``(timestamp, value)`` pairs, sorted by timestamp. + Returns an ffmpeg expression that linearly interpolates between consecutive + pairs, clamping to the last value after the final timestamp. + + Uses a sum-of-products approach (``lt()*lerp + gte()*lt()*lerp + ...``) + instead of nested ``if()`` calls to avoid hitting ffmpeg's expression + parser nesting depth limit with many zoom points. + + For a single point, returns the constant value. + """ + if not values: + return "0" + + if len(values) == 1: + return str(values[0][1]) + + # Downsample to stay within ffmpeg expression parser limits. + # _MAX_LERP_SEGMENTS segments need _MAX_LERP_SEGMENTS + 1 points. + values = _downsample(values, _MAX_LERP_SEGMENTS + 1) + + terms: list[str] = [] + + def _fmt(v: float) -> str: + """Format a float, rounding to 6 decimal places and stripping trailing zeros.""" + return f"{v:.6f}".rstrip("0").rstrip(".") + + for i in range(len(values) - 1): + t_i, v_i = values[i] + t_next, v_next = values[i + 1] + dt = t_next - t_i + + if dt == 0: + lerp = _fmt(v_i) + else: + # Pre-compute slope (A) and intercept (B) so lerp = A*t+B + # instead of (V+DV*(t-T)/DT) — roughly halves expression length. 
+ a = (v_next - v_i) / dt + b = v_i - a * t_i + lerp = f"({_fmt(a)}*{time_expr}+{_fmt(b)})" + + if i == 0: + # First segment: covers t < t_next (includes extrapolation before t0) + terms.append(f"lt({time_expr},{_fmt(t_next)})*{lerp}") + else: + # Middle segments: gte(t, t_i) AND lt(t, t_next) + terms.append(f"gte({time_expr},{_fmt(t_i)})*lt({time_expr},{_fmt(t_next)})*{lerp}") + + # Clamp to last value after final timestamp + terms.append(f"gte({time_expr},{_fmt(values[-1][0])})*{_fmt(values[-1][1])}") + + return "+".join(terms) + + +def build_smart_crop_filter( + zoom_path: ZoomPath, + target_width: int, + target_height: int, +) -> str: + """Build a dynamic crop filter expression from a zoom path. + + Generates an ffmpeg ``crop=w:h:x:y`` filter where x and y use + piecewise-lerp expressions derived from the zoom points. The crop + region is clamped to source bounds. + + The input is assumed to already be scaled so that its height matches + the source aspect (scale=-2:source_height). The crop extracts a + vertical slice of width ``ih*target_width/target_height``. + """ + crop_w = f"ih*{target_width}/{target_height}" + crop_h = "ih" + + # Build x interpolation from center_x values + # center_x is normalized 0-1. Map to pixel x offset: + # x = center_x * (iw - crop_w) clamped to [0, iw - crop_w] + x_values = [(p.timestamp, p.center_x) for p in zoom_path.points] + x_lerp = build_piecewise_lerp(x_values, zoom_path.duration) + x_expr = f"max(0,min(iw-{crop_w},({x_lerp})*(iw-{crop_w})))" + + # Build y interpolation from center_y values + y_values = [(p.timestamp, p.center_y) for p in zoom_path.points] + y_lerp = build_piecewise_lerp(y_values, zoom_path.duration) + y_expr = f"max(0,min(ih-{crop_h},({y_lerp})*(ih-{crop_h})))" + + # Single-quote each value so ffmpeg's filter parser doesn't split on + # the commas inside if()/max()/min() function arguments. 
+ return f"crop=w='{crop_w}':h='{crop_h}':x='{x_expr}':y='{y_expr}'" + + +def build_smart_pad_filter( + zoom_path: ZoomPath, + target_width: int, + target_height: int, + pad_color: str = "black", +) -> str: + """Build a dynamic overlay-based pad filter that follows the action. + + The ``pad`` filter's expression evaluator does not support the ``t`` + variable even with ``eval=frame``, so we use ``overlay`` on a generated + colour background instead. The returned string is an **overlay + expression** (not a pad filter) that expects the scaled video as its + second overlay input — callers must wire it into a multi-stream + ``filter_complex`` graph. + + Only the horizontal axis (center_x) tracks the action — vertical + position stays centered. Vertical tracking would be disorienting + in pad mode; it only makes sense when zooming in (crop mode). + + ``build_smart_pad_graph`` wraps this into the full multi-stream graph. + """ + # Build x interpolation from center_x values. + # center_x is 0-1 in the original frame. We want the action point + # centred horizontally: x = W/2 - center_x * w. + # The video is wider than the background (scaled by height), so x + # ranges from (W-w) to 0 (both ≤ 0). Clamp accordingly. + x_values = [(p.timestamp, p.center_x) for p in zoom_path.points] + x_lerp = build_piecewise_lerp(x_values, zoom_path.duration) + x_expr = f"min(0,max(W-w,W/2-({x_lerp})*w))" + + return f"overlay=x='{x_expr}':y='(H-h)/2':eval=frame:shortest=1" + + +def remap_zoom_path_for_speed_segments( + zoom_path: ZoomPath, + segments: tuple[SpeedSegment, ...], +) -> ZoomPath: + """Remap zoom path timestamps from source time to output time. + + After speed_segments processing the output timeline differs from the + source because segments run at different speeds. This adjusts each + zoom point's timestamp so ``t``-based ffmpeg expressions align with + the rendered output. 
+    """
+
+    def _source_to_output(source_t: float) -> float:
+        output_t = 0.0
+        prev = 0.0
+        for seg in segments:
+            end = seg.until if seg.until is not None else source_t
+            if source_t <= end:
+                output_t += (source_t - prev) / seg.speed
+                return output_t
+            output_t += (end - prev) / seg.speed
+            prev = end
+        return output_t
+
+    remapped = tuple(
+        replace(p, timestamp=_source_to_output(p.timestamp))
+        for p in zoom_path.points
+    )
+    new_duration = _source_to_output(zoom_path.duration)
+    return replace(zoom_path, points=remapped, duration=new_duration)
+
+
+def _fps_to_fraction(fps: float) -> str:
+    """Convert an fps float to an exact fraction string for ffmpeg.
+
+    Uses ``fractions.Fraction`` with a denominator limit to recover common
+    NTSC rates (e.g. 59.94… → ``60000/1001``, 29.97… → ``30000/1001``)
+    without floating-point truncation artifacts.
+    """
+    frac = Fraction(fps).limit_denominator(10000)
+    return f"{frac.numerator}/{frac.denominator}"
+
+
+def build_smart_pad_graph(
+    pre_filters: list[str],
+    zoom_path: ZoomPath,
+    target_width: int,
+    target_height: int,
+    pad_color: str = "black",
+    *,
+    post_filters: list[str] | None = None,
+    source_fps: float = 30.0,
+) -> str:
+    """Build a full ``filter_complex`` graph for smart pad mode.
+
+    The ``pad`` filter cannot evaluate ``t``-based expressions, so we
+    generate a colour background source and ``overlay`` the scaled video
+    on top with per-frame x positioning (only the horizontal axis tracks
+    the action; vertical position stays centered — see
+    ``build_smart_pad_filter``).
+
+    *pre_filters* are applied to ``[0:v]`` before the overlay (e.g. LUT,
+    speed, scale). *post_filters* are appended after the overlay (e.g.
+    subtitle).
+
+    Returns a complete ``filter_complex`` string with stream labels.
+ """ + overlay_expr = build_smart_pad_filter( + zoom_path, + target_width, + target_height, + pad_color, + ) + + pre_chain = ",".join(pre_filters) if pre_filters else "null" + fps_frac = _fps_to_fraction(source_fps) + parts = [ + f"color=c={pad_color}:s={target_width}x{target_height}:r={fps_frac}[_bg]", + f"[0:v]{pre_chain}[_fg]", + f"[_bg][_fg]{overlay_expr}", + ] + + if post_filters: + # Pipe through format=yuv420p as a buffer between the overlay + # and post-filters — directly comma-chaining or using stream + # labels after the single-quoted overlay expression confuses + # ffmpeg's graph-level parser. + parts[-1] = f"{parts[-1]}[_ov]" + parts.append(f"[_ov]format=yuv420p,{','.join(post_filters)}") + + return ";".join(parts) diff --git a/reeln/core/zoom_debug.py b/reeln/core/zoom_debug.py new file mode 100644 index 0000000..84413b5 --- /dev/null +++ b/reeln/core/zoom_debug.py @@ -0,0 +1,201 @@ +"""Debug output for smart zoom: write extracted frames and zoom path data.""" + +from __future__ import annotations + +import json +import logging +import shutil +import subprocess +from pathlib import Path + +from reeln.core.log import get_logger +from reeln.core.zoom import build_piecewise_lerp, build_smart_crop_filter +from reeln.models.zoom import ExtractedFrames, ZoomPath, ZoomPoint + +log: logging.Logger = get_logger(__name__) + + +def _build_annotate_command( + ffmpeg_path: Path, + frame_path: Path, + output_path: Path, + point: ZoomPoint, + source_width: int, + source_height: int, + target_width: int, + target_height: int, +) -> list[str]: + """Build an ffmpeg command to draw crosshairs and crop box on a frame. 
+ + Draws: + - A red crosshair at the detected center point + - A green rectangle showing the crop region + """ + cx = int(point.center_x * source_width) + cy = int(point.center_y * source_height) + + # Crop box dimensions (same ratio as the smart crop filter) + crop_w = int(source_height * target_width / target_height) + crop_h = source_height + + # Crop box position (clamped to source bounds) + box_x = max(0, min(source_width - crop_w, int(point.center_x * (source_width - crop_w)))) + box_y = max(0, min(source_height - crop_h, int(point.center_y * (source_height - crop_h)))) + + # Crosshair lines (horizontal + vertical through center, 2px thick) + cross_len = 40 + filters = [ + # Crop box (green) + f"drawbox=x={box_x}:y={box_y}:w={crop_w}:h={crop_h}:color=green@0.6:t=3", + # Horizontal crosshair + f"drawbox=x={max(0, cx - cross_len)}:y={max(0, cy - 1)}:w={cross_len * 2}:h=2:color=red:t=fill", + # Vertical crosshair + f"drawbox=x={max(0, cx - 1)}:y={max(0, cy - cross_len)}:w=2:h={cross_len * 2}:color=red:t=fill", + ] + + return [ + str(ffmpeg_path), + "-y", + "-v", + "error", + "-i", + str(frame_path), + "-vf", + ",".join(filters), + "-frames:v", + "1", + "-update", + "1", + str(output_path), + ] + + +def _annotate_frames( + ffmpeg_path: Path, + extracted: ExtractedFrames, + zoom_path: ZoomPath, + target_width: int, + target_height: int, + output_dir: Path, +) -> list[Path]: + """Render annotated copies of extracted frames with crosshairs and crop boxes. + + Returns paths to the annotated frame files. Frames without a matching + zoom point are skipped. Failures are logged and skipped. 
+ """ + annotated: list[Path] = [] + point_by_ts = {p.timestamp: p for p in zoom_path.points} + + for i, (frame_path, ts) in enumerate(zip(extracted.frame_paths, extracted.timestamps, strict=True)): + point = point_by_ts.get(ts) + if point is None or not frame_path.is_file(): + continue + + out_path = output_dir / f"annotated_{i:04d}.png" + cmd = _build_annotate_command( + ffmpeg_path, + frame_path, + out_path, + point, + extracted.source_width, + extracted.source_height, + target_width, + target_height, + ) + try: + subprocess.run(cmd, capture_output=True, text=True, timeout=30, check=True) + annotated.append(out_path) + except (subprocess.CalledProcessError, subprocess.TimeoutExpired, OSError): + log.debug("Failed to annotate frame %d, skipping", i, exc_info=True) + + return annotated + + +def write_zoom_debug( + game_dir: Path, + extracted: ExtractedFrames, + zoom_path: ZoomPath | None, + target_width: int, + target_height: int, + *, + ffmpeg_path: Path | None = None, + plugin_debug: dict[str, object] | None = None, +) -> Path: + """Write zoom debug artifacts to ``game_dir/debug/zoom/``. + + Creates: + - ``frame_NNNN.png`` — copies of extracted frame files + - ``annotated_NNNN.png`` — frames with crosshair and crop box overlay + - ``zoom_path.json`` — full zoom path data + generated ffmpeg expressions + - ``plugin_debug.json`` — plugin-provided debug data (prompts, model, etc.) + + When *ffmpeg_path* is provided and a zoom path exists, annotated frames + with crosshairs and crop boxes are generated. + + Returns the debug directory path. + """ + debug_dir = game_dir / "debug" / "zoom" + debug_dir.mkdir(parents=True, exist_ok=True) + + # Copy extracted frames into debug dir (not symlink — the temp + # extraction directory is cleaned up after rendering). 
+ for i, frame_path in enumerate(extracted.frame_paths): + dest = debug_dir / f"frame_{i:04d}.png" + dest.unlink(missing_ok=True) + if frame_path.is_file(): + shutil.copy2(frame_path, dest) + + # Generate annotated frames with crosshairs + crop box + if zoom_path is not None and ffmpeg_path is not None: + annotated = _annotate_frames(ffmpeg_path, extracted, zoom_path, target_width, target_height, debug_dir) + if annotated: + log.debug("Wrote %d annotated frames to %s", len(annotated), debug_dir) + + # Write zoom path JSON + data: dict[str, object] = { + "source_width": extracted.source_width, + "source_height": extracted.source_height, + "duration": extracted.duration, + "fps": extracted.fps, + "frame_count": len(extracted.frame_paths), + "timestamps": list(extracted.timestamps), + "target_width": target_width, + "target_height": target_height, + } + + if zoom_path is not None: + points_data = [ + { + "timestamp": p.timestamp, + "center_x": p.center_x, + "center_y": p.center_y, + "confidence": p.confidence, + } + for p in zoom_path.points + ] + data["zoom_path"] = { + "points": points_data, + "point_count": len(zoom_path.points), + } + + # Include generated ffmpeg expressions for inspection + x_values = [(p.timestamp, p.center_x) for p in zoom_path.points] + y_values = [(p.timestamp, p.center_y) for p in zoom_path.points] + data["ffmpeg_expressions"] = { + "x_lerp": build_piecewise_lerp(x_values, zoom_path.duration), + "y_lerp": build_piecewise_lerp(y_values, zoom_path.duration), + "crop_filter": build_smart_crop_filter(zoom_path, target_width, target_height), + } + else: + data["zoom_path"] = None + data["ffmpeg_expressions"] = None + + zoom_json = debug_dir / "zoom_path.json" + zoom_json.write_text(json.dumps(data, indent=2), encoding="utf-8") + + # Write plugin debug data (prompts, model info, etc.) 
+ if plugin_debug: + plugin_json = debug_dir / "plugin_debug.json" + plugin_json.write_text(json.dumps(plugin_debug, indent=2, default=str), encoding="utf-8") + + return debug_dir diff --git a/reeln/data/templates/goal_overlay.ass b/reeln/data/templates/goal_overlay.ass index 33b4001..07326d4 100644 --- a/reeln/data/templates/goal_overlay.ass +++ b/reeln/data/templates/goal_overlay.ass @@ -9,16 +9,16 @@ PlayResY: 1080 Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding Style: BoxBorder, Inter, 1, {{ass_secondary_color}}, {{ass_secondary_color}}, {{ass_secondary_color}}, &H00000000, 0,0,0,0,100,100,0,0,1,0,0,7,0,0,0,1 Style: BoxFill, Inter, 1, {{ass_primary_back}}, {{ass_primary_back}}, {{ass_primary_back}}, &H00000000, 0,0,0,0,100,100,0,0,1,0,0,7,0,0,0,1 -Style: TeamLine, Inter, 22, {{ass_team_text_color}}, {{ass_team_text_color}}, {{ass_secondary_color}}, &H00000000, 0,0,0,0,100,100,0,0,1,0,0,7,0,0,0,1 +Style: TeamLine, Inter, 24, {{ass_team_text_color}}, {{ass_team_text_color}}, {{ass_secondary_color}}, &H00000000, 1,0,0,0,100,100,1,0,1,0,0,7,0,0,0,1 Style: NameLine, Inter, 46, {{ass_name_color}}, {{ass_name_color}}, {{ass_name_outline_color}}, &H00000000, 1,0,0,0,100,100,0,0,1,2,0,7,0,0,0,1 Style: AssistLine, Inter, 30, {{ass_name_color}}, {{ass_name_color}}, {{ass_name_outline_color}}, &H00000000, 0,0,0,0,100,100,0,0,1,2,0,7,0,0,0,1 [Events] Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text ; Overlay block box (x=10,y=820,w=1900,h=220) with 6px border -Dialogue: 0,0:00:00.00,0:00:10.00,BoxBorder,,0,0,0,,{\p1\pos({{goal_overlay_border_x}},{{goal_overlay_border_y}})\bord0\shad0}m 0 0 l {{goal_overlay_border_w}} 0 l {{goal_overlay_border_w}} {{goal_overlay_border_h}} l 0 {{goal_overlay_border_h}} -Dialogue: 
1,0:00:00.00,0:00:10.00,BoxFill,,0,0,0,,{\p1\pos({{goal_overlay_box_x}},{{goal_overlay_box_y}})\bord0\shad0}m 0 0 l {{goal_overlay_box_w}} 0 l {{goal_overlay_box_w}} {{goal_overlay_box_h}} l 0 {{goal_overlay_box_h}} -Dialogue: 2,0:00:00.00,0:00:10.00,TeamLine,,0,0,0,,{\an7\pos({{goal_overlay_team_x}},{{goal_overlay_team_y}})}{{goal_scorer_team}} - {{team_level}} +Dialogue: 0,0:00:00.00,{{box_end}},BoxBorder,,0,0,0,,{\p1\pos({{goal_overlay_border_x}},{{goal_overlay_border_y}})\bord0\shad0}m 0 0 l {{goal_overlay_border_w}} 0 l {{goal_overlay_border_w}} {{goal_overlay_border_h}} l 0 {{goal_overlay_border_h}} +Dialogue: 1,0:00:00.00,{{box_end}},BoxFill,,0,0,0,,{\p1\pos({{goal_overlay_box_x}},{{goal_overlay_box_y}})\bord0\shad0}m 0 0 l {{goal_overlay_box_w}} 0 l {{goal_overlay_box_w}} {{goal_overlay_box_h}} l 0 {{goal_overlay_box_h}} +Dialogue: 2,0:00:00.00,{{box_end}},TeamLine,,0,0,0,,{\an7\pos({{goal_overlay_team_x}},{{goal_overlay_team_y}})}{{goal_scorer_team}} - {{team_level}} Dialogue: 3,{{scorer_start}},{{scorer_end}},NameLine,,0,0,0,,{\an7\pos({{goal_overlay_scorer_x}},{{goal_overlay_scorer_y}})\fs{{goal_scorer_fs}}}{{goal_scorer_text}} Dialogue: 4,{{assist_start}},{{assist_end}},AssistLine,,0,0,0,,{\an7\pos({{goal_overlay_assist_1_x}},{{goal_overlay_assist_1_y}})\fs{{goal_assist_fs}}}{{goal_assist_1}} Dialogue: 5,{{assist_start}},{{assist_end}},AssistLine,,0,0,0,,{\an7\pos({{goal_overlay_assist_2_x}},{{goal_overlay_assist_2_y}})\fs{{goal_assist_fs}}}{{goal_assist_2}} diff --git a/reeln/models/branding.py b/reeln/models/branding.py new file mode 100644 index 0000000..95d2ef3 --- /dev/null +++ b/reeln/models/branding.py @@ -0,0 +1,20 @@ +"""Branding overlay configuration for rendered shorts.""" + +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass(frozen=True) +class BrandingConfig: + """Configuration for the branding overlay shown at the start of renders. + + Attributes: + enabled: Whether branding is shown (default ``True``). 
+ template: Template path — ``"builtin:branding"`` or user path to ``.ass``. + duration: How long the branding is visible in seconds. + """ + + enabled: bool = True + template: str = "builtin:branding" + duration: float = 5.0 diff --git a/reeln/models/config.py b/reeln/models/config.py index 1de9da1..31bf9f1 100644 --- a/reeln/models/config.py +++ b/reeln/models/config.py @@ -6,6 +6,7 @@ from pathlib import Path from typing import Any +from reeln.models.branding import BrandingConfig from reeln.models.plugin import OrchestrationConfig from reeln.models.profile import IterationConfig, RenderProfile @@ -53,5 +54,6 @@ class AppConfig: paths: PathConfig = field(default_factory=PathConfig) render_profiles: dict[str, RenderProfile] = field(default_factory=dict) iterations: IterationConfig = field(default_factory=IterationConfig) + branding: BrandingConfig = field(default_factory=BrandingConfig) orchestration: OrchestrationConfig = field(default_factory=OrchestrationConfig) plugins: PluginsConfig = field(default_factory=PluginsConfig) diff --git a/reeln/models/game.py b/reeln/models/game.py index 5025986..3eb1e3d 100644 --- a/reeln/models/game.py +++ b/reeln/models/game.py @@ -20,6 +20,10 @@ class GameInfo: period_length: int = 0 description: str = "" thumbnail: str = "" + level: str = "" + home_slug: str = "" + away_slug: str = "" + tournament: str = "" @dataclass @@ -61,6 +65,8 @@ class GameState: renders: list[RenderEntry] = field(default_factory=list) events: list[GameEvent] = field(default_factory=list) livestreams: dict[str, str] = field(default_factory=dict) + segment_outputs: list[str] = field(default_factory=list) + highlights_output: str = "" # --------------------------------------------------------------------------- @@ -81,6 +87,10 @@ def game_info_to_dict(info: GameInfo) -> dict[str, Any]: "period_length": info.period_length, "description": info.description, "thumbnail": info.thumbnail, + "level": info.level, + "home_slug": info.home_slug, + "away_slug": 
info.away_slug, + "tournament": info.tournament, } @@ -97,6 +107,10 @@ def dict_to_game_info(data: dict[str, Any]) -> GameInfo: period_length=int(data.get("period_length", 0)), description=str(data.get("description", "")), thumbnail=str(data.get("thumbnail", "")), + level=str(data.get("level", "")), + home_slug=str(data.get("home_slug", "")), + away_slug=str(data.get("away_slug", "")), + tournament=str(data.get("tournament", "")), ) @@ -164,6 +178,8 @@ def game_state_to_dict(state: GameState) -> dict[str, Any]: "renders": [render_entry_to_dict(r) for r in state.renders], "events": [game_event_to_dict(e) for e in state.events], "livestreams": dict(state.livestreams), + "segment_outputs": list(state.segment_outputs), + "highlights_output": state.highlights_output, } @@ -181,4 +197,6 @@ def dict_to_game_state(data: dict[str, Any]) -> GameState: renders=[dict_to_render_entry(r) for r in renders_raw], events=[dict_to_game_event(e) for e in events_raw], livestreams=dict(data.get("livestreams", {})), + segment_outputs=list(data.get("segment_outputs", [])), + highlights_output=str(data.get("highlights_output", "")), ) diff --git a/reeln/models/profile.py b/reeln/models/profile.py index aa38141..ad39987 100644 --- a/reeln/models/profile.py +++ b/reeln/models/profile.py @@ -6,6 +6,18 @@ from typing import Any +@dataclass(frozen=True) +class SpeedSegment: + """One segment in a variable-speed timeline. + + ``until`` is the source-time boundary in seconds (exclusive). + The last segment must have ``until=None`` (runs to end of clip). + """ + + speed: float + until: float | None = None + + @dataclass(frozen=True) class RenderProfile: """Named set of rendering parameter overrides. 
@@ -21,8 +33,11 @@ class RenderProfile: anchor_x: float | None = None anchor_y: float | None = None pad_color: str | None = None + scale: float | None = None + smart: bool | None = None # Filters (applied to both short-form and full-frame) speed: float | None = None + speed_segments: tuple[SpeedSegment, ...] | None = None lut: str | None = None subtitle_template: str | None = None # Encoding overrides @@ -61,6 +76,8 @@ def profiles_for_event(self, event_type: str) -> list[str]: "anchor_x", "anchor_y", "pad_color", + "scale", + "smart", "speed", "lut", "subtitle_template", @@ -71,6 +88,8 @@ def profiles_for_event(self, event_type: str) -> list[str]: "audio_bitrate", ) +# speed_segments is handled separately (not a simple scalar field) + def render_profile_to_dict(profile: RenderProfile) -> dict[str, Any]: """Serialize a ``RenderProfile``, omitting ``None`` fields.""" @@ -79,6 +98,11 @@ def render_profile_to_dict(profile: RenderProfile) -> dict[str, Any]: value = getattr(profile, field_name) if value is not None: result[field_name] = value + if profile.speed_segments is not None: + result["speed_segments"] = [ + {"speed": s.speed, **({"until": s.until} if s.until is not None else {})} + for s in profile.speed_segments + ] return result @@ -92,7 +116,10 @@ def dict_to_render_profile(name: str, data: dict[str, Any]) -> RenderProfile: anchor_x=_opt_float(data, "anchor_x"), anchor_y=_opt_float(data, "anchor_y"), pad_color=_opt_str(data, "pad_color"), + scale=_opt_float(data, "scale"), + smart=_opt_bool(data, "smart"), speed=_opt_float(data, "speed"), + speed_segments=_opt_speed_segments(data, "speed_segments"), lut=_opt_str(data, "lut"), subtitle_template=_opt_str(data, "subtitle_template"), codec=_opt_str(data, "codec"), @@ -132,6 +159,23 @@ def _opt_float(data: dict[str, Any], key: str) -> float | None: return float(v) if v is not None else None +def _opt_bool(data: dict[str, Any], key: str) -> bool | None: + v = data.get(key) + return bool(v) if v is not None else 
None + + def _opt_str(data: dict[str, Any], key: str) -> str | None: v = data.get(key) return str(v) if v is not None else None + + +def _opt_speed_segments( + data: dict[str, Any], key: str +) -> tuple[SpeedSegment, ...] | None: + v = data.get(key) + if v is None or not isinstance(v, list): + return None + return tuple( + SpeedSegment(speed=float(s["speed"]), until=s.get("until")) + for s in v + ) diff --git a/reeln/models/short.py b/reeln/models/short.py index 98c0daa..499f99e 100644 --- a/reeln/models/short.py +++ b/reeln/models/short.py @@ -5,6 +5,10 @@ from dataclasses import dataclass from enum import Enum from pathlib import Path +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from reeln.models.profile import SpeedSegment class CropMode(Enum): @@ -12,6 +16,8 @@ class CropMode(Enum): PAD = "pad" CROP = "crop" + SMART = "smart" + SMART_PAD = "smart_pad" class OutputFormat(Enum): @@ -56,6 +62,8 @@ class ShortConfig: crop_mode: CropMode = CropMode.PAD anchor_x: float = 0.5 anchor_y: float = 0.5 + scale: float = 1.0 + smart: bool = False pad_color: str = "black" speed: float = 1.0 lut: Path | None = None @@ -66,3 +74,6 @@ class ShortConfig: crf: int = 18 audio_codec: str = "aac" audio_bitrate: str = "128k" + speed_segments: tuple[SpeedSegment, ...] 
| None = None + smart_zoom_frames: int = 5 + branding: Path | None = None diff --git a/reeln/models/team.py b/reeln/models/team.py index c302e85..4457de9 100644 --- a/reeln/models/team.py +++ b/reeln/models/team.py @@ -6,6 +6,15 @@ from typing import Any +@dataclass +class RosterEntry: + """A single player entry from a roster CSV.""" + + number: str + name: str + position: str + + @dataclass class TeamProfile: """Reusable team configuration with metadata for rendering and plugins.""" diff --git a/reeln/models/zoom.py b/reeln/models/zoom.py new file mode 100644 index 0000000..e4cdba2 --- /dev/null +++ b/reeln/models/zoom.py @@ -0,0 +1,41 @@ +"""Data models for smart target zoom.""" + +from __future__ import annotations + +from dataclasses import dataclass +from pathlib import Path + + +@dataclass(frozen=True) +class ZoomPoint: + """A single detected target position at a given timestamp. + + Coordinates are normalized to 0.0-1.0 relative to source dimensions. + """ + + timestamp: float + center_x: float + center_y: float + confidence: float = 1.0 + + +@dataclass(frozen=True) +class ZoomPath: + """An ordered sequence of zoom points describing a camera pan path.""" + + points: tuple[ZoomPoint, ...] + source_width: int + source_height: int + duration: float + + +@dataclass(frozen=True) +class ExtractedFrames: + """Result of extracting frames from a video for analysis.""" + + frame_paths: tuple[Path, ...] + timestamps: tuple[float, ...] 
+ source_width: int + source_height: int + duration: float + fps: float diff --git a/reeln/plugins/hooks.py b/reeln/plugins/hooks.py index 094fd85..26c80bc 100644 --- a/reeln/plugins/hooks.py +++ b/reeln/plugins/hooks.py @@ -22,6 +22,7 @@ class Hook(Enum): ON_HIGHLIGHTS_MERGED = "on_highlights_merged" ON_SEGMENT_START = "on_segment_start" ON_SEGMENT_COMPLETE = "on_segment_complete" + ON_FRAMES_EXTRACTED = "on_frames_extracted" ON_ERROR = "on_error" diff --git a/reeln/plugins/loader.py b/reeln/plugins/loader.py index 72822b0..9ecdefd 100644 --- a/reeln/plugins/loader.py +++ b/reeln/plugins/loader.py @@ -9,6 +9,7 @@ from reeln.core.errors import PluginError from reeln.core.log import get_logger from reeln.models.config import PluginsConfig +from reeln.models.doctor import DoctorCheck from reeln.models.plugin import PluginInfo from reeln.plugins.hooks import Hook from reeln.plugins.registry import FilteredRegistry, HookRegistry, get_registry @@ -17,11 +18,26 @@ _ENTRY_POINT_GROUP: str = "reeln.plugins" +# CLI-level override: when True, disables hook enforcement regardless of config. +_cli_no_enforce_hooks: bool = False + + +def set_enforce_hooks_override(*, disable: bool) -> None: + """Set a CLI-level override to disable hook enforcement. + + Called by the top-level ``--no-enforce-hooks`` flag so that all + ``activate_plugins`` calls in the process skip enforcement. 
+ """ + global _cli_no_enforce_hooks + _cli_no_enforce_hooks = disable + + _CAPABILITY_CHECKS: list[tuple[str, str]] = [ ("generator", "generate"), ("enricher", "enrich"), ("uploader", "upload"), ("notifier", "notify"), + ("doctor", "doctor_checks"), ] @@ -235,7 +251,8 @@ def activate_plugins(plugins_config: PluginsConfig) -> dict[str, object]: settings=plugins_config.settings, ) - if plugins_config.enforce_hooks: + enforce = plugins_config.enforce_hooks and not _cli_no_enforce_hooks + if enforce: caps = _fetch_registry_capabilities(plugins_config.registry_url) else: caps = {} @@ -246,3 +263,27 @@ def activate_plugins(plugins_config: PluginsConfig) -> dict[str, object]: _register_plugin_hooks(name, plugin, registry, allowed_hooks=allowed) return loaded + + +def collect_doctor_checks(loaded_plugins: dict[str, object]) -> list[DoctorCheck]: + """Collect ``DoctorCheck`` instances from loaded plugins. + + Calls ``doctor_checks()`` on each plugin that exposes it. Each call + should return a list of ``DoctorCheck`` objects. Failures are logged + and skipped. 
+ """ + checks: list[DoctorCheck] = [] + for name, plugin in loaded_plugins.items(): + fn = getattr(plugin, "doctor_checks", None) + if not callable(fn): + continue + try: + plugin_checks = fn() + checks.extend(plugin_checks) + except Exception: + log.warning( + "Plugin %s doctor_checks() failed, skipping", + name, + exc_info=True, + ) + return checks diff --git a/registry/plugins.json b/registry/plugins.json index 50e35b8..a7a4512 100644 --- a/registry/plugins.json +++ b/registry/plugins.json @@ -34,10 +34,10 @@ { "name": "openai", "package": "reeln-plugin-openai", - "description": "OpenAI-powered LLM integration — livestream metadata, game thumbnails, and translation", - "capabilities": ["hook:ON_GAME_INIT"], + "description": "OpenAI-powered LLM integration — livestream metadata, short metadata, game thumbnails, and translation", + "capabilities": ["hook:ON_GAME_INIT", "hook:POST_RENDER", "hook:ON_FRAMES_EXTRACTED"], "homepage": "https://github.com/StreamnDad/reeln-plugin-openai", - "min_reeln_version": "0.0.19", + "min_reeln_version": "0.0.33", "author": "StreamnDad", "license": "AGPL-3.0" } diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 50c5bf6..3ed1228 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -40,7 +40,6 @@ def clean_env(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.delenv(var, raising=False) - # --------------------------------------------------------------------------- # Game info fixtures # --------------------------------------------------------------------------- diff --git a/tests/integration/test_game_lifecycle.py b/tests/integration/test_game_lifecycle.py index b3b3421..fee14d7 100644 --- a/tests/integration/test_game_lifecycle.py +++ b/tests/integration/test_game_lifecycle.py @@ -267,6 +267,11 @@ def test_double_header_auto_numbering( game_dir1, _ = init_game(tmp_path, info1) assert "_g" not in game_dir1.name + # Finish first game before starting second + from 
reeln.core.finish import finish_game + + finish_game(game_dir1) + info2 = GameInfo( date="2026-02-26", home_team="roseville", @@ -276,7 +281,14 @@ def test_double_header_auto_numbering( game_dir2, _ = init_game(tmp_path, info2) assert game_dir2.name.endswith("_g2") - # States are independent + # States are independent — re-init game_dir1 state for segment processing + from reeln.core.highlights import save_game_state + + state1 = load_game_state(game_dir1) + state1.finished = False + state1.finished_at = "" + save_game_state(state1, game_dir1) + _populate_segment(game_dir1, "hockey", 1) process_segment(game_dir1, 1, ffmpeg_path=_FFMPEG) diff --git a/tests/unit/commands/test_cli.py b/tests/unit/commands/test_cli.py index ae62981..f7c2592 100644 --- a/tests/unit/commands/test_cli.py +++ b/tests/unit/commands/test_cli.py @@ -279,3 +279,42 @@ def test_doctor_help() -> None: result = runner.invoke(app, ["doctor", "--help"]) assert result.exit_code == 0 assert "health checks" in result.output.lower() or "doctor" in result.output.lower() + + +# --------------------------------------------------------------------------- +# --no-enforce-hooks +# --------------------------------------------------------------------------- + + +def test_no_enforce_hooks_flag_in_help() -> None: + result = runner.invoke(app, ["--help"]) + assert result.exit_code == 0 + assert "--no-enforce-hooks" in result.output + + +def test_no_enforce_hooks_sets_override() -> None: + """--no-enforce-hooks sets the module-level override flag.""" + import reeln.plugins.loader as loader_mod + + original = loader_mod._cli_no_enforce_hooks + try: + # Use "config show" to trigger the callback body (--help exits before it) + with patch("reeln.core.config.load_config", return_value=AppConfig()): + runner.invoke(app, ["--no-enforce-hooks", "config", "show"]) + assert loader_mod._cli_no_enforce_hooks is True + finally: + loader_mod._cli_no_enforce_hooks = original + + +def test_no_enforce_hooks_not_set_by_default() -> 
None: + """Without the flag, the override remains False.""" + import reeln.plugins.loader as loader_mod + + original = loader_mod._cli_no_enforce_hooks + loader_mod._cli_no_enforce_hooks = False + try: + with patch("reeln.core.config.load_config", return_value=AppConfig()): + runner.invoke(app, ["config", "show"]) + assert loader_mod._cli_no_enforce_hooks is False + finally: + loader_mod._cli_no_enforce_hooks = original diff --git a/tests/unit/commands/test_game.py b/tests/unit/commands/test_game.py index 7bdccf7..540fc6a 100644 --- a/tests/unit/commands/test_game.py +++ b/tests/unit/commands/test_game.py @@ -182,6 +182,13 @@ def test_game_init_double_header(tmp_path: Path) -> None: ) assert result1.exit_code == 0 + # Finish first game before starting second + result_finish = runner.invoke( + app, + ["game", "finish", "-o", str(tmp_path / f"{_today()}_a_vs_b")], + ) + assert result_finish.exit_code == 0 + # Second game — auto-detects double-header result2 = runner.invoke( app, @@ -282,6 +289,7 @@ def _mock_collect(**overrides: object) -> dict[str, object]: "period_length": 0, "description": "", "thumbnail": "", + "tournament": "", "home_profile": None, "away_profile": None, } @@ -469,6 +477,72 @@ def test_game_init_with_level_resolves_profiles(tmp_path: Path) -> None: assert game_dir.is_dir() +def test_game_init_with_level_persists_slugs(tmp_path: Path) -> None: + """Non-interactive mode: --level stores level and team slugs in game.json.""" + from reeln.models.team import TeamProfile + + home_profile = TeamProfile(team_name="Roseville", short_name="ROS", level="bantam") + away_profile = TeamProfile(team_name="Mahtomedi", short_name="MAH", level="bantam") + + with patch("reeln.core.teams.load_team_profile") as mock_load: + mock_load.side_effect = [home_profile, away_profile] + result = runner.invoke( + app, + ["game", "init", "roseville", "mahtomedi", "--level", "bantam", "-o", str(tmp_path)], + ) + + assert result.exit_code == 0 + game_dir = tmp_path / 
f"{_today()}_Roseville_vs_Mahtomedi" + state = json.loads((game_dir / "game.json").read_text(encoding="utf-8")) + assert state["game_info"]["level"] == "bantam" + assert state["game_info"]["home_slug"] == "roseville" + assert state["game_info"]["away_slug"] == "mahtomedi" + + +def test_game_init_with_tournament(tmp_path: Path) -> None: + """Non-interactive mode: --tournament is stored in game.json.""" + result = runner.invoke( + app, + ["game", "init", "a", "b", "--tournament", "2026 Stars of Tomorrow", "-o", str(tmp_path)], + ) + assert result.exit_code == 0 + + game_dir = tmp_path / f"{_today()}_a_vs_b" + state = json.loads((game_dir / "game.json").read_text(encoding="utf-8")) + assert state["game_info"]["tournament"] == "2026 Stars of Tomorrow" + + +def test_game_init_interactive_passes_tournament_preset(tmp_path: Path) -> None: + """When --tournament is set, it's passed as preset (not None).""" + result_dict = _mock_collect(tournament="Stars Cup") + with patch( + "reeln.commands.game.collect_game_info_interactive", + return_value=result_dict, + ) as mock_collect: + result = runner.invoke( + app, + ["game", "init", "--tournament", "Stars Cup", "-o", str(tmp_path)], + ) + + assert result.exit_code == 0 + call_kwargs = mock_collect.call_args.kwargs + assert call_kwargs["tournament"] == "Stars Cup" + + +def test_game_init_without_level_no_slugs(tmp_path: Path) -> None: + """Without --level, level/slug fields default to empty strings.""" + result = runner.invoke( + app, + ["game", "init", "a", "b", "-o", str(tmp_path)], + ) + assert result.exit_code == 0 + game_dir = tmp_path / f"{_today()}_a_vs_b" + state = json.loads((game_dir / "game.json").read_text(encoding="utf-8")) + assert state["game_info"]["level"] == "" + assert state["game_info"]["home_slug"] == "" + assert state["game_info"]["away_slug"] == "" + + def test_game_init_with_level_missing_profile(tmp_path: Path) -> None: """Non-interactive mode: --level with unknown slug raises error.""" with patch( @@ 
-505,15 +579,41 @@ def test_game_init_interactive_passes_period_length_preset(tmp_path: Path) -> No "reeln.commands.game.collect_game_info_interactive", return_value=result_dict, ) as mock_collect: - result = runner.invoke( - app, ["game", "init", "--period-length", "12", "-o", str(tmp_path)] - ) + result = runner.invoke(app, ["game", "init", "--period-length", "12", "-o", str(tmp_path)]) assert result.exit_code == 0 call_kwargs = mock_collect.call_args.kwargs assert call_kwargs["period_length"] == 12 +def test_game_init_interactive_passes_level_preset(tmp_path: Path) -> None: + """When --level is set in interactive mode, it's passed as preset.""" + result_dict = _mock_collect() + with patch( + "reeln.commands.game.collect_game_info_interactive", + return_value=result_dict, + ) as mock_collect: + result = runner.invoke(app, ["game", "init", "--level", "2016", "-o", str(tmp_path)]) + + assert result.exit_code == 0 + call_kwargs = mock_collect.call_args.kwargs + assert call_kwargs["level"] == "2016" + + +def test_game_init_interactive_level_none_by_default(tmp_path: Path) -> None: + """When --level is not set, level is None in interactive mode.""" + result_dict = _mock_collect() + with patch( + "reeln.commands.game.collect_game_info_interactive", + return_value=result_dict, + ) as mock_collect: + result = runner.invoke(app, ["game", "init", "-o", str(tmp_path)]) + + assert result.exit_code == 0 + call_kwargs = mock_collect.call_args.kwargs + assert call_kwargs["level"] is None + + # --------------------------------------------------------------------------- # game segment # --------------------------------------------------------------------------- diff --git a/tests/unit/commands/test_plugins_cmd.py b/tests/unit/commands/test_plugins_cmd.py index 1c58767..fe5700e 100644 --- a/tests/unit/commands/test_plugins_cmd.py +++ b/tests/unit/commands/test_plugins_cmd.py @@ -370,7 +370,11 @@ def test_plugins_install_with_version() -> None: assert result.exit_code == 0 assert 
"installed successfully" in result.output mock_install.assert_called_once_with( - "youtube", entries, dry_run=False, installer="", version="1.2.0", + "youtube", + entries, + dry_run=False, + installer="", + version="1.2.0", ) @@ -421,7 +425,11 @@ def test_plugins_update_with_version() -> None: assert result.exit_code == 0 assert "updated successfully" in result.output mock_update.assert_called_once_with( - "youtube", entries, dry_run=False, installer="", version="2.0.0", + "youtube", + entries, + dry_run=False, + installer="", + version="2.0.0", ) diff --git a/tests/unit/commands/test_render.py b/tests/unit/commands/test_render.py index 57388b7..6ad4c02 100644 --- a/tests/unit/commands/test_render.py +++ b/tests/unit/commands/test_render.py @@ -208,6 +208,40 @@ def test_render_short_dry_run(tmp_path: Path) -> None: assert "Size: 1080x1920" in result.output +def test_render_short_default_output_in_shorts_subdir(tmp_path: Path) -> None: + """Default output path puts renders in a shorts/ subdirectory.""" + clip = tmp_path / "clip.mkv" + clip.touch() + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--dry-run", + ], + ) + assert result.exit_code == 0 + assert "shorts/clip_short.mp4" in result.output + + +def test_render_preview_default_output_in_shorts_subdir(tmp_path: Path) -> None: + """Default preview output path also uses shorts/ subdirectory.""" + clip = tmp_path / "clip.mkv" + clip.touch() + result = runner.invoke( + app, + [ + "render", + "preview", + str(clip), + "--dry-run", + ], + ) + assert result.exit_code == 0 + assert "shorts/clip_preview.mp4" in result.output + + def test_render_short_dry_run_crop_mode(tmp_path: Path) -> None: clip = tmp_path / "clip.mkv" clip.touch() @@ -1028,8 +1062,9 @@ def test_short_player_flag_populates_overlay_without_game(tmp_path: Path) -> Non cfg = tmp_path / "config.json" cfg.write_text(json.dumps(cfg_data)) - with patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), 
patch( - "reeln.core.ffmpeg.probe_duration", return_value=10.0 + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), ): result = runner.invoke( app, @@ -1068,8 +1103,9 @@ def test_short_assists_flag_populates_overlay_without_game(tmp_path: Path) -> No cfg = tmp_path / "config.json" cfg.write_text(json.dumps(cfg_data)) - with patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), patch( - "reeln.core.ffmpeg.probe_duration", return_value=10.0 + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), ): result = runner.invoke( app, @@ -1101,9 +1137,7 @@ def test_short_player_flag_overrides_event_data(tmp_path: Path) -> None: game_dir = tmp_path / "game" game_dir.mkdir() state = GameState( - game_info=GameInfo( - date="2026-02-28", home_team="A", away_team="B", sport="hockey" - ), + game_info=GameInfo(date="2026-02-28", home_team="A", away_team="B", sport="hockey"), created_at="2026-02-28T12:00:00+00:00", events=[ GameEvent( @@ -1126,8 +1160,9 @@ def test_short_player_flag_overrides_event_data(tmp_path: Path) -> None: cfg = tmp_path / "config.json" cfg.write_text(json.dumps(cfg_data)) - with patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), patch( - "reeln.core.ffmpeg.probe_duration", return_value=10.0 + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), ): result = runner.invoke( app, @@ -1154,6 +1189,73 @@ def test_short_player_flag_overrides_event_data(tmp_path: Path) -> None: assert "Subtitle:" in result.output +def test_short_player_event_in_post_render_hook(tmp_path: Path) -> None: + """Player, assists, and game_event are included in POST_RENDER hook data.""" + from unittest.mock import MagicMock + + clip 
= tmp_path / "clip.mkv" + clip.touch() + game_dir = tmp_path / "game" + game_dir.mkdir() + state = GameState( + game_info=GameInfo(date="2026-02-28", home_team="A", away_team="B", sport="hockey"), + created_at="2026-02-28T12:00:00+00:00", + events=[ + GameEvent( + id="ev1", + clip="clip.mkv", + segment_number=1, + event_type="goal", + ), + ], + ) + _write_game_state(game_dir, state) + + mock_result = _mock_result(tmp_path) + emitted: list[object] = [] + + def capture_emit(hook: object, ctx: object) -> None: + from reeln.plugins.hooks import Hook + + if getattr(hook, "value", None) == Hook.POST_RENDER.value: + emitted.append(ctx) + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch("reeln.plugins.registry.get_registry") as mock_get_reg, + ): + mock_renderer_cls.return_value.render.return_value = mock_result + mock_reg = MagicMock() + mock_reg.emit.side_effect = capture_emit + mock_get_reg.return_value = mock_reg + + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--game-dir", + str(game_dir), + "--event", + "ev1", + "--player", + "Jane Doe", + "--assists", + "John Smith", + ], + ) + + assert result.exit_code == 0 + assert len(emitted) == 1 + ctx = emitted[0] + assert getattr(ctx, "data", {}).get("player") == "Jane Doe" + assert getattr(ctx, "data", {}).get("assists") == "John Smith" + assert getattr(ctx, "data", {}).get("game_event") is not None + assert getattr(ctx.data["game_event"], "event_type", None) == "goal" + + def test_short_player_flag_without_render_profile_is_noop(tmp_path: Path) -> None: """--player without --render-profile is ignored (no subtitle template to fill).""" clip = tmp_path / "clip.mkv" @@ -1194,8 +1296,9 @@ def test_preview_player_flag(tmp_path: Path) -> None: cfg = tmp_path / "config.json" cfg.write_text(json.dumps(cfg_data)) - with patch("reeln.core.ffmpeg.discover_ffmpeg", 
return_value=Path("/usr/bin/ffmpeg")), patch( - "reeln.core.ffmpeg.probe_duration", return_value=10.0 + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), ): result = runner.invoke( app, @@ -1234,8 +1337,9 @@ def test_apply_player_flag_without_game_dir(tmp_path: Path) -> None: cfg = tmp_path / "config.json" cfg.write_text(json.dumps(cfg_data)) - with patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), patch( - "reeln.core.ffmpeg.probe_duration", return_value=10.0 + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), ): result = runner.invoke( app, @@ -1269,14 +1373,15 @@ def test_apply_player_flag_overrides_event(tmp_path: Path) -> None: game_dir = tmp_path / "game" game_dir.mkdir() state = GameState( - game_info=GameInfo( - date="2026-02-28", home_team="A", away_team="B", sport="hockey" - ), + game_info=GameInfo(date="2026-02-28", home_team="A", away_team="B", sport="hockey"), created_at="2026-02-28T12:00:00+00:00", events=[ GameEvent( - id="ev1", clip="x.mkv", segment_number=1, - event_type="goal", player="OldPlayer", + id="ev1", + clip="x.mkv", + segment_number=1, + event_type="goal", + player="OldPlayer", ), ], ) @@ -1290,8 +1395,9 @@ def test_apply_player_flag_overrides_event(tmp_path: Path) -> None: cfg = tmp_path / "config.json" cfg.write_text(json.dumps(cfg_data)) - with patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), patch( - "reeln.core.ffmpeg.probe_duration", return_value=10.0 + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), ): result = runner.invoke( app, @@ -1329,9 +1435,7 @@ def test_short_subtitle_temp_cleanup_after_render(tmp_path: Path) -> None: game_dir = tmp_path / "game" 
game_dir.mkdir() state = GameState( - game_info=GameInfo( - date="2026-02-28", home_team="A", away_team="B", sport="hockey" - ), + game_info=GameInfo(date="2026-02-28", home_team="A", away_team="B", sport="hockey"), created_at="2026-02-28T12:00:00+00:00", ) _write_game_state(game_dir, state) @@ -3235,14 +3339,15 @@ def test_short_iterate_with_player_and_assists(tmp_path: Path) -> None: game_dir = tmp_path / "game" game_dir.mkdir() state = GameState( - game_info=GameInfo( - date="2026-02-26", home_team="A", away_team="B", sport="hockey" - ), + game_info=GameInfo(date="2026-02-26", home_team="A", away_team="B", sport="hockey"), created_at="t1", events=[ GameEvent( - id="ev1", clip="clip.mkv", segment_number=1, - event_type="goal", created_at="t1", + id="ev1", + clip="clip.mkv", + segment_number=1, + event_type="goal", + created_at="t1", ), ], ) @@ -3292,41 +3397,1635 @@ def test_short_iterate_with_player_and_assists(tmp_path: Path) -> None: assert meta["assists"] == "#22 Jones" -def test_short_subtitle_game_dir_load_fails_nonfatal(tmp_path: Path) -> None: - """Subtitle resolution handles game_dir load failure gracefully.""" +def test_short_iterate_smart_passes_zoom_path(tmp_path: Path) -> None: + """--smart --iterate extracts frames and passes zoom_path to render_iterations.""" clip = tmp_path / "clip.mkv" clip.touch() - template = tmp_path / "overlay.ass" - template.write_text("Static overlay", encoding="utf-8") + from reeln.models.zoom import ExtractedFrames, ZoomPath, ZoomPoint + from reeln.plugins.hooks import Hook + from reeln.plugins.registry import get_registry + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + zoom = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) - bad_game_dir = tmp_path / 
"badgame" - bad_game_dir.mkdir() - # Write invalid game.json to trigger ReelnError - (bad_game_dir / "game.json").write_text("not json!") + def _provide_zoom(context: object) -> None: + from reeln.plugins.hooks import HookContext - cfg_data = { - "render_profiles": { - "overlay": {"subtitle_template": str(template)}, - }, - } - cfg = tmp_path / "config.json" - cfg.write_text(json.dumps(cfg_data)) + assert isinstance(context, HookContext) + context.shared["smart_zoom"] = {"zoom_path": zoom} - result = runner.invoke( - app, - [ - "render", - "short", - str(clip), - "--render-profile", - "overlay", - "--game-dir", - str(bad_game_dir), - "--config", - str(cfg), - "--dry-run", - ], + def _activate_with_zoom_handler(plugins_config: object) -> dict[str, object]: + get_registry().register(Hook.ON_FRAMES_EXTRACTED, _provide_zoom) + return {} + + cfg = _config_with_iterations(tmp_path) + iter_result = IterationResult( + output=tmp_path / "out.mp4", + iteration_outputs=[], + profile_names=["fullspeed", "slowmo"], + concat_copy=True, ) + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch("reeln.commands.render.activate_plugins", side_effect=_activate_with_zoom_handler), + patch( + "reeln.core.iterations.render_iterations", + return_value=(iter_result, ["Iteration rendering complete"]), + ) as mock_iter, + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--crop", + "crop", + "--smart", + "--iterate", + "--config", + str(cfg), + ], + ) assert result.exit_code == 0 - assert "Subtitle:" in result.output + assert "Iteration rendering complete" in result.output + call_kwargs = mock_iter.call_args + assert call_kwargs.kwargs.get("zoom_path") is zoom + assert call_kwargs.kwargs.get("source_fps") == 60.0 + + +def test_short_iterate_smart_debug_writes_zoom(tmp_path: Path) -> 
None: + """--smart --iterate --debug writes zoom debug artifacts.""" + clip = tmp_path / "clip.mkv" + clip.touch() + game_dir = tmp_path / "game" + game_dir.mkdir() + + from reeln.models.zoom import ExtractedFrames, ZoomPath, ZoomPoint + from reeln.plugins.hooks import Hook + from reeln.plugins.registry import get_registry + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + zoom = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + def _provide_zoom(context: object) -> None: + from reeln.plugins.hooks import HookContext + + assert isinstance(context, HookContext) + context.shared["smart_zoom"] = {"zoom_path": zoom} + + def _activate_with_zoom_handler(plugins_config: object) -> dict[str, object]: + get_registry().register(Hook.ON_FRAMES_EXTRACTED, _provide_zoom) + return {} + + cfg = _config_with_iterations(tmp_path) + iter_result = IterationResult( + output=tmp_path / "out.mp4", + iteration_outputs=[], + profile_names=["fullspeed", "slowmo"], + concat_copy=True, + ) + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch("reeln.commands.render.activate_plugins", side_effect=_activate_with_zoom_handler), + patch( + "reeln.core.iterations.render_iterations", + return_value=(iter_result, ["Iteration rendering complete"]), + ), + patch("reeln.core.zoom_debug.write_zoom_debug") as mock_zoom_debug, + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--crop", + "crop", + "--smart", + "--iterate", + "--game-dir", + str(game_dir), + "--debug", + "--config", + str(cfg), + ], + ) + assert 
result.exit_code == 0 + assert "Debug:" in result.output + mock_zoom_debug.assert_called_once() + + +def test_short_iterate_debug_no_smart_no_zoom_debug(tmp_path: Path) -> None: + """--iterate --debug without --smart doesn't write zoom debug.""" + clip = tmp_path / "clip.mkv" + clip.touch() + game_dir = tmp_path / "game" + game_dir.mkdir() + + cfg = _config_with_iterations(tmp_path) + iter_result = IterationResult( + output=tmp_path / "out.mp4", + iteration_outputs=[], + profile_names=["fullspeed", "slowmo"], + concat_copy=True, + ) + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch( + "reeln.core.iterations.render_iterations", + return_value=(iter_result, ["Iteration rendering complete"]), + ), + ): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--iterate", + "--game-dir", + str(game_dir), + "--debug", + "--config", + str(cfg), + ], + ) + assert result.exit_code == 0 + assert "Debug:" not in result.output + + +def test_short_iterate_smart_no_plugin_falls_back(tmp_path: Path) -> None: + """--smart --iterate without plugin providing zoom falls back to static.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + from reeln.models.zoom import ExtractedFrames + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + + cfg = _config_with_iterations(tmp_path) + iter_result = IterationResult( + output=tmp_path / "out.mp4", + iteration_outputs=[], + profile_names=["fullspeed", "slowmo"], + concat_copy=True, + ) + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch( + "reeln.core.iterations.render_iterations", + return_value=(iter_result, ["Iteration rendering complete"]), + ) as mock_iter, + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + result = 
runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--crop", + "crop", + "--smart", + "--iterate", + "--config", + str(cfg), + ], + ) + assert result.exit_code == 0 + assert "No smart zoom data from plugins" in result.output + assert "Iteration rendering complete" in result.output + call_kwargs = mock_iter.call_args + assert call_kwargs.kwargs.get("zoom_path") is None + + +def test_short_subtitle_game_dir_load_fails_nonfatal(tmp_path: Path) -> None: + """Subtitle resolution handles game_dir load failure gracefully.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + template = tmp_path / "overlay.ass" + template.write_text("Static overlay", encoding="utf-8") + + bad_game_dir = tmp_path / "badgame" + bad_game_dir.mkdir() + # Write invalid game.json to trigger ReelnError + (bad_game_dir / "game.json").write_text("not json!") + + cfg_data = { + "render_profiles": { + "overlay": {"subtitle_template": str(template)}, + }, + } + cfg = tmp_path / "config.json" + cfg.write_text(json.dumps(cfg_data)) + + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--render-profile", + "overlay", + "--game-dir", + str(bad_game_dir), + "--config", + str(cfg), + "--dry-run", + ], + ) + assert result.exit_code == 0 + assert "Subtitle:" in result.output + + +# --------------------------------------------------------------------------- +# Smart zoom — crop mode: smart +# --------------------------------------------------------------------------- + + +def test_render_short_smart_crop_fallback_no_plugin(tmp_path: Path) -> None: + """Smart crop with no plugin providing zoom data falls back to center crop.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + from reeln.models.zoom import ExtractedFrames + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), 
+ patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--crop", + "smart", + "--dry-run", + ], + ) + + assert result.exit_code == 0 + assert "No smart zoom data from plugins" in result.output + assert "Crop mode: crop" in result.output + assert "Dry run" in result.output + + +def test_render_short_smart_crop_with_zoom_path(tmp_path: Path) -> None: + """Smart crop with a plugin providing zoom path shows smart zoom info.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + from reeln.models.zoom import ExtractedFrames, ZoomPath, ZoomPoint + from reeln.plugins.hooks import Hook + from reeln.plugins.registry import get_registry + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + + zoom = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + def _provide_zoom(context: object) -> None: + from reeln.plugins.hooks import HookContext + + assert isinstance(context, HookContext) + context.shared["smart_zoom"] = {"zoom_path": zoom} + + def _activate_with_zoom_handler(plugins_config: object) -> dict[str, object]: + # Simulate activate_plugins but register our test handler + get_registry().register(Hook.ON_FRAMES_EXTRACTED, _provide_zoom) + return {} + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch("reeln.commands.render.activate_plugins", side_effect=_activate_with_zoom_handler), + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + 
"--crop", + "smart", + "--dry-run", + ], + ) + + assert result.exit_code == 0 + assert "Smart zoom: 2 target points" in result.output + assert "Crop mode: smart" in result.output + assert "Dry run" in result.output + + +def test_render_short_smart_crop_extract_error(tmp_path: Path) -> None: + """Smart crop errors when frame extraction fails.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + from reeln.core.errors import RenderError + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + ): + mock_renderer_cls.return_value.extract_frames.side_effect = RenderError("probe failed") + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--crop", + "smart", + ], + ) + + assert result.exit_code == 1 + assert "Error extracting frames" in result.output + + +def test_render_short_smart_crop_zoom_frames_option(tmp_path: Path) -> None: + """--zoom-frames is passed to extract_frames.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + from reeln.models.zoom import ExtractedFrames + + frames = ExtractedFrames( + frame_paths=tuple(tmp_path / f"f{i}.png" for i in range(3)), + timestamps=(2.5, 5.0, 7.5), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--crop", + "smart", + "--zoom-frames", + "3", + "--dry-run", + ], + ) + + assert result.exit_code == 0 + call_args = mock_renderer_cls.return_value.extract_frames.call_args + assert call_args[1]["count"] == 3 or call_args[0][1] == 3 + + +def test_render_short_smart_crop_cleanup(tmp_path: Path) -> None: + """Extracted frames directory is cleaned up even on error.""" + 
clip = tmp_path / "clip.mkv" + clip.touch() + + from reeln.models.zoom import ExtractedFrames + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + + created_dirs: list[Path] = [] + original_mkdtemp = __import__("tempfile").mkdtemp + + def _tracking_mkdtemp(**kwargs: object) -> str: + result_str = original_mkdtemp(**kwargs) + created_dirs.append(Path(result_str)) + return result_str + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch("tempfile.mkdtemp", side_effect=_tracking_mkdtemp), + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + runner.invoke( + app, + ["render", "short", str(clip), "--crop", "smart", "--dry-run"], + ) + + # The temp dir should have been cleaned up + for d in created_dirs: + assert not d.exists() + + +def test_render_short_smart_crop_ffmpeg_discovery_error(tmp_path: Path) -> None: + """Smart crop errors when ffmpeg discovery fails.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + from reeln.core.errors import FFmpegError + + with patch("reeln.core.ffmpeg.discover_ffmpeg", side_effect=FFmpegError("not found")): + result = runner.invoke( + app, + ["render", "short", str(clip), "--crop", "smart"], + ) + + assert result.exit_code == 1 + assert "Error:" in result.output + + +def test_render_short_smart_crop_debug_with_zoom(tmp_path: Path) -> None: + """Debug mode with smart zoom includes zoom_path info.""" + clip = tmp_path / "clip.mkv" + clip.touch() + game_dir = tmp_path / "game" + game_dir.mkdir() + + from reeln.models.zoom import ExtractedFrames, ZoomPath, ZoomPoint + from reeln.plugins.hooks import Hook + from reeln.plugins.registry import get_registry + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + 
duration=10.0, + fps=60.0, + ) + + zoom = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + info = GameInfo( + home_team="TeamA", + away_team="TeamB", + sport="hockey", + date="2026-03-19", + ) + state = GameState(game_info=info) + _write_game_state(game_dir, state) + + mock_result = RenderResult( + output=tmp_path / "out.mp4", + duration_seconds=10.0, + file_size_bytes=512000, + ffmpeg_command=["ffmpeg", "-y", "out.mp4"], + ) + + def _activate_with_zoom(plugins_config: object) -> dict[str, object]: + get_registry().register( + Hook.ON_FRAMES_EXTRACTED, + lambda ctx: ctx.shared.update({"smart_zoom": {"zoom_path": zoom}}), + ) + return {} + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch("reeln.commands.render.activate_plugins", side_effect=_activate_with_zoom), + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + mock_renderer_cls.return_value.render.return_value = mock_result + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--crop", + "smart", + "--game-dir", + str(game_dir), + "--debug", + ], + ) + + assert result.exit_code == 0 + assert "Smart zoom: 2 target points" in result.output + assert "Debug:" in result.output + + +def test_render_short_smart_debug_captures_plugin_debug(tmp_path: Path) -> None: + """Debug mode saves plugin debug data (prompts) to zoom debug directory.""" + clip = tmp_path / "clip.mkv" + clip.touch() + game_dir = tmp_path / "game" + game_dir.mkdir() + + from reeln.models.zoom import ExtractedFrames, ZoomPath, ZoomPoint + from reeln.plugins.hooks import Hook + from reeln.plugins.registry import get_registry + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + 
source_height=1080, + duration=10.0, + fps=60.0, + ) + + zoom = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + info = GameInfo( + home_team="TeamA", + away_team="TeamB", + sport="hockey", + date="2026-03-21", + ) + state = GameState(game_info=info) + _write_game_state(game_dir, state) + + mock_result = RenderResult( + output=tmp_path / "out.mp4", + duration_seconds=10.0, + file_size_bytes=512000, + ffmpeg_command=["ffmpeg", "-y", "out.mp4"], + ) + + plugin_debug = {"prompt": "analyze this frame", "model": "gpt-4o"} + + def _activate_with_zoom_debug(plugins_config: object) -> dict[str, object]: + get_registry().register( + Hook.ON_FRAMES_EXTRACTED, + lambda ctx: ctx.shared.update({"smart_zoom": {"zoom_path": zoom, "debug": plugin_debug}}), + ) + return {} + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch("reeln.commands.render.activate_plugins", side_effect=_activate_with_zoom_debug), + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + mock_renderer_cls.return_value.render.return_value = mock_result + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--crop", + "crop", + "--smart", + "--game-dir", + str(game_dir), + "--debug", + ], + ) + + assert result.exit_code == 0 + # Plugin debug should be written + plugin_json = game_dir / "debug" / "zoom" / "plugin_debug.json" + assert plugin_json.is_file() + data = json.loads(plugin_json.read_text()) + assert data["prompt"] == "analyze this frame" + assert data["model"] == "gpt-4o" + + +# --------------------------------------------------------------------------- +# --scale and --smart CLI options +# --------------------------------------------------------------------------- + + +def 
test_render_short_scale_display(tmp_path: Path) -> None: + """--scale shows Scale: Nx in output.""" + clip = tmp_path / "clip.mkv" + clip.touch() + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--scale", + "1.3", + "--dry-run", + ], + ) + assert result.exit_code == 0 + assert "Scale: 1.3x" in result.output + + +def test_render_short_scale_default_no_display(tmp_path: Path) -> None: + """Scale=1.0 (default) does NOT show Scale line.""" + clip = tmp_path / "clip.mkv" + clip.touch() + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--dry-run", + ], + ) + assert result.exit_code == 0 + assert "Scale:" not in result.output + + +def test_render_short_smart_pad_deprecation_warning(tmp_path: Path) -> None: + """--crop smart_pad shows deprecation warning.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + from reeln.models.zoom import ExtractedFrames, ZoomPath, ZoomPoint + from reeln.plugins.hooks import Hook + from reeln.plugins.registry import get_registry + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + + zoom = ZoomPath( + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + def _activate_with_zoom(plugins_config: object) -> dict[str, object]: + get_registry().register( + Hook.ON_FRAMES_EXTRACTED, + lambda ctx: ctx.shared.update({"smart_zoom": {"zoom_path": zoom}}), + ) + return {} + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch("reeln.commands.render.activate_plugins", side_effect=_activate_with_zoom), + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + mock_renderer_cls.return_value.render.return_value = _mock_result(tmp_path) + result = runner.invoke( + app, + [ + "render", 
+ "short", + str(clip), + "--crop", + "smart_pad", + ], + ) + + assert result.exit_code == 0 + assert "--crop smart_pad is deprecated" in result.output + + +def test_render_short_smart_flag_triggers_frames(tmp_path: Path) -> None: + """--smart flag triggers frame extraction like --crop smart.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + from reeln.models.zoom import ExtractedFrames, ZoomPath, ZoomPoint + from reeln.plugins.hooks import Hook + from reeln.plugins.registry import get_registry + + frames = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + + zoom = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + def _activate_with_zoom(plugins_config: object) -> dict[str, object]: + get_registry().register( + Hook.ON_FRAMES_EXTRACTED, + lambda ctx: ctx.shared.update({"smart_zoom": {"zoom_path": zoom}}), + ) + return {} + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.renderer.FFmpegRenderer") as mock_renderer_cls, + patch("reeln.commands.render.activate_plugins", side_effect=_activate_with_zoom), + ): + mock_renderer_cls.return_value.extract_frames.return_value = frames + mock_renderer_cls.return_value.render.return_value = _mock_result(tmp_path) + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--crop", + "pad", + "--smart", + ], + ) + + assert result.exit_code == 0 + assert "Smart zoom: 2 target points" in result.output + + +def test_render_preview_scale_display(tmp_path: Path) -> None: + """Preview also shows scale.""" + clip = tmp_path / "clip.mkv" + clip.touch() + result = runner.invoke( + app, + [ + "render", + "preview", + str(clip), + "--scale", + "1.5", + "--dry-run", + ], + ) + assert result.exit_code == 
0 + assert "Scale: 1.5x" in result.output + + +# --------------------------------------------------------------------------- +# _find_game_dir — clip-aware resolution +# --------------------------------------------------------------------------- + + +def test_find_game_dir_prefers_clip_parent(tmp_path: Path) -> None: + """When clip is inside a game dir, that game dir is preferred over most recent.""" + from reeln.commands.render import _find_game_dir + + # Older game dir that contains the clip + game_a = tmp_path / "game_a" + game_a.mkdir() + (game_a / "game.json").write_text("{}") + clip = game_a / "period-1" / "clip.mp4" + clip.parent.mkdir() + clip.touch() + + # Newer game dir (should NOT be picked) + game_b = tmp_path / "game_b" + game_b.mkdir() + (game_b / "game.json").write_text("{}") + # Ensure game_b is more recent + import time + + time.sleep(0.01) + (game_b / "game.json").write_text("{}") + + result = _find_game_dir(tmp_path, clip=clip) + assert result == game_a + + +def test_find_game_dir_falls_back_to_most_recent_without_clip(tmp_path: Path) -> None: + """Without clip, falls back to most recently modified game.json.""" + from reeln.commands.render import _find_game_dir + + game_a = tmp_path / "game_a" + game_a.mkdir() + (game_a / "game.json").write_text("{}") + + import time + + time.sleep(0.01) + + game_b = tmp_path / "game_b" + game_b.mkdir() + (game_b / "game.json").write_text("{}") + + result = _find_game_dir(tmp_path) + assert result == game_b + + +def test_find_game_dir_clip_not_in_any_game_dir(tmp_path: Path) -> None: + """When clip isn't inside any game dir, falls back to most recent.""" + from reeln.commands.render import _find_game_dir + + game_a = tmp_path / "game_a" + game_a.mkdir() + (game_a / "game.json").write_text("{}") + + clip = tmp_path / "stray" / "clip.mp4" + clip.parent.mkdir() + clip.touch() + + result = _find_game_dir(tmp_path, clip=clip) + assert result == game_a + + +def test_find_game_dir_none_output_dir() -> None: + 
"""Returns None when output_dir is None.""" + from reeln.commands.render import _find_game_dir + + assert _find_game_dir(None) is None + assert _find_game_dir(None, clip=Path("/tmp/clip.mp4")) is None + + +def test_find_game_dir_output_dir_is_game_dir(tmp_path: Path) -> None: + """When output_dir itself has game.json, returns it directly.""" + from reeln.commands.render import _find_game_dir + + (tmp_path / "game.json").write_text("{}") + result = _find_game_dir(tmp_path) + assert result == tmp_path + + +def test_find_game_dir_resolve_error_skips_candidate(tmp_path: Path) -> None: + """OSError during is_relative_to raises on resolved paths — candidate is skipped.""" + from reeln.commands.render import _find_game_dir + + game_a = tmp_path / "game_a" + game_a.mkdir() + (game_a / "game.json").write_text("{}") + + clip = game_a / "clip.mp4" + clip.touch() + + # Make is_relative_to raise OSError to hit the except branch + with patch.object(Path, "is_relative_to", side_effect=OSError("broken")): + result = _find_game_dir(tmp_path, clip=clip) + # Falls back to most recent since is_relative_to() failed + assert result == game_a + + +# --------------------------------------------------------------------------- +# --player-numbers flag +# --------------------------------------------------------------------------- + + +def _write_roster(path: Path) -> None: + """Write a sample roster CSV.""" + path.write_text( + "number,name,position\n48,John Smith,C\n24,Jane Doe,D\n2,Bob Jones,RW\n", + encoding="utf-8", + ) + + +def _game_state_with_level( + level: str = "bantam", + home_slug: str = "eagles", + away_slug: str = "bears", +) -> GameState: + """Create a GameState with level and slug fields populated.""" + return GameState( + game_info=GameInfo( + date="2026-03-04", + home_team="Eagles", + away_team="Bears", + sport="hockey", + level=level, + home_slug=home_slug, + away_slug=away_slug, + ), + created_at="2026-03-04T12:00:00+00:00", + ) + + +def 
test_player_numbers_with_valid_game_and_roster(tmp_path: Path) -> None: + """--player-numbers looks up scorer and assists from team roster.""" + from reeln.models.team import TeamProfile + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + + clip = tmp_path / "clip.mkv" + clip.touch() + + roster_path = tmp_path / "roster.csv" + _write_roster(roster_path) + + template = tmp_path / "overlay.ass" + template.write_text("Player: {{goal_scorer_text}}", encoding="utf-8") + + cfg_data = { + "render_profiles": { + "overlay": {"subtitle_template": str(template)}, + }, + } + cfg = tmp_path / "config.json" + cfg.write_text(json.dumps(cfg_data)) + + home_profile = TeamProfile( + team_name="Eagles", + short_name="EGL", + level="bantam", + roster_path=str(roster_path), + ) + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), + patch("reeln.core.teams.load_team_profile", return_value=home_profile), + ): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48,24,2", + "--event-type", + "HOME_GOAL", + "--game-dir", + str(game_dir), + "--render-profile", + "overlay", + "--config", + str(cfg), + "--dry-run", + ], + ) + assert result.exit_code == 0, result.output + assert "Subtitle:" in result.output + + +def test_player_numbers_without_game_dir(tmp_path: Path) -> None: + """--player-numbers without a game directory exits with error.""" + clip = tmp_path / "clip.mkv" + clip.touch() + + cfg = tmp_path / "empty.json" + cfg.write_text(json.dumps({"config_version": 1})) + + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48", + "--config", + str(cfg), + "--dry-run", + ], + ) + assert result.exit_code == 1 + assert "requires a game directory" in result.output + + +def test_player_numbers_game_missing_level(tmp_path: Path) -> None: + 
"""--player-numbers with game that has no level/slugs exits with error.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + # GameInfo with no level/slugs + state = GameState( + game_info=GameInfo( + date="2026-03-04", + home_team="Eagles", + away_team="Bears", + sport="hockey", + ), + created_at="2026-03-04T12:00:00+00:00", + ) + _write_game_state(game_dir, state) + + clip = tmp_path / "clip.mkv" + clip.touch() + + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48", + "--game-dir", + str(game_dir), + "--dry-run", + ], + ) + assert result.exit_code == 1 + assert "requires team profiles" in result.output + + +def test_player_numbers_missing_roster(tmp_path: Path) -> None: + """--player-numbers with team profile lacking roster_path exits with error.""" + from reeln.models.team import TeamProfile + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + + clip = tmp_path / "clip.mkv" + clip.touch() + + # Profile with no roster_path + home_profile = TeamProfile(team_name="Eagles", short_name="EGL", level="bantam") + + with patch("reeln.core.teams.load_team_profile", return_value=home_profile): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48", + "--game-dir", + str(game_dir), + "--dry-run", + ], + ) + assert result.exit_code == 1 + assert "No roster file configured" in result.output + + +def test_player_numbers_unknown_number_fallback(tmp_path: Path) -> None: + """Unknown jersey number falls back to '#N' display with warning.""" + from reeln.models.team import TeamProfile + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + + clip = tmp_path / "clip.mkv" + clip.touch() + + roster_path = tmp_path / "roster.csv" + roster_path.write_text("number,name,position\n48,John Smith,C\n", encoding="utf-8") + + template = tmp_path / "overlay.ass" + 
template.write_text("Player: {{goal_scorer_text}}", encoding="utf-8") + + cfg_data = { + "render_profiles": { + "overlay": {"subtitle_template": str(template)}, + }, + } + cfg = tmp_path / "config.json" + cfg.write_text(json.dumps(cfg_data)) + + home_profile = TeamProfile( + team_name="Eagles", + short_name="EGL", + level="bantam", + roster_path=str(roster_path), + ) + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), + patch("reeln.core.teams.load_team_profile", return_value=home_profile), + ): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48,99", + "--game-dir", + str(game_dir), + "--render-profile", + "overlay", + "--config", + str(cfg), + "--dry-run", + ], + ) + # Should succeed despite unknown #99 + assert result.exit_code == 0, result.output + + +def test_player_numbers_explicit_player_overrides(tmp_path: Path) -> None: + """Explicit --player and --assists take precedence over --player-numbers roster lookup.""" + from reeln.models.team import TeamProfile + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + + clip = tmp_path / "clip.mkv" + clip.touch() + + roster_path = tmp_path / "roster.csv" + _write_roster(roster_path) + + template = tmp_path / "overlay.ass" + template.write_text("Player: {{goal_scorer_text}}", encoding="utf-8") + + cfg_data = { + "render_profiles": { + "overlay": {"subtitle_template": str(template)}, + }, + } + cfg = tmp_path / "config.json" + cfg.write_text(json.dumps(cfg_data)) + + home_profile = TeamProfile( + team_name="Eagles", + short_name="EGL", + level="bantam", + roster_path=str(roster_path), + ) + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), + patch("reeln.core.teams.load_team_profile", 
return_value=home_profile), + ): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48,24", + "--player", + "Custom Player", + "--assists", + "Custom Assist", + "--game-dir", + str(game_dir), + "--render-profile", + "overlay", + "--config", + str(cfg), + "--dry-run", + ], + ) + assert result.exit_code == 0, result.output + + +def test_player_numbers_away_goal(tmp_path: Path) -> None: + """--event-type AWAY_GOAL resolves the away team's roster.""" + from reeln.models.team import TeamProfile + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + + clip = tmp_path / "clip.mkv" + clip.touch() + + roster_path = tmp_path / "roster.csv" + _write_roster(roster_path) + + template = tmp_path / "overlay.ass" + template.write_text("Player: {{goal_scorer_text}}", encoding="utf-8") + + cfg_data = { + "render_profiles": { + "overlay": {"subtitle_template": str(template)}, + }, + } + cfg = tmp_path / "config.json" + cfg.write_text(json.dumps(cfg_data)) + + away_profile = TeamProfile( + team_name="Bears", + short_name="BRS", + level="bantam", + roster_path=str(roster_path), + ) + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), + patch("reeln.core.teams.load_team_profile", return_value=away_profile), + ): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48", + "--event-type", + "AWAY_GOAL", + "--game-dir", + str(game_dir), + "--render-profile", + "overlay", + "--config", + str(cfg), + "--dry-run", + ], + ) + assert result.exit_code == 0, result.output + + +def test_player_numbers_game_state_load_error(tmp_path: Path) -> None: + """--player-numbers with corrupt game.json exits with error.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + (game_dir / "game.json").write_text("not valid json") + + clip = tmp_path / "clip.mkv" 
+ clip.touch() + + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48", + "--game-dir", + str(game_dir), + "--dry-run", + ], + ) + assert result.exit_code == 1 + assert "Error" in result.output + + +def test_player_numbers_team_profile_not_found(tmp_path: Path) -> None: + """--player-numbers with missing team profile exits with error.""" + from reeln.core.errors import ConfigError as _CE + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + + clip = tmp_path / "clip.mkv" + clip.touch() + + with patch("reeln.core.teams.load_team_profile", side_effect=_CE("not found")): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48", + "--game-dir", + str(game_dir), + "--dry-run", + ], + ) + assert result.exit_code == 1 + assert "Team profile not found" in result.output + + +def test_player_numbers_roster_file_missing(tmp_path: Path) -> None: + """--player-numbers with missing roster file exits with error.""" + from reeln.models.team import TeamProfile + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + + clip = tmp_path / "clip.mkv" + clip.touch() + + home_profile = TeamProfile( + team_name="Eagles", + short_name="EGL", + level="bantam", + roster_path=str(tmp_path / "nonexistent.csv"), + ) + + with patch("reeln.core.teams.load_team_profile", return_value=home_profile): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48", + "--game-dir", + str(game_dir), + "--dry-run", + ], + ) + assert result.exit_code == 1 + assert "Roster file not found" in result.output + + +def test_player_numbers_on_preview(tmp_path: Path) -> None: + """--player-numbers works on render preview.""" + from reeln.models.team import TeamProfile + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + 
+ clip = tmp_path / "clip.mkv" + clip.touch() + + roster_path = tmp_path / "roster.csv" + _write_roster(roster_path) + + home_profile = TeamProfile( + team_name="Eagles", + short_name="EGL", + level="bantam", + roster_path=str(roster_path), + ) + + with patch("reeln.core.teams.load_team_profile", return_value=home_profile): + result = runner.invoke( + app, + [ + "render", + "preview", + str(clip), + "--player-numbers", + "48", + "--game-dir", + str(game_dir), + "--dry-run", + ], + ) + assert result.exit_code == 0, result.output + + +def test_player_numbers_auto_applies_overlay_profile(tmp_path: Path) -> None: + """--player-numbers without -r auto-applies player-overlay profile.""" + from reeln.models.team import TeamProfile + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + + clip = tmp_path / "clip.mkv" + clip.touch() + + roster_path = tmp_path / "roster.csv" + _write_roster(roster_path) + + home_profile = TeamProfile( + team_name="Eagles", + short_name="EGL", + level="bantam", + roster_path=str(roster_path), + ) + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), + patch("reeln.core.teams.load_team_profile", return_value=home_profile), + ): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--player-numbers", + "48,24", + "--game-dir", + str(game_dir), + "--dry-run", + ], + ) + assert result.exit_code == 0, result.output + # The bundled config includes player-overlay, so it should be auto-applied + assert "player-overlay" in result.output or "Dry run" in result.output + + +def test_player_numbers_on_apply(tmp_path: Path) -> None: + """--player-numbers works on render apply.""" + from reeln.models.team import TeamProfile + + game_dir = tmp_path / "game" + game_dir.mkdir() + _write_game_state(game_dir, _game_state_with_level()) + + clip = tmp_path / "clip.mkv" + clip.touch() + 
+ roster_path = tmp_path / "roster.csv" + _write_roster(roster_path) + + template = tmp_path / "overlay.ass" + template.write_text("Player: {{goal_scorer_text}}", encoding="utf-8") + + cfg_data = { + "render_profiles": { + "overlay": {"subtitle_template": str(template)}, + }, + } + cfg = tmp_path / "config.json" + cfg.write_text(json.dumps(cfg_data)) + + home_profile = TeamProfile( + team_name="Eagles", + short_name="EGL", + level="bantam", + roster_path=str(roster_path), + ) + + with ( + patch("reeln.core.ffmpeg.discover_ffmpeg", return_value=Path("/usr/bin/ffmpeg")), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), + patch("reeln.core.teams.load_team_profile", return_value=home_profile), + ): + result = runner.invoke( + app, + [ + "render", + "apply", + str(clip), + "--render-profile", + "overlay", + "--player-numbers", + "48,24", + "--game-dir", + str(game_dir), + "--config", + str(cfg), + "--dry-run", + ], + ) + assert result.exit_code == 0, result.output + + +# --------------------------------------------------------------------------- +# --no-branding flag +# --------------------------------------------------------------------------- + + +def test_render_short_no_branding_flag(tmp_path: Path) -> None: + """--no-branding suppresses branding overlay.""" + clip = tmp_path / "clip.mkv" + clip.touch() + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--no-branding", + "--dry-run", + ], + ) + assert result.exit_code == 0 + assert "Dry run" in result.output + + +def test_render_preview_no_branding_flag(tmp_path: Path) -> None: + """--no-branding on preview suppresses branding overlay.""" + clip = tmp_path / "clip.mkv" + clip.touch() + result = runner.invoke( + app, + [ + "render", + "preview", + str(clip), + "--no-branding", + "--dry-run", + ], + ) + assert result.exit_code == 0 + assert "Dry run" in result.output + + +def test_render_short_branding_enabled_by_default(tmp_path: Path) -> None: + """Without --no-branding, 
branding is resolved from config.""" + clip = tmp_path / "clip.mkv" + clip.touch() + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--dry-run", + ], + ) + assert result.exit_code == 0 + assert "Dry run" in result.output + + +def test_render_short_branding_error_continues(tmp_path: Path) -> None: + """When branding resolution fails, render continues with a warning.""" + from unittest.mock import patch + + from reeln.core.errors import RenderError + + clip = tmp_path / "clip.mkv" + clip.touch() + with patch( + "reeln.core.branding.resolve_branding", + side_effect=RenderError("broken template"), + ): + result = runner.invoke( + app, + [ + "render", + "short", + str(clip), + "--dry-run", + ], + ) + assert result.exit_code == 0 + assert "Warning: Failed to resolve branding" in result.output + assert "Dry run" in result.output diff --git a/tests/unit/core/test_config.py b/tests/unit/core/test_config.py index 78277a4..f0c4d62 100644 --- a/tests/unit/core/test_config.py +++ b/tests/unit/core/test_config.py @@ -406,6 +406,39 @@ def test_config_to_dict_default_plugins_omitted() -> None: assert "plugins" not in d +def test_config_to_dict_default_branding_omitted() -> None: + cfg = AppConfig() + d = config_to_dict(cfg) + assert "branding" not in d + + +def test_config_to_dict_branding_disabled() -> None: + from reeln.models.branding import BrandingConfig + + cfg = AppConfig(branding=BrandingConfig(enabled=False)) + d = config_to_dict(cfg) + assert "branding" in d + assert d["branding"]["enabled"] is False + + +def test_config_to_dict_branding_custom_duration() -> None: + from reeln.models.branding import BrandingConfig + + cfg = AppConfig(branding=BrandingConfig(duration=7.0)) + d = config_to_dict(cfg) + assert "branding" in d + assert d["branding"]["duration"] == 7.0 + + +def test_config_to_dict_branding_custom_template() -> None: + from reeln.models.branding import BrandingConfig + + cfg = 
AppConfig(branding=BrandingConfig(template="/my/brand.ass")) + d = config_to_dict(cfg) + assert "branding" in d + assert d["branding"]["template"] == "/my/brand.ass" + + # --------------------------------------------------------------------------- # config_to_dict full=True (for config show) # --------------------------------------------------------------------------- @@ -417,6 +450,7 @@ def test_config_to_dict_full_includes_all_sections() -> None: d = config_to_dict(cfg, full=True) assert d["render_profiles"] == {} assert d["iterations"] == {} + assert d["branding"] == {"enabled": True, "template": "builtin:branding", "duration": 5.0} assert d["orchestration"] == {"upload_bitrate_kbps": 0, "sequential": True} assert d["plugins"] == { "enabled": [], @@ -535,6 +569,46 @@ def test_dict_to_config_plugins_not_dict_ignored() -> None: assert cfg.plugins.enabled == [] +def test_dict_to_config_branding_defaults() -> None: + cfg = dict_to_config({}) + assert cfg.branding.enabled is True + assert cfg.branding.template == "builtin:branding" + assert cfg.branding.duration == 5.0 + + +def test_dict_to_config_branding_disabled() -> None: + d = {"branding": {"enabled": False}} + cfg = dict_to_config(d) + assert cfg.branding.enabled is False + + +def test_dict_to_config_branding_custom_duration() -> None: + d = {"branding": {"duration": 5.0}} + cfg = dict_to_config(d) + assert cfg.branding.duration == 5.0 + + +def test_dict_to_config_branding_custom_template() -> None: + d = {"branding": {"template": "/my/brand.ass"}} + cfg = dict_to_config(d) + assert cfg.branding.template == "/my/brand.ass" + + +def test_dict_to_config_branding_not_dict_ignored() -> None: + cfg = dict_to_config({"branding": "bad"}) + assert cfg.branding.enabled is True + + +def test_config_branding_roundtrip() -> None: + from reeln.models.branding import BrandingConfig + + original = AppConfig(branding=BrandingConfig(enabled=False, duration=5.0)) + d = config_to_dict(original) + restored = dict_to_config(d) + 
assert restored.branding.enabled is False + assert restored.branding.duration == 5.0 + + def test_config_orchestration_plugins_roundtrip() -> None: original = AppConfig( orchestration=OrchestrationConfig(upload_bitrate_kbps=5000, sequential=False), @@ -718,6 +792,16 @@ def test_validate_config_plugins_valid() -> None: assert issues == [] +def test_validate_config_branding_not_dict() -> None: + issues = validate_config({"config_version": 1, "branding": "bad"}) + assert any("branding" in i for i in issues) + + +def test_validate_config_branding_valid() -> None: + issues = validate_config({"config_version": 1, "branding": {"enabled": False}}) + assert issues == [] + + def test_validate_config_iterations_valid() -> None: issues = validate_config({"config_version": 1, "iterations": {"default": ["fullspeed"]}}) assert issues == [] diff --git a/tests/unit/core/test_debug.py b/tests/unit/core/test_debug.py index 7adee9a..0fce811 100644 --- a/tests/unit/core/test_debug.py +++ b/tests/unit/core/test_debug.py @@ -273,8 +273,9 @@ def test_collect_debug_artifacts(tmp_path: Path) -> None: collected = collect_debug_artifacts(tmp_path) assert len(collected) == 2 - assert collected[0].operation == "op1" - assert collected[1].operation == "op2" + # Newest first (reverse chronological) + assert collected[0].operation == "op2" + assert collected[1].operation == "op1" def test_collect_debug_artifacts_empty_dir(tmp_path: Path) -> None: @@ -339,6 +340,7 @@ def test_write_debug_index_empty(tmp_path: Path) -> None: content = path.read_text(encoding="utf-8") assert "reeln Debug Index" in content assert "No debug artifacts found" in content + assert "logo.jpg" in content def test_write_debug_index_with_artifacts(tmp_path: Path) -> None: @@ -542,3 +544,62 @@ def test_debug_index_extra_section(tmp_path: Path) -> None: assert "segment_number" in content assert "Extra:" in content + + +# --------------------------------------------------------------------------- +# Zoom debug section in HTML 
index +# --------------------------------------------------------------------------- + + +def test_debug_index_zoom_section(tmp_path: Path) -> None: + """Zoom subdirectory is rendered in the HTML index.""" + zoom_dir = tmp_path / "debug" / "zoom" + zoom_dir.mkdir(parents=True) + (zoom_dir / "frame_0000.png").write_bytes(b"png") + (zoom_dir / "annotated_0000.png").write_bytes(b"png") + (zoom_dir / "zoom_path.json").write_text('{"test": true}') + + path = write_debug_index(tmp_path) + content = path.read_text(encoding="utf-8") + + assert "Smart Zoom Debug" in content + assert "zoom/frame_0000.png" in content + assert "zoom/annotated_0000.png" in content + assert "zoom/zoom_path.json" in content + assert "Annotated frames" in content + assert "Extracted frames" in content + + +def test_debug_index_zoom_plugin_debug(tmp_path: Path) -> None: + """Plugin debug JSON is rendered in the HTML index.""" + zoom_dir = tmp_path / "debug" / "zoom" + zoom_dir.mkdir(parents=True) + (zoom_dir / "plugin_debug.json").write_text('{"prompt": "analyze this frame", "model": "gpt-4o"}') + + path = write_debug_index(tmp_path) + content = path.read_text(encoding="utf-8") + + assert "Plugin debug data" in content + assert "analyze this frame" in content + assert "gpt-4o" in content + + +def test_debug_index_no_zoom_dir(tmp_path: Path) -> None: + """Without a zoom directory, no zoom section appears.""" + path = write_debug_index(tmp_path) + content = path.read_text(encoding="utf-8") + + assert "Smart Zoom Debug" not in content + + +def test_debug_index_zoom_corrupt_plugin_debug(tmp_path: Path) -> None: + """Corrupt plugin_debug.json is silently skipped.""" + zoom_dir = tmp_path / "debug" / "zoom" + zoom_dir.mkdir(parents=True) + (zoom_dir / "plugin_debug.json").write_text("not valid json!!!") + + path = write_debug_index(tmp_path) + content = path.read_text(encoding="utf-8") + + assert "Smart Zoom Debug" in content + assert "Plugin debug data" not in content diff --git 
a/tests/unit/core/test_ffmpeg.py b/tests/unit/core/test_ffmpeg.py index 892b020..745b1e1 100644 --- a/tests/unit/core/test_ffmpeg.py +++ b/tests/unit/core/test_ffmpeg.py @@ -12,8 +12,10 @@ from reeln.core.ffmpeg import ( _VIDEO_EXTENSIONS, build_concat_command, + build_extract_frame_command, build_render_command, build_short_command, + build_xfade_command, check_version, derive_ffprobe, discover_ffmpeg, @@ -301,9 +303,8 @@ def test_probe_resolution_invalid_values() -> None: D..... = Decoding supported .E.... = Encoding supported ------- - D.V.LS h264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 - DEV.LS libx264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (codec h264) - DEV.LS libx265 H.265 / HEVC (codec hevc) + DEV.LS h264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (encoders: libx264 libx264rgb h264_videotoolbox) + DEV.L. hevc H.265 / HEVC (High Efficiency Video Coding) (encoders: libx265 hevc_videotoolbox) DEA.LS aac AAC (Advanced Audio Coding) D.A.LS mp3 MP3 (MPEG audio layer 3) """ @@ -313,10 +314,13 @@ def test_list_codecs_success() -> None: with patch("reeln.core.ffmpeg.subprocess.run", return_value=_mock_probe_proc(_CODECS_OUTPUT)): result = list_codecs(Path("/usr/bin/ffmpeg")) assert "libx264" in result + assert "libx264rgb" in result + assert "h264_videotoolbox" in result assert "libx265" in result + assert "hevc_videotoolbox" in result assert "aac" in result - # h264 has 'D' but not 'E' at position 1 — should NOT be included - assert "h264" not in result + # h264 is the codec family — included because it has 'E' flag + assert "h264" in result # mp3 is decode-only — should NOT be included assert "mp3" not in result @@ -339,6 +343,30 @@ def test_list_codecs_empty_output() -> None: assert result == [] +def test_list_codecs_without_encoders_section() -> None: + """Codec lines without (encoders: ...) 
still return the codec name.""" + output = """\ +Codecs: + ------- + DEA.LS aac AAC (Advanced Audio Coding) +""" + with patch("reeln.core.ffmpeg.subprocess.run", return_value=_mock_probe_proc(output)): + result = list_codecs(Path("/usr/bin/ffmpeg")) + assert result == ["aac"] + + +def test_list_codecs_malformed_encoders_no_close_paren() -> None: + """Malformed line with (encoders: but no closing ) still returns the codec name.""" + output = """\ +Codecs: + ------- + DEV.LS h264 H.264 (encoders: libx264 +""" + with patch("reeln.core.ffmpeg.subprocess.run", return_value=_mock_probe_proc(output)): + result = list_codecs(Path("/usr/bin/ffmpeg")) + assert result == ["h264"] + + _HWACCELS_OUTPUT = """\ Hardware acceleration methods: videotoolbox @@ -445,6 +473,62 @@ def test_build_concat_command_reencode(tmp_path: Path) -> None: ] +def test_build_xfade_command_two_files(tmp_path: Path) -> None: + ffmpeg = Path("/usr/bin/ffmpeg") + a = tmp_path / "a.mp4" + b = tmp_path / "b.mp4" + output = tmp_path / "out.mp4" + cmd = build_xfade_command(ffmpeg, [a, b], [10.0, 8.0], output) + assert str(ffmpeg) in cmd + assert "-filter_complex" in cmd + fc = cmd[cmd.index("-filter_complex") + 1] + assert "xfade=transition=fade" in fc + assert "acrossfade" in fc + assert "[vout]" in fc + assert "[aout]" in fc + assert "-map" in cmd + + +def test_build_xfade_command_three_files(tmp_path: Path) -> None: + ffmpeg = Path("/usr/bin/ffmpeg") + files = [tmp_path / f"{i}.mp4" for i in range(3)] + output = tmp_path / "out.mp4" + cmd = build_xfade_command(ffmpeg, files, [10.0, 8.0, 6.0], output) + fc = cmd[cmd.index("-filter_complex") + 1] + # Should have 2 xfade and 2 acrossfade stages + assert fc.count("xfade=") == 2 + assert fc.count("acrossfade=") == 2 + # Intermediate labels + assert "[xf0]" in fc + assert "[af0]" in fc + + +def test_build_xfade_command_mismatched_lengths(tmp_path: Path) -> None: + ffmpeg = Path("/usr/bin/ffmpeg") + a = tmp_path / "a.mp4" + with pytest.raises(FFmpegError, 
match="same length"): + build_xfade_command(ffmpeg, [a], [10.0, 8.0], tmp_path / "out.mp4") + + +def test_build_xfade_command_too_few_files(tmp_path: Path) -> None: + ffmpeg = Path("/usr/bin/ffmpeg") + a = tmp_path / "a.mp4" + with pytest.raises(FFmpegError, match="at least 2"): + build_xfade_command(ffmpeg, [a], [10.0], tmp_path / "out.mp4") + + +def test_build_xfade_command_clamps_fade_duration(tmp_path: Path) -> None: + """Fade duration is clamped to half the shortest clip.""" + ffmpeg = Path("/usr/bin/ffmpeg") + a = tmp_path / "a.mp4" + b = tmp_path / "b.mp4" + output = tmp_path / "out.mp4" + cmd = build_xfade_command(ffmpeg, [a, b], [10.0, 0.6], output, fade_duration=5.0) + fc = cmd[cmd.index("-filter_complex") + 1] + # fade should be clamped to 0.3 (half of 0.6) + assert "duration=0.3:" in fc + + def test_build_render_command_basic(tmp_path: Path) -> None: ffmpeg = Path("/usr/bin/ffmpeg") input_path = tmp_path / "clip.mkv" @@ -648,6 +732,33 @@ def test_build_short_command_no_audio_filter(tmp_path: Path) -> None: assert "-filter_complex" in cmd +def test_build_short_command_speed_segments_maps_outputs(tmp_path: Path) -> None: + """When filter_complex contains [vfinal] and [afinal], -map flags are added.""" + fc = ( + "[0:v]split=2[v0][v1];" + "[v0]trim=0:5,setpts=PTS-STARTPTS[sv0];" + "[v1]trim=5,setpts=PTS-STARTPTS[sv1];" + "[sv0][sv1]concat=n=2:v=1:a=0[_vout];" + "[_vout]scale=756:-2:flags=lanczos[vfinal];" + "[0:a]asplit=2[a0][a1];" + "[a0]atrim=0:5,asetpts=PTS-STARTPTS[sa0];" + "[a1]atrim=5,asetpts=PTS-STARTPTS[sa1];" + "[sa0][sa1]concat=n=2:v=0:a=1[afinal]" + ) + plan = RenderPlan( + inputs=[tmp_path / "clip.mkv"], + output=tmp_path / "out.mp4", + filter_complex=fc, + ) + cmd = build_short_command(Path("/usr/bin/ffmpeg"), plan) + assert "-map" in cmd + map_indices = [i for i, v in enumerate(cmd) if v == "-map"] + assert len(map_indices) == 2 + assert cmd[map_indices[0] + 1] == "[vfinal]" + assert cmd[map_indices[1] + 1] == "[afinal]" + assert "-af" not in 
cmd + + def test_build_short_command_no_filters(tmp_path: Path) -> None: plan = RenderPlan( inputs=[tmp_path / "clip.mkv"], @@ -695,3 +806,43 @@ def test_build_short_command_custom_encoding(tmp_path: Path) -> None: assert cmd[idx + 1] == "opus" idx = cmd.index("-b:a") assert cmd[idx + 1] == "192k" + + +# --------------------------------------------------------------------------- +# build_extract_frame_command — golden assertions +# --------------------------------------------------------------------------- + + +def test_build_extract_frame_command_basic(tmp_path: Path) -> None: + ffmpeg = Path("/usr/bin/ffmpeg") + input_path = tmp_path / "clip.mkv" + output_path = tmp_path / "frame_0000.png" + cmd = build_extract_frame_command(ffmpeg, input_path, 5.0, output_path) + assert cmd == [ + "/usr/bin/ffmpeg", + "-y", + "-v", + "error", + "-ss", + "5.000", + "-i", + str(input_path), + "-frames:v", + "1", + "-update", + "1", + str(output_path), + ] + + +def test_build_extract_frame_command_zero_timestamp(tmp_path: Path) -> None: + cmd = build_extract_frame_command(Path("/usr/bin/ffmpeg"), tmp_path / "clip.mkv", 0.0, tmp_path / "f.png") + assert "-ss" in cmd + idx = cmd.index("-ss") + assert cmd[idx + 1] == "0.000" + + +def test_build_extract_frame_command_fractional_timestamp(tmp_path: Path) -> None: + cmd = build_extract_frame_command(Path("/usr/bin/ffmpeg"), tmp_path / "clip.mkv", 3.141, tmp_path / "f.png") + idx = cmd.index("-ss") + assert cmd[idx + 1] == "3.141" diff --git a/tests/unit/core/test_finish.py b/tests/unit/core/test_finish.py index d717800..79f941b 100644 --- a/tests/unit/core/test_finish.py +++ b/tests/unit/core/test_finish.py @@ -8,7 +8,7 @@ import pytest from reeln.core.errors import MediaError -from reeln.core.finish import _build_summary, finish_game +from reeln.core.finish import _build_summary, finish_game, relocate_outputs from reeln.models.game import ( GameEvent, GameInfo, @@ -217,3 +217,138 @@ def test_finish_game_dry_run_no_hook(tmp_path: Path) 
-> None: finish_game(tmp_path, dry_run=True) assert len(emitted) == 0 + + +# --------------------------------------------------------------------------- +# relocate_outputs +# --------------------------------------------------------------------------- + + +def test_relocate_outputs_moves_files(tmp_path: Path) -> None: + """Segment and highlights outputs are moved into game_dir/outputs/.""" + game_dir = tmp_path / "2026-03-15_a_vs_b" + game_dir.mkdir() + # Create output files in game_dir.parent (tmp_path) + seg = tmp_path / "period-1_2026-03-15.mkv" + seg.write_bytes(b"segment") + hl = tmp_path / "a_vs_b_2026-03-15.mkv" + hl.write_bytes(b"highlights") + + state = _make_state( + segment_outputs=["period-1_2026-03-15.mkv"], + highlights_output="a_vs_b_2026-03-15.mkv", + ) + + relocated, messages = relocate_outputs(game_dir, state) + + assert len(relocated) == 2 + assert (game_dir / "outputs" / "period-1_2026-03-15.mkv").is_file() + assert (game_dir / "outputs" / "a_vs_b_2026-03-15.mkv").is_file() + assert not seg.exists() + assert not hl.exists() + assert any("Relocated" in m for m in messages) + + +def test_relocate_outputs_dry_run(tmp_path: Path) -> None: + """Dry run reports but does not move files.""" + game_dir = tmp_path / "2026-03-15_a_vs_b" + game_dir.mkdir() + seg = tmp_path / "period-1_2026-03-15.mkv" + seg.write_bytes(b"segment") + + state = _make_state(segment_outputs=["period-1_2026-03-15.mkv"]) + + relocated, messages = relocate_outputs(game_dir, state, dry_run=True) + + assert len(relocated) == 1 + assert seg.exists() # not moved + assert not (game_dir / "outputs").exists() + assert any("Would relocate" in m for m in messages) + + +def test_relocate_outputs_missing_files(tmp_path: Path) -> None: + """Missing output files are skipped gracefully.""" + game_dir = tmp_path / "2026-03-15_a_vs_b" + game_dir.mkdir() + + state = _make_state( + segment_outputs=["period-1_2026-03-15.mkv"], + highlights_output="a_vs_b_2026-03-15.mkv", + ) + + relocated, messages = 
relocate_outputs(game_dir, state) + + assert relocated == [] + assert messages == [] + + +def test_relocate_outputs_no_outputs(tmp_path: Path) -> None: + """No outputs in state means nothing to relocate.""" + game_dir = tmp_path / "2026-03-15_a_vs_b" + game_dir.mkdir() + + state = _make_state() + + relocated, messages = relocate_outputs(game_dir, state) + + assert relocated == [] + assert messages == [] + + +def test_relocate_outputs_partial_missing(tmp_path: Path) -> None: + """Some files exist, some don't — only existing ones are moved.""" + game_dir = tmp_path / "2026-03-15_a_vs_b" + game_dir.mkdir() + seg = tmp_path / "period-1_2026-03-15.mkv" + seg.write_bytes(b"segment") + # period-2 doesn't exist + + state = _make_state( + segment_outputs=["period-1_2026-03-15.mkv", "period-2_2026-03-15.mkv"], + ) + + relocated, messages = relocate_outputs(game_dir, state) + + assert len(relocated) == 1 + assert (game_dir / "outputs" / "period-1_2026-03-15.mkv").is_file() + assert len(messages) == 1 + + +# --------------------------------------------------------------------------- +# finish_game — relocate integration +# --------------------------------------------------------------------------- + + +def test_finish_game_relocates_outputs(tmp_path: Path) -> None: + """finish_game calls relocate_outputs and includes messages.""" + game_dir = tmp_path / "2026-03-15_a_vs_b" + game_dir.mkdir() + seg = tmp_path / "period-1_2026-03-15.mkv" + seg.write_bytes(b"segment") + + state = _make_state(segment_outputs=["period-1_2026-03-15.mkv"]) + _write_state(game_dir, state) + + result, messages = finish_game(game_dir) + + assert result.finished is True + assert (game_dir / "outputs" / "period-1_2026-03-15.mkv").is_file() + assert not seg.exists() + assert any("Relocated" in m for m in messages) + + +def test_finish_game_dry_run_no_relocate(tmp_path: Path) -> None: + """Dry run does not relocate outputs.""" + game_dir = tmp_path / "2026-03-15_a_vs_b" + game_dir.mkdir() + seg = tmp_path / 
"period-1_2026-03-15.mkv" + seg.write_bytes(b"segment") + + state = _make_state(segment_outputs=["period-1_2026-03-15.mkv"]) + _write_state(game_dir, state) + + result, messages = finish_game(game_dir, dry_run=True) + + assert result.finished is True + assert seg.exists() # not moved + assert not any("Relocated" in m for m in messages) diff --git a/tests/unit/core/test_highlights.py b/tests/unit/core/test_highlights.py index 83a5057..92ad57e 100644 --- a/tests/unit/core/test_highlights.py +++ b/tests/unit/core/test_highlights.py @@ -17,6 +17,7 @@ create_game_directory, detect_next_game_number, find_segment_videos, + find_unfinished_games, game_dir_name, init_game, load_game_state, @@ -166,11 +167,16 @@ def test_init_game_dry_run(tmp_path: Path) -> None: def test_init_game_auto_game_number(tmp_path: Path) -> None: + from reeln.core.finish import finish_game + # Create first game info1 = GameInfo(date="2026-02-26", home_team="a", away_team="b", sport="hockey") game_dir1, _ = init_game(tmp_path, info1) assert game_dir1.name == "2026-02-26_a_vs_b" + # Finish first game before starting second + finish_game(game_dir1) + # Second init auto-detects double-header info2 = GameInfo(date="2026-02-26", home_team="a", away_team="b", sport="hockey") game_dir2, _ = init_game(tmp_path, info2) @@ -374,6 +380,20 @@ def test_find_segment_videos_excludes_subdirectories(tmp_path: Path) -> None: assert len(result) == 1 +def test_find_segment_videos_excludes_shorts_subdir(tmp_path: Path) -> None: + """Shorts rendered into a shorts/ subdirectory are not picked up.""" + seg_dir = tmp_path / "period-1" + seg_dir.mkdir() + (seg_dir / "replay1.mkv").touch() + shorts_dir = seg_dir / "shorts" + shorts_dir.mkdir() + (shorts_dir / "replay1_short.mp4").touch() + + result = find_segment_videos(seg_dir, "period-1") + assert len(result) == 1 + assert result[0].name == "replay1.mkv" + + def test_find_segment_videos_multiple_extensions(tmp_path: Path) -> None: seg_dir = tmp_path / "period-1" 
seg_dir.mkdir() @@ -1041,10 +1061,11 @@ def fake_plugin(ctx: HookContext) -> None: get_registry().register(Hook.ON_GAME_INIT, fake_plugin) info = GameInfo(date="2026-02-26", home_team="a", away_team="b", sport="hockey") - game_dir, _ = init_game(tmp_path, info) + game_dir, messages = init_game(tmp_path, info) state = load_game_state(game_dir) assert state.livestreams == {"google": "https://youtube.com/live/abc123"} + assert any("Livestream (google): https://youtube.com/live/abc123" in m for m in messages) def test_init_game_emits_on_game_ready(tmp_path: Path) -> None: @@ -1320,3 +1341,240 @@ def test_merge_highlights_dry_run_no_hook(tmp_path: Path) -> None: merge_game_highlights(game_dir, ffmpeg_path=ffmpeg, dry_run=True) assert len(emitted) == 0 + + +# --------------------------------------------------------------------------- +# find_unfinished_games +# --------------------------------------------------------------------------- + + +def _write_state_file(game_dir: Path, state: GameState) -> None: + """Write a game.json to *game_dir* for test setup.""" + game_dir.mkdir(parents=True, exist_ok=True) + save_game_state(state, game_dir) + + +def test_find_unfinished_games_none(tmp_path: Path) -> None: + assert find_unfinished_games(tmp_path) == [] + + +def test_find_unfinished_games_missing_dir(tmp_path: Path) -> None: + assert find_unfinished_games(tmp_path / "nonexistent") == [] + + +def test_find_unfinished_games_finds_unfinished(tmp_path: Path) -> None: + gi = GameInfo(date="2026-03-15", home_team="a", away_team="b", sport="hockey") + game_dir = tmp_path / "2026-03-15_a_vs_b" + _write_state_file(game_dir, GameState(game_info=gi)) + + result = find_unfinished_games(tmp_path) + assert len(result) == 1 + assert result[0] == game_dir + + +def test_find_unfinished_games_skips_finished(tmp_path: Path) -> None: + gi = GameInfo(date="2026-03-15", home_team="a", away_team="b", sport="hockey") + game_dir = tmp_path / "2026-03-15_a_vs_b" + _write_state_file(game_dir, 
GameState(game_info=gi, finished=True, finished_at="2026-03-15T20:00:00+00:00")) + + result = find_unfinished_games(tmp_path) + assert result == [] + + +def test_find_unfinished_games_mixed(tmp_path: Path) -> None: + gi1 = GameInfo(date="2026-03-15", home_team="a", away_team="b", sport="hockey") + gi2 = GameInfo(date="2026-03-15", home_team="c", away_team="d", sport="hockey") + g1 = tmp_path / "2026-03-15_a_vs_b" + g2 = tmp_path / "2026-03-15_c_vs_d" + _write_state_file(g1, GameState(game_info=gi1, finished=True, finished_at="2026-03-15T18:00:00+00:00")) + _write_state_file(g2, GameState(game_info=gi2)) + + result = find_unfinished_games(tmp_path) + assert result == [g2] + + +def test_find_unfinished_games_skips_non_dirs(tmp_path: Path) -> None: + (tmp_path / "not_a_dir.txt").write_text("hello") + assert find_unfinished_games(tmp_path) == [] + + +def test_find_unfinished_games_skips_invalid_json(tmp_path: Path) -> None: + game_dir = tmp_path / "2026-03-15_a_vs_b" + game_dir.mkdir() + (game_dir / "game.json").write_text("not valid json") + + assert find_unfinished_games(tmp_path) == [] + + +def test_find_unfinished_games_skips_dirs_without_state(tmp_path: Path) -> None: + (tmp_path / "some_dir").mkdir() + assert find_unfinished_games(tmp_path) == [] + + +# --------------------------------------------------------------------------- +# init_game — unfinished game guard +# --------------------------------------------------------------------------- + + +def test_init_game_blocks_if_unfinished(tmp_path: Path) -> None: + """init_game raises MediaError when an unfinished game exists.""" + gi1 = GameInfo(date="2026-03-15", home_team="ducks", away_team="kraken", sport="hockey") + g1 = tmp_path / "2026-03-15_ducks_vs_kraken" + _write_state_file(g1, GameState(game_info=gi1)) + + gi2 = GameInfo(date="2026-03-15", home_team="wild", away_team="jets", sport="hockey") + with pytest.raises(MediaError, match=r"Unfinished game.*ducks_vs_kraken"): + init_game(tmp_path, gi2) + + +def 
test_init_game_allows_after_finish(tmp_path: Path) -> None: + """init_game succeeds when all prior games are finished.""" + gi1 = GameInfo(date="2026-03-15", home_team="ducks", away_team="kraken", sport="hockey") + g1 = tmp_path / "2026-03-15_ducks_vs_kraken" + _write_state_file(g1, GameState(game_info=gi1, finished=True, finished_at="2026-03-15T18:00:00+00:00")) + + gi2 = GameInfo(date="2026-03-15", home_team="wild", away_team="jets", sport="hockey") + game_dir, _messages = init_game(tmp_path, gi2) + + assert game_dir.is_dir() + assert "wild" in game_dir.name + + +def test_init_game_dry_run_still_checks_unfinished(tmp_path: Path) -> None: + """Unfinished game guard fires even in dry-run mode.""" + gi1 = GameInfo(date="2026-03-15", home_team="ducks", away_team="kraken", sport="hockey") + g1 = tmp_path / "2026-03-15_ducks_vs_kraken" + _write_state_file(g1, GameState(game_info=gi1)) + + gi2 = GameInfo(date="2026-03-15", home_team="wild", away_team="jets", sport="hockey") + with pytest.raises(MediaError, match=r"Unfinished game"): + init_game(tmp_path, gi2, dry_run=True) + + +# --------------------------------------------------------------------------- +# process_segment — output tracking +# --------------------------------------------------------------------------- + + +def test_process_segment_records_output(tmp_path: Path) -> None: + """process_segment appends the output filename to state.segment_outputs.""" + game_dir = _make_game_dir(tmp_path) + seg_dir = game_dir / "period-1" + (seg_dir / "replay1.mkv").touch() + + ffmpeg = Path("/usr/bin/ffmpeg") + with patch("reeln.core.ffmpeg.subprocess.run", return_value=_mock_ffmpeg_success()): + process_segment(game_dir, 1, ffmpeg_path=ffmpeg) + + state = load_game_state(game_dir) + assert "period-1_2026-02-26.mkv" in state.segment_outputs + + +def test_process_segment_output_idempotent(tmp_path: Path) -> None: + """Running segment twice doesn't duplicate segment_outputs.""" + game_dir = _make_game_dir(tmp_path) + seg_dir 
= game_dir / "period-1" + (seg_dir / "replay1.mkv").touch() + + ffmpeg = Path("/usr/bin/ffmpeg") + with patch("reeln.core.ffmpeg.subprocess.run", return_value=_mock_ffmpeg_success()): + process_segment(game_dir, 1, ffmpeg_path=ffmpeg) + (seg_dir / "replay2.mkv").touch() + process_segment(game_dir, 1, ffmpeg_path=ffmpeg) + + state = load_game_state(game_dir) + assert state.segment_outputs.count("period-1_2026-02-26.mkv") == 1 + + +def test_process_segment_dry_run_no_output_tracking(tmp_path: Path) -> None: + """Dry run does not record segment outputs.""" + game_dir = _make_game_dir(tmp_path) + seg_dir = game_dir / "period-1" + (seg_dir / "replay1.mkv").touch() + + ffmpeg = Path("/usr/bin/ffmpeg") + process_segment(game_dir, 1, ffmpeg_path=ffmpeg, dry_run=True) + + state = load_game_state(game_dir) + assert state.segment_outputs == [] + + +# --------------------------------------------------------------------------- +# merge_game_highlights — output tracking +# --------------------------------------------------------------------------- + + +def test_merge_highlights_records_output(tmp_path: Path) -> None: + """merge_game_highlights records the highlights output filename.""" + game_dir = _make_game_dir(tmp_path) + for i in range(1, 4): + (tmp_path / f"period-{i}_2026-02-26.mkv").touch() + + ffmpeg = Path("/usr/bin/ffmpeg") + with patch("reeln.core.ffmpeg.subprocess.run", return_value=_mock_ffmpeg_success()): + merge_game_highlights(game_dir, ffmpeg_path=ffmpeg) + + state = load_game_state(game_dir) + assert state.highlights_output == "roseville_vs_mahtomedi_2026-02-26.mkv" + + +def test_merge_highlights_dry_run_no_output_tracking(tmp_path: Path) -> None: + """Dry run does not record highlights output.""" + game_dir = _make_game_dir(tmp_path) + for i in range(1, 4): + (tmp_path / f"period-{i}_2026-02-26.mkv").touch() + + ffmpeg = Path("/usr/bin/ffmpeg") + merge_game_highlights(game_dir, ffmpeg_path=ffmpeg, dry_run=True) + + state = load_game_state(game_dir) + assert 
state.highlights_output == "" + + +# --------------------------------------------------------------------------- +# Output extension matching +# --------------------------------------------------------------------------- + + +def test_process_segment_output_matches_input_ext(tmp_path: Path) -> None: + """Output extension matches input files when all are the same format.""" + game_dir = _make_game_dir(tmp_path) + seg_dir = game_dir / "period-1" + (seg_dir / "replay1.mp4").write_bytes(b"video") + + ffmpeg = Path("/usr/bin/ffmpeg") + with patch("reeln.core.ffmpeg.subprocess.run", return_value=_mock_ffmpeg_success()): + result, _ = process_segment(game_dir, 1, ffmpeg_path=ffmpeg) + + assert result.output.suffix == ".mp4" + assert result.output.name == "period-1_2026-02-26.mp4" + + +def test_process_segment_mixed_ext_defaults_mkv(tmp_path: Path) -> None: + """Mixed input extensions fall back to .mkv output.""" + game_dir = _make_game_dir(tmp_path) + seg_dir = game_dir / "period-1" + (seg_dir / "replay1.mkv").write_bytes(b"video") + (seg_dir / "replay2.mp4").write_bytes(b"video") + + ffmpeg = Path("/usr/bin/ffmpeg") + with patch("reeln.core.ffmpeg.subprocess.run", return_value=_mock_ffmpeg_success()): + result, _ = process_segment(game_dir, 1, ffmpeg_path=ffmpeg) + + assert result.output.suffix == ".mkv" + assert result.copy is False + + +def test_merge_highlights_finds_mp4_segments(tmp_path: Path) -> None: + """merge_game_highlights discovers .mp4 segment files.""" + game_dir = _make_game_dir(tmp_path) + for i in range(1, 4): + (tmp_path / f"period-{i}_2026-02-26.mp4").touch() + + ffmpeg = Path("/usr/bin/ffmpeg") + with patch("reeln.core.ffmpeg.subprocess.run", return_value=_mock_ffmpeg_success()): + result, _ = merge_game_highlights(game_dir, ffmpeg_path=ffmpeg) + + assert result.output.suffix == ".mp4" + assert result.output.name == "roseville_vs_mahtomedi_2026-02-26.mp4" + assert len(result.segment_files) == 3 diff --git a/tests/unit/core/test_iterations.py 
b/tests/unit/core/test_iterations.py index ba1cb98..22bce6b 100644 --- a/tests/unit/core/test_iterations.py +++ b/tests/unit/core/test_iterations.py @@ -18,6 +18,15 @@ _MOD = "reeln.core.iterations" +@pytest.fixture(autouse=True) +def _mock_hook_registry() -> object: # type: ignore[misc] + """Suppress POST_RENDER hook emission from render_iterations().""" + with patch("reeln.plugins.registry.get_registry") as mock_get: + mock_registry = MagicMock() + mock_get.return_value = mock_registry + yield mock_registry + + def _make_config(**profile_overrides: RenderProfile) -> AppConfig: profiles: dict[str, RenderProfile] = { "fullspeed": RenderProfile(name="fullspeed", speed=1.0), @@ -128,7 +137,7 @@ def test_single_profile_full_frame(tmp_path: Path) -> None: iter0 = _iteration_temp(output, 0) - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -167,7 +176,7 @@ def test_multiple_profiles_full_frame_concat(tmp_path: Path) -> None: call_count = [0] - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: temp = _iteration_temp(output, call_count[0]) temp.write_bytes(b"rendered") call_count[0] += 1 @@ -175,18 +184,14 @@ def fake_render(plan: object) -> RenderResult: with ( patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, - patch(f"{_MOD}.write_concat_file") as mock_concat_file, - patch(f"{_MOD}.build_concat_command", return_value=["ffmpeg"]), + patch(f"{_MOD}.probe_duration", return_value=10.0), + patch(f"{_MOD}.build_xfade_command", return_value=["ffmpeg"]), patch(f"{_MOD}.run_ffmpeg") as mock_run, ): mock_instance = MagicMock() mock_instance.render.side_effect = fake_render MockRenderer.return_value = mock_instance - concat_tmp = tmp_path / "concat.txt" - concat_tmp.write_text("file list") - mock_concat_file.return_value = concat_tmp - result, messages = render_iterations( clip, 
["fullspeed", "slowmo"], @@ -195,11 +200,10 @@ def fake_render(plan: object) -> RenderResult: output, ) - assert result.concat_copy is True + assert result.concat_copy is False assert len(result.iteration_outputs) == 2 assert result.profile_names == ["fullspeed", "slowmo"] assert any("Concatenated 2 iterations" in m for m in messages) - mock_concat_file.assert_called_once() mock_run.assert_called_once() @@ -223,7 +227,7 @@ def test_single_profile_short_form(tmp_path: Path) -> None: iter0 = _iteration_temp(output, 0) - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -269,7 +273,7 @@ def test_multiple_profiles_short_form_concat(tmp_path: Path) -> None: call_count = [0] - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: temp = _iteration_temp(output, call_count[0]) temp.write_bytes(b"rendered") call_count[0] += 1 @@ -299,7 +303,7 @@ def fake_render(plan: object) -> RenderResult: short_config=short_cfg, ) - assert result.concat_copy is True + assert result.concat_copy is False assert len(result.iteration_outputs) == 2 @@ -326,7 +330,7 @@ def test_subtitle_template_resolved_and_cleaned(tmp_path: Path) -> None: iter0 = _iteration_temp(output, 0) rendered_sub_path: list[Path] = [] - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -376,7 +380,7 @@ def test_subtitle_resolve_returns_none(tmp_path: Path) -> None: iter0 = _iteration_temp(output, 0) - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -415,7 +419,7 @@ def test_error_cleanup_on_render_failure(tmp_path: Path) -> None: iter0 = _iteration_temp(output, 0) - def 
fake_render_fail(plan: object) -> RenderResult: + def fake_render_fail(plan: object, **kwargs: object) -> RenderResult: if not iter0.exists(): iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -455,7 +459,7 @@ def test_default_context_used(tmp_path: Path) -> None: iter0 = _iteration_temp(output, 0) - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -528,7 +532,7 @@ def test_event_metadata_enriches_context(tmp_path: Path) -> None: captured_plans: list[RenderPlan] = [] - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: captured_plans.append(plan) # type: ignore[arg-type] iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -565,7 +569,7 @@ def test_event_metadata_none_no_enrichment(tmp_path: Path) -> None: iter0 = _iteration_temp(output, 0) - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -600,7 +604,7 @@ def test_event_metadata_probe_returns_none(tmp_path: Path) -> None: iter0 = _iteration_temp(output, 0) - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -640,7 +644,7 @@ def test_short_form_without_short_config_uses_full_frame(tmp_path: Path) -> None iter0 = _iteration_temp(output, 0) - def fake_render(plan: object) -> RenderResult: + def fake_render(plan: object, **kwargs: object) -> RenderResult: iter0.write_bytes(b"rendered") return _mock_render_result(iter0) @@ -666,3 +670,578 @@ def fake_render(plan: object) -> RenderResult: # Should use plan_full_frame, not plan_short mock_plan.assert_called_once() assert result.output == output + + +# 
--------------------------------------------------------------------------- +# zoom_path and source_fps forwarded to plan_short +# --------------------------------------------------------------------------- + + +def test_zoom_path_forwarded_to_plan_short(tmp_path: Path) -> None: + """zoom_path and source_fps are passed through to plan_short().""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + clip = tmp_path / "clip.mkv" + clip.write_bytes(b"video") + output = tmp_path / "out.mp4" + config = _make_config() + + short_cfg = ShortConfig( + input=clip, + output=output, + width=1080, + height=1920, + smart=True, + ) + + zoom = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + iter0 = _iteration_temp(output, 0) + + def fake_render(plan: object, **kwargs: object) -> RenderResult: + iter0.write_bytes(b"rendered") + return _mock_render_result(iter0) + + with ( + patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, + patch(f"{_MOD}.plan_short") as mock_plan, + patch(f"{_MOD}.run_ffmpeg"), + ): + mock_plan.return_value = RenderPlan(inputs=[clip], output=iter0) + mock_instance = MagicMock() + mock_instance.render.side_effect = fake_render + MockRenderer.return_value = mock_instance + + result, _ = render_iterations( + clip, + ["fullspeed"], + config, + Path("/usr/bin/ffmpeg"), + output, + is_short=True, + short_config=short_cfg, + zoom_path=zoom, + source_fps=59.94, + ) + + mock_plan.assert_called_once() + call_kwargs = mock_plan.call_args + assert call_kwargs.kwargs.get("zoom_path") is zoom + assert call_kwargs.kwargs.get("source_fps") == 59.94 + assert result.output == output + + +def test_zoom_path_none_by_default(tmp_path: Path) -> None: + """When zoom_path is not provided, plan_short gets None.""" + clip = tmp_path / "clip.mkv" + clip.write_bytes(b"video") + output = tmp_path / "out.mp4" + config = _make_config() 
+ + short_cfg = ShortConfig( + input=clip, + output=output, + width=1080, + height=1920, + ) + + iter0 = _iteration_temp(output, 0) + + def fake_render(plan: object, **kwargs: object) -> RenderResult: + iter0.write_bytes(b"rendered") + return _mock_render_result(iter0) + + with ( + patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, + patch(f"{_MOD}.plan_short") as mock_plan, + patch(f"{_MOD}.run_ffmpeg"), + ): + mock_plan.return_value = RenderPlan(inputs=[clip], output=iter0) + mock_instance = MagicMock() + mock_instance.render.side_effect = fake_render + MockRenderer.return_value = mock_instance + + result, _ = render_iterations( + clip, + ["fullspeed"], + config, + Path("/usr/bin/ffmpeg"), + output, + is_short=True, + short_config=short_cfg, + ) + + mock_plan.assert_called_once() + call_kwargs = mock_plan.call_args + assert call_kwargs.kwargs.get("zoom_path") is None + assert call_kwargs.kwargs.get("source_fps") == 30.0 + assert result.output == output + + +# --------------------------------------------------------------------------- +# speed_segments + smart zoom path remapping +# --------------------------------------------------------------------------- + + +def test_speed_segments_profile_remaps_zoom_path(tmp_path: Path) -> None: + """Profile with speed_segments remaps zoom path timestamps for smart tracking.""" + from reeln.models.profile import SpeedSegment + from reeln.models.zoom import ZoomPath, ZoomPoint + + clip = tmp_path / "clip.mkv" + clip.write_bytes(b"video") + output = tmp_path / "out.mp4" + + slowmo_profile = RenderProfile( + name="slowmo", + speed_segments=( + SpeedSegment(speed=1.0, until=5.0), + SpeedSegment(speed=0.5, until=8.0), + SpeedSegment(speed=1.0, until=None), + ), + ) + config = _make_config(slowmo=slowmo_profile) + + short_cfg = ShortConfig( + input=clip, + output=output, + width=1080, + height=1920, + smart=True, + ) + + zoom = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, 
center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + iter0 = _iteration_temp(output, 0) + + def fake_render(plan: object, **kwargs: object) -> RenderResult: + iter0.write_bytes(b"rendered") + return _mock_render_result(iter0) + + with ( + patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, + patch(f"{_MOD}.plan_short") as mock_plan, + patch(f"{_MOD}.run_ffmpeg"), + ): + mock_plan.return_value = RenderPlan(inputs=[clip], output=iter0) + mock_instance = MagicMock() + mock_instance.render.side_effect = fake_render + MockRenderer.return_value = mock_instance + + _result, _messages = render_iterations( + clip, + ["slowmo"], + config, + Path("/usr/bin/ffmpeg"), + output, + is_short=True, + short_config=short_cfg, + zoom_path=zoom, + source_fps=60.0, + ) + + # smart should be preserved, zoom_path should be remapped (not original) + mock_plan.assert_called_once() + call_args = mock_plan.call_args + modified_cfg = call_args[0][0] + assert modified_cfg.smart is True + remapped_zoom = call_args.kwargs.get("zoom_path") + assert remapped_zoom is not None + assert remapped_zoom is not zoom # should be a new remapped object + # Duration should be stretched: 5/1 + 3/0.5 + 2/1 = 13.0 + assert remapped_zoom.duration == 13.0 + assert _result.output == output + + +def test_speed_segments_overlay_duration_adjusted(tmp_path: Path) -> None: + """Overlay duration accounts for speed_segments time stretch.""" + from reeln.models.profile import SpeedSegment + + clip = tmp_path / "clip.mkv" + clip.write_bytes(b"video") + output = tmp_path / "out.mp4" + + slowmo_profile = RenderProfile( + name="slowmo", + subtitle_template="builtin:goal_overlay", + speed_segments=( + SpeedSegment(speed=1.0, until=5.0), + SpeedSegment(speed=0.5, until=8.0), + SpeedSegment(speed=1.0, until=None), + ), + ) + config = _make_config(slowmo=slowmo_profile) + + short_cfg = ShortConfig( + input=clip, output=output, width=1080, height=1920, + ) + + iter0 = 
_iteration_temp(output, 0) + + def fake_render(plan: object, **kwargs: object) -> RenderResult: + iter0.write_bytes(b"rendered") + return _mock_render_result(iter0) + + with ( + patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, + patch(f"{_MOD}.plan_short") as mock_plan, + patch(f"{_MOD}.run_ffmpeg"), + patch("reeln.core.ffmpeg.probe_duration", return_value=10.0), + patch(f"{_MOD}.resolve_subtitle_for_profile") as mock_sub, + patch("reeln.core.overlay.build_overlay_context") as mock_overlay, + ): + mock_overlay.return_value = TemplateContext() + mock_sub.return_value = None + mock_plan.return_value = RenderPlan(inputs=[clip], output=iter0) + mock_instance = MagicMock() + mock_instance.render.side_effect = fake_render + MockRenderer.return_value = mock_instance + + render_iterations( + clip, ["slowmo"], config, Path("/usr/bin/ffmpeg"), output, + is_short=True, short_config=short_cfg, + event_metadata={"assists": "A, B"}, + ) + + # speed_segments: 5s@1x + 3s@0.5x + 2s@1x = 5 + 6 + 2 = 13s + mock_overlay.assert_called_once() + call_kwargs = mock_overlay.call_args + assert call_kwargs.kwargs["duration"] == pytest.approx(13.0) + + +def test_mixed_profiles_smart_preserved_for_non_speed_segments(tmp_path: Path) -> None: + """Smart is preserved for profiles without speed_segments in multi-iteration.""" + from reeln.models.profile import SpeedSegment + from reeln.models.zoom import ZoomPath, ZoomPoint + + clip = tmp_path / "clip.mkv" + clip.write_bytes(b"video") + output = tmp_path / "out.mp4" + + plain_profile = RenderProfile(name="fullspeed", speed=1.0) + slowmo_profile = RenderProfile( + name="slowmo", + speed_segments=( + SpeedSegment(speed=1.0, until=5.0), + SpeedSegment(speed=0.5, until=8.0), + SpeedSegment(speed=1.0, until=None), + ), + ) + config = _make_config(fullspeed=plain_profile, slowmo=slowmo_profile) + + short_cfg = ShortConfig( + input=clip, + output=output, + width=1080, + height=1920, + smart=True, + ) + + zoom = ZoomPath( + points=( + 
ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + call_count = [0] + + def fake_render(plan: object, **kwargs: object) -> RenderResult: + temp = _iteration_temp(output, call_count[0]) + temp.write_bytes(b"rendered") + call_count[0] += 1 + return _mock_render_result(temp) + + with ( + patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, + patch(f"{_MOD}.plan_short") as mock_plan, + patch(f"{_MOD}.write_concat_file") as mock_concat_file, + patch(f"{_MOD}.build_concat_command", return_value=["ffmpeg"]), + patch(f"{_MOD}.run_ffmpeg"), + ): + mock_plan.return_value = RenderPlan(inputs=[clip], output=output) + mock_instance = MagicMock() + mock_instance.render.side_effect = fake_render + MockRenderer.return_value = mock_instance + + concat_tmp = tmp_path / "concat.txt" + concat_tmp.write_text("file list") + mock_concat_file.return_value = concat_tmp + + _result, _ = render_iterations( + clip, + ["fullspeed", "slowmo"], + config, + Path("/usr/bin/ffmpeg"), + output, + is_short=True, + short_config=short_cfg, + zoom_path=zoom, + source_fps=60.0, + ) + + assert mock_plan.call_count == 2 + # First call (fullspeed): smart preserved, zoom_path passed unchanged + first_cfg = mock_plan.call_args_list[0][0][0] + assert first_cfg.smart is True + assert mock_plan.call_args_list[0].kwargs.get("zoom_path") is zoom + # Second call (slowmo with speed_segments): smart preserved, zoom remapped + second_cfg = mock_plan.call_args_list[1][0][0] + assert second_cfg.smart is True + remapped_zoom = mock_plan.call_args_list[1].kwargs.get("zoom_path") + assert remapped_zoom is not None + assert remapped_zoom is not zoom # remapped, not original + + +def test_multiple_profiles_xfade_fallback_to_concat(tmp_path: Path) -> None: + """When xfade fails, falls back to concat demuxer.""" + clip = tmp_path / "clip.mkv" + clip.write_bytes(b"video") + output = tmp_path / 
"out.mp4" + config = _make_config() + + call_count = [0] + + def fake_render(plan: object, **kwargs: object) -> RenderResult: + temp = _iteration_temp(output, call_count[0]) + temp.write_bytes(b"rendered") + call_count[0] += 1 + return _mock_render_result(temp) + + run_count = [0] + + def fake_run_ffmpeg(cmd: list[str], **kwargs: object) -> None: + run_count[0] += 1 + if run_count[0] == 1: + raise RuntimeError("xfade not supported") + + with ( + patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, + patch(f"{_MOD}.probe_duration", return_value=10.0), + patch(f"{_MOD}.build_xfade_command", return_value=["ffmpeg-xfade"]), + patch(f"{_MOD}.write_concat_file") as mock_concat_file, + patch(f"{_MOD}.build_concat_command", return_value=["ffmpeg-concat"]), + patch(f"{_MOD}.run_ffmpeg", side_effect=fake_run_ffmpeg), + ): + mock_instance = MagicMock() + mock_instance.render.side_effect = fake_render + MockRenderer.return_value = mock_instance + + concat_tmp = tmp_path / "concat.txt" + concat_tmp.write_text("file list") + mock_concat_file.return_value = concat_tmp + + result, messages = render_iterations( + clip, + ["fullspeed", "slowmo"], + config, + Path("/usr/bin/ffmpeg"), + output, + ) + + assert result.concat_copy is False + assert any("Concatenated 2 iterations" in m for m in messages) + # run_ffmpeg called twice: xfade (fails) then concat (succeeds) + assert run_count[0] == 2 + mock_concat_file.assert_called_once() + + +@patch(f"{_MOD}.run_ffmpeg") +@patch(f"{_MOD}.FFmpegRenderer") +@patch(f"{_MOD}.plan_short") +def test_render_iterations_branding_first_only( + mock_plan: MagicMock, MockRenderer: MagicMock, mock_run: MagicMock, tmp_path: Path, +) -> None: + """Branding should only appear on the first iteration.""" + config = _make_config() + config = AppConfig( + video=config.video, + render_profiles={ + "a": RenderProfile(name="a", speed=1.0), + "b": RenderProfile(name="b", speed=1.0), + }, + iterations=config.iterations, + ) + clip = tmp_path / "clip.mkv" + clip.touch() 
+ output = tmp_path / "output.mp4" + + branding_file = tmp_path / "brand.ass" + branding_file.write_text("[Script Info]\n") + short_cfg = ShortConfig( + input=clip, + output=output, + branding=branding_file, + ) + + mock_plan.return_value = RenderPlan( + inputs=[clip], output=output, filter_complex="scale=1080:-2" + ) + mock_instance = MagicMock() + mock_instance.render.side_effect = lambda plan, **kw: ( + plan.output.touch(), + _mock_render_result(plan.output), + )[1] + MockRenderer.return_value = mock_instance + + render_iterations( + clip, + ["a", "b"], + config, + Path("/usr/bin/ffmpeg"), + output, + is_short=True, + short_config=short_cfg, + ) + + assert mock_plan.call_count == 2 + first_cfg = mock_plan.call_args_list[0][0][0] + assert first_cfg.branding == branding_file + second_cfg = mock_plan.call_args_list[1][0][0] + assert second_cfg.branding is None + + +# --------------------------------------------------------------------------- +# game_info in POST_RENDER hook data +# --------------------------------------------------------------------------- + + +def test_game_info_included_in_post_render_hook( + tmp_path: Path, _mock_hook_registry: MagicMock, +) -> None: + """When game_info is provided, it appears in POST_RENDER hook data.""" + clip = tmp_path / "clip.mkv" + clip.write_bytes(b"video") + output = tmp_path / "out.mp4" + config = _make_config() + + iter0 = _iteration_temp(output, 0) + + def fake_render(plan: object, **kwargs: object) -> RenderResult: + iter0.write_bytes(b"rendered") + return _mock_render_result(iter0) + + sentinel = object() + + with ( + patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, + patch(f"{_MOD}.run_ffmpeg"), + ): + mock_instance = MagicMock() + mock_instance.render.side_effect = fake_render + MockRenderer.return_value = mock_instance + + render_iterations( + clip, + ["fullspeed"], + config, + Path("/usr/bin/ffmpeg"), + output, + game_info=sentinel, + ) + + # Verify POST_RENDER was emitted with game_info in data + 
_mock_hook_registry.emit.assert_called_once() + call_args = _mock_hook_registry.emit.call_args + ctx = call_args[0][1] + assert ctx.data["game_info"] is sentinel + + +def test_game_info_omitted_when_none( + tmp_path: Path, _mock_hook_registry: MagicMock, +) -> None: + """When game_info is None, it is not included in POST_RENDER data.""" + clip = tmp_path / "clip.mkv" + clip.write_bytes(b"video") + output = tmp_path / "out.mp4" + config = _make_config() + + iter0 = _iteration_temp(output, 0) + + def fake_render(plan: object, **kwargs: object) -> RenderResult: + iter0.write_bytes(b"rendered") + return _mock_render_result(iter0) + + with ( + patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, + patch(f"{_MOD}.run_ffmpeg"), + ): + mock_instance = MagicMock() + mock_instance.render.side_effect = fake_render + MockRenderer.return_value = mock_instance + + render_iterations( + clip, + ["fullspeed"], + config, + Path("/usr/bin/ffmpeg"), + output, + ) + + _mock_hook_registry.emit.assert_called_once() + call_args = _mock_hook_registry.emit.call_args + ctx = call_args[0][1] + assert "game_info" not in ctx.data + + +def test_event_context_included_in_post_render_hook( + tmp_path: Path, _mock_hook_registry: MagicMock, +) -> None: + """When game_event/player/assists are provided, they appear in POST_RENDER data.""" + clip = tmp_path / "clip.mkv" + clip.write_bytes(b"video") + output = tmp_path / "out.mp4" + config = _make_config() + + iter0 = _iteration_temp(output, 0) + + def fake_render(plan: object, **kwargs: object) -> RenderResult: + iter0.write_bytes(b"rendered") + return _mock_render_result(iter0) + + event_sentinel = object() + + with ( + patch(f"{_MOD}.FFmpegRenderer") as MockRenderer, + patch(f"{_MOD}.run_ffmpeg"), + ): + mock_instance = MagicMock() + mock_instance.render.side_effect = fake_render + MockRenderer.return_value = mock_instance + + render_iterations( + clip, + ["fullspeed"], + config, + Path("/usr/bin/ffmpeg"), + output, + game_event=event_sentinel, + 
player="#48 Remitz", + assists="#7 Smith", + ) + + _mock_hook_registry.emit.assert_called_once() + call_args = _mock_hook_registry.emit.call_args + ctx = call_args[0][1] + assert ctx.data["game_event"] is event_sentinel + assert ctx.data["player"] == "#48 Remitz" + assert ctx.data["assists"] == "#7 Smith" diff --git a/tests/unit/core/test_overlay.py b/tests/unit/core/test_overlay.py index 145c442..73462dd 100644 --- a/tests/unit/core/test_overlay.py +++ b/tests/unit/core/test_overlay.py @@ -30,6 +30,11 @@ def test_missing_template(self) -> None: with pytest.raises(RenderError, match="Builtin template not found"): resolve_builtin_template("nonexistent_template") + def test_branding_template(self) -> None: + path = resolve_builtin_template("branding") + assert path.is_file() + assert path.name == "branding.ass" + # --------------------------------------------------------------------------- # overlay_font_size @@ -161,6 +166,7 @@ def _base_ctx(self, **kwargs: str) -> TemplateContext: "home_team": "Roseville", "away_team": "Burnsville", "sport": "hockey", + "level": "bantam", "player": "#17 Smith", } defaults.update(kwargs) @@ -174,14 +180,15 @@ def test_full_scorer_and_assists(self) -> None: assert result.get("goal_scorer_text") == "#17 Smith" assert result.get("goal_assist_1") == "#22 Jones" assert result.get("goal_assist_2") == "#5 Brown" - assert result.get("goal_scorer_team") == "Roseville" - assert result.get("team_level") == "hockey" + assert result.get("goal_scorer_team") == "ROSEVILLE" + assert result.get("team_level") == "BANTAM" # Timing: assists visible assert result.get("scorer_start") == format_ass_time(0.0) - assert result.get("scorer_end") == format_ass_time(8.0) + assert result.get("scorer_end") == format_ass_time(9.0) assert result.get("assist_start") == format_ass_time(0.0) - assert result.get("assist_end") == format_ass_time(8.0) + assert result.get("assist_end") == format_ass_time(9.0) + assert result.get("box_end") == format_ass_time(9.0) def 
test_no_assists_hides_assist_timing(self) -> None: ctx = self._base_ctx() @@ -192,7 +199,7 @@ def test_no_assists_hides_assist_timing(self) -> None: # Assist end time should be 0 (hidden) assert result.get("assist_end") == format_ass_time(0.0) # Scorer still visible - assert result.get("scorer_end") == format_ass_time(10.0) + assert result.get("scorer_end") == format_ass_time(11.0) def test_no_player(self) -> None: ctx = self._base_ctx(player="") @@ -297,7 +304,8 @@ def test_preserves_base_context(self) -> None: def test_duration_default(self) -> None: ctx = self._base_ctx() result = build_overlay_context(ctx, event_metadata={}) - assert result.get("scorer_end") == format_ass_time(10.0) + assert result.get("scorer_end") == format_ass_time(11.0) + assert result.get("box_end") == format_ass_time(11.0) def test_none_event_metadata(self) -> None: ctx = self._base_ctx() @@ -315,3 +323,47 @@ def test_team_text_color_has_alpha(self) -> None: ctx = self._base_ctx() result = build_overlay_context(ctx, event_metadata={}) assert result.get("ass_team_text_color") == rgb_to_ass((255, 255, 255), 0x40) + + def test_scoring_team_overrides_home_team(self) -> None: + ctx = self._base_ctx() + result = build_overlay_context(ctx, event_metadata={}, scoring_team="Bears") + assert result.get("goal_scorer_team") == "BEARS" + + def test_team_level_uses_level_not_sport(self) -> None: + ctx = self._base_ctx(level="2016") + result = build_overlay_context(ctx, event_metadata={}) + assert result.get("team_level") == "2016" + + def test_team_level_empty_when_no_level(self) -> None: + ctx = self._base_ctx(level="") + result = build_overlay_context(ctx, event_metadata={}) + assert result.get("team_level") == "" + + def test_scoring_team_none_uses_home_team(self) -> None: + ctx = self._base_ctx() + result = build_overlay_context(ctx, event_metadata={}, scoring_team=None) + assert result.get("goal_scorer_team") == "ROSEVILLE" + + def test_tournament_promotes_to_title(self) -> None: + ctx = 
self._base_ctx(tournament="Presidents Cup") + result = build_overlay_context(ctx, event_metadata={}) + assert result.get("goal_scorer_team") == "PRESIDENTS CUP" + assert result.get("team_level") == "ROSEVILLE/BANTAM" + + def test_tournament_with_scoring_team(self) -> None: + ctx = self._base_ctx(tournament="Presidents Cup") + result = build_overlay_context(ctx, event_metadata={}, scoring_team="Bears") + assert result.get("goal_scorer_team") == "PRESIDENTS CUP" + assert result.get("team_level") == "BEARS/BANTAM" + + def test_tournament_without_level(self) -> None: + ctx = self._base_ctx(tournament="Presidents Cup", level="") + result = build_overlay_context(ctx, event_metadata={}) + assert result.get("goal_scorer_team") == "PRESIDENTS CUP" + assert result.get("team_level") == "ROSEVILLE" + + def test_no_tournament_keeps_original_format(self) -> None: + ctx = self._base_ctx(tournament="") + result = build_overlay_context(ctx, event_metadata={}) + assert result.get("goal_scorer_team") == "ROSEVILLE" + assert result.get("team_level") == "BANTAM" diff --git a/tests/unit/core/test_plugin_config.py b/tests/unit/core/test_plugin_config.py index f8cf801..c3eb5cd 100644 --- a/tests/unit/core/test_plugin_config.py +++ b/tests/unit/core/test_plugin_config.py @@ -273,12 +273,8 @@ def test_skips_plugins_without_schema(self) -> None: assert result == {"other": {"k": "v"}} def test_multiple_plugins(self) -> None: - schema_a = PluginConfigSchema( - fields=(ConfigField(name="flag_a", field_type="bool", default=True),) - ) - schema_b = PluginConfigSchema( - fields=(ConfigField(name="flag_b", field_type="bool", default=False),) - ) + schema_a = PluginConfigSchema(fields=(ConfigField(name="flag_a", field_type="bool", default=True),)) + schema_b = PluginConfigSchema(fields=(ConfigField(name="flag_b", field_type="bool", default=False),)) def _mock_schema(name: str) -> PluginConfigSchema | None: return {"a": schema_a, "b": schema_b}.get(name) diff --git a/tests/unit/core/test_profiles.py 
b/tests/unit/core/test_profiles.py index 8bce39f..2e4807d 100644 --- a/tests/unit/core/test_profiles.py +++ b/tests/unit/core/test_profiles.py @@ -19,7 +19,7 @@ ) from reeln.models.config import AppConfig from reeln.models.game import GameEvent -from reeln.models.profile import IterationConfig, RenderProfile +from reeln.models.profile import IterationConfig, RenderProfile, SpeedSegment from reeln.models.short import CropMode, ShortConfig from reeln.models.template import TemplateContext @@ -163,6 +163,34 @@ def test_subtitle_without_profile_template(self, tmp_path: Path) -> None: result = apply_profile_to_short(base, profile, rendered_subtitle=sub) assert result.subtitle == sub + def test_scale_override(self, tmp_path: Path) -> None: + base = _base_short(tmp_path) + profile = RenderProfile(name="zoom", scale=1.5) + result = apply_profile_to_short(base, profile) + assert result.scale == 1.5 + assert result.width == base.width # unchanged + + def test_smart_override(self, tmp_path: Path) -> None: + base = _base_short(tmp_path) + profile = RenderProfile(name="tracked", smart=True) + result = apply_profile_to_short(base, profile) + assert result.smart is True + + def test_smart_false_override(self, tmp_path: Path) -> None: + base = _base_short(tmp_path) + base_with_smart = apply_profile_to_short(base, RenderProfile(name="s", smart=True)) + profile = RenderProfile(name="no-track", smart=False) + result = apply_profile_to_short(base_with_smart, profile) + assert result.smart is False + + def test_speed_segments_override(self, tmp_path: Path) -> None: + base = _base_short(tmp_path) + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + profile = RenderProfile(name="var", speed_segments=segs) + result = apply_profile_to_short(base, profile) + assert result.speed_segments == segs + assert result.speed == 1.0 # unchanged + def test_original_unchanged(self, tmp_path: Path) -> None: base = _base_short(tmp_path) profile = RenderProfile(name="fast", speed=2.0) 
@@ -209,7 +237,7 @@ def test_subtitle_only(self) -> None: profile = RenderProfile(name="overlay") fc, af = build_profile_filter_chain(profile, rendered_subtitle=sub) assert fc is not None - assert "ass=" in fc + assert "subtitles=" in fc assert af is None def test_all_filters(self) -> None: @@ -221,7 +249,7 @@ def test_all_filters(self) -> None: assert len(parts) == 3 # lut, speed, subtitle assert "lut3d" in parts[0] assert "setpts" in parts[1] - assert "ass=" in parts[2] + assert "subtitles=" in parts[2] assert af is not None def test_filter_order_lut_before_speed(self) -> None: @@ -232,6 +260,42 @@ def test_filter_order_lut_before_speed(self) -> None: speed_pos = fc.index("setpts") assert lut_pos < speed_pos + def test_speed_segments(self) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + profile = RenderProfile(name="var", speed_segments=segs) + fc, af = build_profile_filter_chain(profile) + assert fc is not None + assert "split=2" in fc + assert "asplit=2" in fc + assert af is None + + def test_speed_segments_with_lut(self) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + profile = RenderProfile(name="var", speed_segments=segs, lut="warm.cube") + fc, af = build_profile_filter_chain(profile) + assert fc is not None + lut_pos = fc.index("lut3d") + split_pos = fc.index("split=2") + assert lut_pos < split_pos + assert af is None + + def test_speed_segments_with_subtitle(self) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + sub = Path("/tmp/overlay.ass") + profile = RenderProfile(name="var", speed_segments=segs) + fc, af = build_profile_filter_chain(profile, rendered_subtitle=sub) + assert fc is not None + concat_pos = fc.index("concat=n=2:v=1:a=0") + sub_pos = fc.index("subtitles=") + assert sub_pos > concat_pos + assert af is None + + def test_speed_and_speed_segments_mutual_exclusion(self) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), 
SpeedSegment(speed=0.5)) + profile = RenderProfile(name="bad", speed=0.5, speed_segments=segs) + with pytest.raises(RenderError, match="mutually exclusive"): + build_profile_filter_chain(profile) + # --------------------------------------------------------------------------- # plan_full_frame @@ -300,7 +364,7 @@ def test_with_subtitle(self, tmp_path: Path) -> None: rendered_subtitle=sub, ) assert plan.filter_complex is not None - assert "ass=" in plan.filter_complex + assert "subtitles=" in plan.filter_complex def test_no_width_height_in_plan(self, tmp_path: Path) -> None: profile = RenderProfile(name="full", width=1080, height=1920) @@ -322,6 +386,22 @@ def test_invalid_speed_low(self, tmp_path: Path) -> None: with pytest.raises(RenderError, match="Speed must be"): plan_full_frame(tmp_path / "clip.mkv", tmp_path / "out.mp4", profile, config) + def test_speed_segments(self, tmp_path: Path) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + profile = RenderProfile(name="var", speed_segments=segs) + config = AppConfig() + plan = plan_full_frame(tmp_path / "clip.mkv", tmp_path / "out.mp4", profile, config) + assert plan.filter_complex is not None + assert "split=2" in plan.filter_complex + assert plan.audio_filter is None + + def test_speed_and_speed_segments_mutual_exclusion(self, tmp_path: Path) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + profile = RenderProfile(name="bad", speed=0.5, speed_segments=segs) + config = AppConfig() + with pytest.raises(RenderError, match="mutually exclusive"): + plan_full_frame(tmp_path / "clip.mkv", tmp_path / "out.mp4", profile, config) + # --------------------------------------------------------------------------- # resolve_subtitle_for_profile diff --git a/tests/unit/core/test_prompts.py b/tests/unit/core/test_prompts.py index fa533e4..d0a00a0 100644 --- a/tests/unit/core/test_prompts.py +++ b/tests/unit/core/test_prompts.py @@ -22,6 +22,7 @@ prompt_sport, 
prompt_team, prompt_thumbnail, + prompt_tournament, prompt_venue, ) from reeln.models.team import TeamProfile @@ -434,6 +435,42 @@ def test_create_team_interactive_short_name_cancelled(mock_questionary: MagicMoc create_team_interactive("bantam", "home") +# --------------------------------------------------------------------------- +# prompt_tournament +# --------------------------------------------------------------------------- + + +def test_prompt_tournament_preset_returns_immediately() -> None: + assert prompt_tournament(preset="Stars Cup") == "Stars Cup" + + +def test_prompt_tournament_preset_empty_string_returns_immediately() -> None: + assert prompt_tournament(preset="") == "" + + +def test_prompt_tournament_interactive(mock_questionary: MagicMock) -> None: + mock_questionary.text.return_value.ask.return_value = "2026 Stars of Tomorrow" + with patch("reeln.core.prompts._require_questionary", return_value=mock_questionary): + result = prompt_tournament() + assert result == "2026 Stars of Tomorrow" + + +def test_prompt_tournament_skipped_returns_empty(mock_questionary: MagicMock) -> None: + """Tournament is optional — empty string is accepted.""" + mock_questionary.text.return_value.ask.return_value = "" + with patch("reeln.core.prompts._require_questionary", return_value=mock_questionary): + result = prompt_tournament() + assert result == "" + + +def test_prompt_tournament_cancelled_returns_empty(mock_questionary: MagicMock) -> None: + """Tournament is optional — cancellation returns empty, not PromptAborted.""" + mock_questionary.text.return_value.ask.return_value = None + with patch("reeln.core.prompts._require_questionary", return_value=mock_questionary): + result = prompt_tournament() + assert result == "" + + # --------------------------------------------------------------------------- # prompt_period_length # --------------------------------------------------------------------------- @@ -564,6 +601,7 @@ def test_collect_all_presets_no_import_needed() -> 
None: period_length=15, description="Big game", thumbnail="/tmp/thumb.jpg", + tournament="Stars Cup", ) assert result["home"] == "eagles" assert result["away"] == "bears" @@ -574,6 +612,7 @@ def test_collect_all_presets_no_import_needed() -> None: assert result["period_length"] == 15 assert result["description"] == "Big game" assert result["thumbnail"] == "/tmp/thumb.jpg" + assert result["tournament"] == "Stars Cup" assert result["home_profile"] is None assert result["away_profile"] is None @@ -590,6 +629,7 @@ def test_collect_no_profiles_when_both_preset() -> None: period_length=15, description="", thumbnail="", + tournament="", ) assert result["home_profile"] is None assert result["away_profile"] is None @@ -610,6 +650,7 @@ def test_collect_with_profiles(mock_questionary: MagicMock) -> None: patch("reeln.core.prompts.prompt_period_length", return_value=15), patch("reeln.core.prompts.prompt_description", return_value="Test desc"), patch("reeln.core.prompts.prompt_thumbnail", return_value="/tmp/t.jpg"), + patch("reeln.core.prompts.prompt_tournament", return_value="Stars Cup"), ): result = collect_game_info_interactive() @@ -624,6 +665,31 @@ def test_collect_with_profiles(mock_questionary: MagicMock) -> None: assert result["period_length"] == 15 assert result["description"] == "Test desc" assert result["thumbnail"] == "/tmp/t.jpg" + assert result["tournament"] == "Stars Cup" + + +def test_collect_level_preset_passed_through() -> None: + """When level is provided, it's passed as preset to prompt_level.""" + home_prof = TeamProfile(team_name="North", short_name="NOR", level="2016") + away_prof = TeamProfile(team_name="South", short_name="SOU", level="2016") + + with ( + patch("reeln.core.prompts.prompt_sport", return_value="hockey"), + patch("reeln.core.prompts.prompt_level", return_value="2016") as mock_level, + patch("reeln.core.prompts.prompt_team", side_effect=[home_prof, away_prof]), + patch("reeln.core.prompts.prompt_date", return_value="2026-03-22"), + 
patch("reeln.core.prompts.prompt_venue", return_value=""), + patch("reeln.core.prompts.prompt_game_time", return_value=""), + patch("reeln.core.prompts.prompt_period_length", return_value=15), + patch("reeln.core.prompts.prompt_description", return_value=""), + patch("reeln.core.prompts.prompt_thumbnail", return_value=""), + patch("reeln.core.prompts.prompt_tournament", return_value=""), + ): + result = collect_game_info_interactive(level="2016") + + mock_level.assert_called_once_with(preset="2016") + assert result["home"] == "North" + assert result["away"] == "South" def test_collect_with_profiles_and_presets() -> None: @@ -640,6 +706,7 @@ def test_collect_with_profiles_and_presets() -> None: patch("reeln.core.prompts.prompt_period_length", return_value=15), patch("reeln.core.prompts.prompt_description", return_value=""), patch("reeln.core.prompts.prompt_thumbnail", return_value=""), + patch("reeln.core.prompts.prompt_tournament", return_value=""), ): result = collect_game_info_interactive( home="roseville", @@ -672,6 +739,7 @@ def test_collect_with_home_prompted_away_preset() -> None: patch("reeln.core.prompts.prompt_period_length", return_value=15), patch("reeln.core.prompts.prompt_description", return_value=""), patch("reeln.core.prompts.prompt_thumbnail", return_value=""), + patch("reeln.core.prompts.prompt_tournament", return_value=""), ): result = collect_game_info_interactive( home=None, @@ -714,6 +782,7 @@ def test_collect_all_interactive() -> None: patch("reeln.core.prompts.prompt_period_length", return_value=15), patch("reeln.core.prompts.prompt_description", return_value="Game day"), patch("reeln.core.prompts.prompt_thumbnail", return_value=""), + patch("reeln.core.prompts.prompt_tournament", return_value="Stars Cup"), ): result = collect_game_info_interactive() @@ -726,5 +795,6 @@ def test_collect_all_interactive() -> None: assert result["period_length"] == 15 assert result["description"] == "Game day" assert result["thumbnail"] == "" + assert 
result["tournament"] == "Stars Cup" assert result["home_profile"] is home_prof assert result["away_profile"] is away_prof diff --git a/tests/unit/core/test_prune.py b/tests/unit/core/test_prune.py index 367ceee..702cb81 100644 --- a/tests/unit/core/test_prune.py +++ b/tests/unit/core/test_prune.py @@ -378,6 +378,47 @@ def test_prune_game_no_debug_dir(tmp_path: Path) -> None: assert len(result.removed_paths) == 0 +def test_prune_game_removes_outputs_contents(tmp_path: Path) -> None: + """Prune removes files inside the outputs/ directory.""" + state = _make_state( + finished=True, + finished_at="2026-02-26T14:00:00+00:00", + segment_outputs=["period-1_2026-02-26.mkv"], + highlights_output="roseville_vs_mahtomedi_2026-02-26.mkv", + ) + _write_state(tmp_path, state) + + outputs = tmp_path / "outputs" + outputs.mkdir() + (outputs / "period-1_2026-02-26.mkv").write_bytes(b"x" * 200) + (outputs / "roseville_vs_mahtomedi_2026-02-26.mkv").write_bytes(b"x" * 300) + + result, _messages = prune_game(tmp_path) + + assert not (outputs / "period-1_2026-02-26.mkv").exists() + assert not (outputs / "roseville_vs_mahtomedi_2026-02-26.mkv").exists() + assert len(result.removed_paths) == 2 + assert result.bytes_freed == 500 + # Empty outputs dir should be cleaned up + assert not outputs.exists() + + +def test_prune_game_outputs_dry_run(tmp_path: Path) -> None: + """Dry run reports outputs/ files but does not remove them.""" + state = _make_state(finished=True, finished_at="2026-02-26T14:00:00+00:00") + _write_state(tmp_path, state) + + outputs = tmp_path / "outputs" + outputs.mkdir() + (outputs / "period-1_2026-02-26.mkv").write_bytes(b"x" * 200) + + result, messages = prune_game(tmp_path, dry_run=True) + + assert (outputs / "period-1_2026-02-26.mkv").exists() + assert len(result.removed_paths) == 1 + assert any("Would remove" in m for m in messages) + + def test_prune_game_ignores_non_video_non_temp(tmp_path: Path) -> None: """Non-video, non-temp files should be left alone.""" state 
= _make_state(finished=True, finished_at="2026-02-26T14:00:00+00:00") diff --git a/tests/unit/core/test_renderer.py b/tests/unit/core/test_renderer.py index 987c8a7..16a0ca5 100644 --- a/tests/unit/core/test_renderer.py +++ b/tests/unit/core/test_renderer.py @@ -269,3 +269,167 @@ def test_render_emits_pre_and_post_render_hooks(tmp_path: Path) -> None: assert emitted[1].hook is Hook.POST_RENDER assert emitted[1].data["plan"] is plan assert isinstance(emitted[1].data["result"], RenderResult) + + +# --------------------------------------------------------------------------- +# extract_frames +# --------------------------------------------------------------------------- + + +def test_extract_frames_single(tmp_path: Path) -> None: + renderer = FFmpegRenderer(ffmpeg_path=Path("/usr/bin/ffmpeg")) + out_dir = tmp_path / "frames" + out_dir.mkdir() + + with ( + patch("reeln.core.renderer.probe_duration", return_value=10.0), + patch("reeln.core.renderer.probe_resolution", return_value=(1920, 1080)), + patch("reeln.core.renderer.probe_fps", return_value=59.94), + patch("reeln.core.renderer.run_ffmpeg") as mock_run, + ): + mock_run.return_value = _mock_ffmpeg_success() + result = renderer.extract_frames(tmp_path / "clip.mkv", 1, out_dir) + + assert len(result.frame_paths) == 1 + assert len(result.timestamps) == 1 + assert result.timestamps[0] == pytest.approx(5.0) + assert result.source_width == 1920 + assert result.source_height == 1080 + assert result.duration == 10.0 + assert result.fps == 59.94 + mock_run.assert_called_once() + + +def test_extract_frames_multiple(tmp_path: Path) -> None: + renderer = FFmpegRenderer(ffmpeg_path=Path("/usr/bin/ffmpeg")) + out_dir = tmp_path / "frames" + out_dir.mkdir() + + with ( + patch("reeln.core.renderer.probe_duration", return_value=30.0), + patch("reeln.core.renderer.probe_resolution", return_value=(1920, 1080)), + patch("reeln.core.renderer.probe_fps", return_value=30.0), + patch("reeln.core.renderer.run_ffmpeg") as mock_run, + ): + 
mock_run.return_value = _mock_ffmpeg_success() + result = renderer.extract_frames(tmp_path / "clip.mkv", 5, out_dir) + + assert len(result.frame_paths) == 5 + assert len(result.timestamps) == 5 + # Timestamps should be evenly spaced: 5.0, 10.0, 15.0, 20.0, 25.0 + assert result.timestamps[0] == pytest.approx(5.0) + assert result.timestamps[4] == pytest.approx(25.0) + assert mock_run.call_count == 5 + + +def test_extract_frames_no_duration_raises(tmp_path: Path) -> None: + renderer = FFmpegRenderer(ffmpeg_path=Path("/usr/bin/ffmpeg")) + out_dir = tmp_path / "frames" + out_dir.mkdir() + + from reeln.core.errors import RenderError + + with ( + patch("reeln.core.renderer.probe_duration", return_value=None), + pytest.raises(RenderError, match="Cannot probe duration"), + ): + renderer.extract_frames(tmp_path / "clip.mkv", 3, out_dir) + + +def test_extract_frames_zero_duration_raises(tmp_path: Path) -> None: + renderer = FFmpegRenderer(ffmpeg_path=Path("/usr/bin/ffmpeg")) + out_dir = tmp_path / "frames" + out_dir.mkdir() + + from reeln.core.errors import RenderError + + with ( + patch("reeln.core.renderer.probe_duration", return_value=0.0), + pytest.raises(RenderError, match="Cannot probe duration"), + ): + renderer.extract_frames(tmp_path / "clip.mkv", 3, out_dir) + + +def test_extract_frames_no_resolution_raises(tmp_path: Path) -> None: + renderer = FFmpegRenderer(ffmpeg_path=Path("/usr/bin/ffmpeg")) + out_dir = tmp_path / "frames" + out_dir.mkdir() + + from reeln.core.errors import RenderError + + with ( + patch("reeln.core.renderer.probe_duration", return_value=10.0), + patch("reeln.core.renderer.probe_resolution", return_value=None), + pytest.raises(RenderError, match="Cannot probe resolution"), + ): + renderer.extract_frames(tmp_path / "clip.mkv", 3, out_dir) + + +def test_extract_frames_fps_fallback(tmp_path: Path) -> None: + """When fps probe returns None, fall back to 30.0.""" + renderer = FFmpegRenderer(ffmpeg_path=Path("/usr/bin/ffmpeg")) + out_dir = tmp_path / 
"frames" + out_dir.mkdir() + + with ( + patch("reeln.core.renderer.probe_duration", return_value=10.0), + patch("reeln.core.renderer.probe_resolution", return_value=(1920, 1080)), + patch("reeln.core.renderer.probe_fps", return_value=None), + patch("reeln.core.renderer.run_ffmpeg") as mock_run, + ): + mock_run.return_value = _mock_ffmpeg_success() + result = renderer.extract_frames(tmp_path / "clip.mkv", 2, out_dir) + + assert result.fps == 30.0 + + +def test_extract_frames_uses_build_extract_command(tmp_path: Path) -> None: + """Verify the correct ffmpeg command is constructed.""" + renderer = FFmpegRenderer(ffmpeg_path=Path("/usr/bin/ffmpeg")) + out_dir = tmp_path / "frames" + out_dir.mkdir() + + with ( + patch("reeln.core.renderer.probe_duration", return_value=10.0), + patch("reeln.core.renderer.probe_resolution", return_value=(1920, 1080)), + patch("reeln.core.renderer.probe_fps", return_value=60.0), + patch("reeln.core.renderer.run_ffmpeg") as mock_run, + ): + mock_run.return_value = _mock_ffmpeg_success() + renderer.extract_frames(tmp_path / "clip.mkv", 1, out_dir) + + call_args = mock_run.call_args[0][0] + assert "-ss" in call_args + assert "-frames:v" in call_args + assert "1" in call_args + + +def test_render_emit_hooks_false_suppresses_hooks(tmp_path: Path) -> None: + """When emit_hooks=False, PRE_RENDER and POST_RENDER hooks are not emitted.""" + plan = _make_plan(tmp_path) + renderer = FFmpegRenderer(ffmpeg_path=Path("/usr/bin/ffmpeg")) + + emitted: list[HookContext] = [] + registry = get_registry() + registry.register(Hook.PRE_RENDER, emitted.append) + registry.register(Hook.POST_RENDER, emitted.append) + + with ( + patch("reeln.core.renderer.run_ffmpeg"), + patch("reeln.core.renderer.probe_duration", return_value=10.0), + ): + result = renderer.render(plan, emit_hooks=False) + + assert isinstance(result, RenderResult) + assert result.duration_seconds == 10.0 + assert len(emitted) == 0 + + +def test_ffmpeg_renderer_satisfies_protocol_with_extract() -> 
None: + """Verify FFmpegRenderer still satisfies the Renderer protocol after adding extract_frames.""" + from reeln.core.renderer import Renderer + + def _accept_renderer(r: Renderer) -> None: + pass + + _accept_renderer(FFmpegRenderer(ffmpeg_path=Path("/usr/bin/ffmpeg"))) diff --git a/tests/unit/core/test_shorts.py b/tests/unit/core/test_shorts.py index 98a86e4..4e06c7c 100644 --- a/tests/unit/core/test_shorts.py +++ b/tests/unit/core/test_shorts.py @@ -8,19 +8,28 @@ from reeln.core.errors import RenderError from reeln.core.shorts import ( + _build_atempo_chain, + _escape_filter_path, + _resolve_smart, + _round_even, build_audio_speed_filter, build_crop_filter, build_filter_chain, build_final_scale_filter, build_lut_filter, + build_overflow_crop_filter, build_pad_filter, build_scale_filter, build_speed_filter, + build_speed_segments_filters, build_subtitle_filter, + compute_speed_segments_duration, plan_preview, plan_short, validate_short_config, + validate_speed_segments, ) +from reeln.models.profile import SpeedSegment from reeln.models.short import CropMode, ShortConfig @@ -108,6 +117,36 @@ def test_validate_anchor_y_above(tmp_path: Path) -> None: validate_short_config(_cfg(tmp_path, anchor_y=1.1)) +def test_validate_scale_too_low(tmp_path: Path) -> None: + with pytest.raises(RenderError, match=r"Scale must be 0\.5-3\.0"): + validate_short_config(_cfg(tmp_path, scale=0.4)) + + +def test_validate_scale_too_high(tmp_path: Path) -> None: + with pytest.raises(RenderError, match=r"Scale must be 0\.5-3\.0"): + validate_short_config(_cfg(tmp_path, scale=3.1)) + + +def test_validate_scale_at_bounds(tmp_path: Path) -> None: + validate_short_config(_cfg(tmp_path, scale=0.5)) + validate_short_config(_cfg(tmp_path, scale=3.0)) + + +def test_validate_smart_zoom_frames_too_low(tmp_path: Path) -> None: + with pytest.raises(RenderError, match="Smart zoom frames must be 1-20"): + validate_short_config(_cfg(tmp_path, smart_zoom_frames=0)) + + +def 
test_validate_smart_zoom_frames_too_high(tmp_path: Path) -> None: + with pytest.raises(RenderError, match="Smart zoom frames must be 1-20"): + validate_short_config(_cfg(tmp_path, smart_zoom_frames=21)) + + +def test_validate_smart_zoom_frames_at_bounds(tmp_path: Path) -> None: + validate_short_config(_cfg(tmp_path, smart_zoom_frames=1)) + validate_short_config(_cfg(tmp_path, smart_zoom_frames=20)) + + def test_validate_lut_bad_suffix(tmp_path: Path) -> None: with pytest.raises(RenderError, match=r"LUT file must be \.cube or \.3dl"): validate_short_config(_cfg(tmp_path, lut=tmp_path / "grade.png")) @@ -203,12 +242,137 @@ def test_build_audio_speed_filter_fast() -> None: def test_build_lut_filter() -> None: result = build_lut_filter(Path("/tmp/grade.cube")) - assert result == "lut3d='/tmp/grade.cube'" + assert result == "lut3d=/tmp/grade.cube" def test_build_subtitle_filter() -> None: result = build_subtitle_filter(Path("/tmp/subs.ass")) - assert result == "ass='/tmp/subs.ass'" + assert result == "subtitles=f=/tmp/subs.ass" + + +def test_escape_filter_path_no_special_chars() -> None: + assert _escape_filter_path(Path("/tmp/video.mp4")) == "/tmp/video.mp4" + + +def test_escape_filter_path_all_special_chars() -> None: + """Backslash, colon, quote, brackets, semicolon, comma are all escaped.""" + result = _escape_filter_path(Path("/a:b'c[d]e;f,g")) + assert result == "/a\\:b\\'c\\[d\\]e\\;f\\,g" + + +def test_escape_filter_path_backslash_first() -> None: + """Backslash is escaped before other chars to avoid double-escaping.""" + result = _escape_filter_path(Path("/a\\:b")) + assert result == "/a\\\\\\:b" + + +def test_build_lut_filter_special_path() -> None: + result = build_lut_filter(Path("/path/to:grade.cube")) + assert result == "lut3d=/path/to\\:grade.cube" + + +def test_build_subtitle_filter_special_path() -> None: + result = build_subtitle_filter(Path("/path/to:subs.ass")) + assert result == "subtitles=f=/path/to\\:subs.ass" + + +# 
--------------------------------------------------------------------------- +# _round_even +# --------------------------------------------------------------------------- + + +def test_round_even_already_even() -> None: + assert _round_even(1080) == 1080 + + +def test_round_even_odd() -> None: + assert _round_even(1081) == 1082 + + +def test_round_even_zero() -> None: + assert _round_even(0) == 0 + + +# --------------------------------------------------------------------------- +# build_scale_filter with scale +# --------------------------------------------------------------------------- + + +def test_build_scale_filter_pad_with_scale() -> None: + result = build_scale_filter(crop_mode=CropMode.PAD, target_width=1080, target_height=1920, scale=1.3) + # 1080 * 1.3 = 1404 + assert result == "scale=1404:-2:flags=lanczos" + + +def test_build_scale_filter_crop_with_scale() -> None: + result = build_scale_filter(crop_mode=CropMode.CROP, target_width=1080, target_height=1920, scale=1.3) + # 1920 * 1.3 = 2496 + assert result == "scale=-2:2496:flags=lanczos" + + +def test_build_scale_filter_pad_scale_rounds_even() -> None: + # 1080 * 1.1 = 1188 (already even) + result = build_scale_filter(crop_mode=CropMode.PAD, target_width=1080, target_height=1920, scale=1.1) + assert result == "scale=1188:-2:flags=lanczos" + + +def test_build_scale_filter_crop_scale_rounds_odd_up() -> None: + # 1920 * 1.05 = 2016 (even) + result = build_scale_filter(crop_mode=CropMode.CROP, target_width=1080, target_height=1920, scale=1.05) + assert result == "scale=-2:2016:flags=lanczos" + + +# --------------------------------------------------------------------------- +# build_overflow_crop_filter +# --------------------------------------------------------------------------- + + +def test_build_overflow_crop_filter_structure() -> None: + result = build_overflow_crop_filter(target_width=1080, target_height=1920) + assert "min(iw,1080)" in result + assert "min(ih,1920)" in result + assert "crop=" in result 
+ + +# --------------------------------------------------------------------------- +# _resolve_smart +# --------------------------------------------------------------------------- + + +def test_resolve_smart_pad_no_smart() -> None: + mode, smart = _resolve_smart(CropMode.PAD, False) + assert mode == CropMode.PAD + assert smart is False + + +def test_resolve_smart_crop_no_smart() -> None: + mode, smart = _resolve_smart(CropMode.CROP, False) + assert mode == CropMode.CROP + assert smart is False + + +def test_resolve_smart_pad_with_smart_flag() -> None: + mode, smart = _resolve_smart(CropMode.PAD, True) + assert mode == CropMode.PAD + assert smart is True + + +def test_resolve_smart_crop_with_smart_flag() -> None: + mode, smart = _resolve_smart(CropMode.CROP, True) + assert mode == CropMode.CROP + assert smart is True + + +def test_resolve_smart_deprecated_smart() -> None: + mode, smart = _resolve_smart(CropMode.SMART, False) + assert mode == CropMode.CROP + assert smart is True + + +def test_resolve_smart_deprecated_smart_pad() -> None: + mode, smart = _resolve_smart(CropMode.SMART_PAD, False) + assert mode == CropMode.PAD + assert smart is True # --------------------------------------------------------------------------- @@ -245,7 +409,7 @@ def test_build_filter_chain_with_lut(tmp_path: Path) -> None: lut = tmp_path / "grade.cube" cfg = _cfg(tmp_path, lut=lut) chain, audio = build_filter_chain(cfg) - assert chain.startswith(f"lut3d='{lut}'") + assert chain.startswith(f"lut3d={lut}") assert audio is None @@ -253,7 +417,7 @@ def test_build_filter_chain_with_subtitle(tmp_path: Path) -> None: sub = tmp_path / "subs.ass" cfg = _cfg(tmp_path, subtitle=sub) chain, _audio = build_filter_chain(cfg) - assert chain.endswith(f"ass='{sub}'") + assert chain.endswith(f"subtitles=f={sub}") def test_build_filter_chain_pad_full(tmp_path: Path) -> None: @@ -263,7 +427,7 @@ def test_build_filter_chain_pad_full(tmp_path: Path) -> None: cfg = _cfg(tmp_path, speed=0.5, lut=lut, 
subtitle=sub) chain, audio = build_filter_chain(cfg) expected = ( - f"lut3d='{lut}',setpts=PTS/0.5,scale=1080:-2:flags=lanczos,pad=1080:1920:(ow-iw)/2:(oh-ih)/2:black,ass='{sub}'" + f"lut3d={lut},setpts=PTS/0.5,scale=1080:-2:flags=lanczos,pad=1080:1920:(ow-iw)/2:(oh-ih)/2:black,subtitles=f={sub}" ) assert chain == expected assert audio == "atempo=0.5" @@ -283,12 +447,12 @@ def test_build_filter_chain_crop_full(tmp_path: Path) -> None: ) chain, audio = build_filter_chain(cfg) expected = ( - f"lut3d='{lut}'," + f"lut3d={lut}," "setpts=PTS/0.5," "scale=-2:1920:flags=lanczos," "crop=w=ih*1080/1920:h=ih:x=(iw-ih*1080/1920)*0.3:y=0," "scale=1080:1920:flags=lanczos," - f"ass='{sub}'" + f"subtitles=f={sub}" ) assert chain == expected assert audio == "atempo=0.5" @@ -300,6 +464,210 @@ def test_build_filter_chain_square_format(tmp_path: Path) -> None: assert "pad=1080:1080" in chain +# --------------------------------------------------------------------------- +# Filter chain: scale combinations +# --------------------------------------------------------------------------- + + +def test_build_filter_chain_pad_with_scale(tmp_path: Path) -> None: + """Pad + scale>1.0: scale up, overflow crop, then pad.""" + cfg = _cfg(tmp_path, scale=1.3) + chain, _ = build_filter_chain(cfg) + # Scale to 1404 (1080*1.3) + assert "scale=1404:-2:flags=lanczos" in chain + # Overflow crop back to 1080x1920 + assert "min(iw,1080)" in chain + assert "min(ih,1920)" in chain + # Then pad + assert "pad=1080:1920" in chain + + +def test_build_filter_chain_crop_with_scale(tmp_path: Path) -> None: + """Crop + scale>1.0: scale to larger height, crop, final scale.""" + cfg = _cfg(tmp_path, crop_mode=CropMode.CROP, scale=1.3) + chain, _ = build_filter_chain(cfg) + # Scale to 2496 (1920*1.3) + assert "scale=-2:2496:flags=lanczos" in chain + # Static crop + assert "crop=w=ih*1080/1920" in chain + # Final scale + assert "scale=1080:1920:flags=lanczos" in chain + # No overflow crop + assert "min(iw," not in 
chain + + +def test_build_filter_chain_pad_scale_1_no_overflow_crop(tmp_path: Path) -> None: + """Pad at scale=1.0 should NOT have overflow crop.""" + cfg = _cfg(tmp_path) + chain, _ = build_filter_chain(cfg) + assert "min(iw," not in chain + + +def test_build_filter_chain_smart_flag_pad(tmp_path: Path) -> None: + """smart=True + pad uses smart pad filter with height-based scale.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.4),), + source_width=1920, + source_height=1080, + ) + cfg = _cfg(tmp_path, smart=True) + chain, _ = build_filter_chain(cfg, zoom_path=zp) + # Smart pad scales by height for horizontal panning room + assert "scale=-2:1920:flags=lanczos" in chain + # Smart pad uses overlay on colour background + assert "color=c=black:s=1080x1920" in chain + assert "[_bg][_fg]overlay=" in chain + + +def test_build_filter_chain_smart_pad_with_subtitle(tmp_path: Path) -> None: + """smart pad + subtitle routes subtitle through post_filters.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + sub = tmp_path / "overlay.ass" + sub.write_text("[Script Info]\n") + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + cfg = _cfg(tmp_path, smart=True, subtitle=sub) + chain, audio = build_filter_chain(cfg, zoom_path=zp) + assert "subtitles=" in chain + assert "format=yuv420p" in chain + assert audio is None + + +def test_build_filter_chain_smart_pad_with_speed(tmp_path: Path) -> None: + """smart pad + speed!=1.0 returns audio_filter.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + cfg = _cfg(tmp_path, smart=True, speed=0.5) + chain, audio = build_filter_chain(cfg, zoom_path=zp) + assert "overlay=" in chain + assert 
audio == "atempo=0.5" + + +def test_build_filter_chain_smart_flag_crop(tmp_path: Path) -> None: + """smart=True + crop uses smart crop filter.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + cfg = _cfg(tmp_path, crop_mode=CropMode.CROP, smart=True) + chain, _ = build_filter_chain(cfg, zoom_path=zp) + assert "scale=-2:1920:flags=lanczos" in chain + # Smart crop uses dynamic expressions + assert "crop=w=" in chain + assert "scale=1080:1920:flags=lanczos" in chain + + +def test_build_filter_chain_smart_flag_no_zoom_path_raises(tmp_path: Path) -> None: + """smart=True without zoom_path raises RenderError.""" + cfg = _cfg(tmp_path, smart=True) + with pytest.raises(RenderError, match="Smart crop mode requires a zoom path"): + build_filter_chain(cfg) + + +def test_build_filter_chain_smart_with_scale(tmp_path: Path) -> None: + """smart + crop + scale>1.0: bigger scale, then smart crop.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + cfg = _cfg(tmp_path, crop_mode=CropMode.CROP, smart=True, scale=1.3) + chain, _ = build_filter_chain(cfg, zoom_path=zp) + # Scale is 1920*1.3 = 2496 + assert "scale=-2:2496:flags=lanczos" in chain + # Smart crop + assert "crop=w=" in chain + + +def test_build_filter_chain_deprecated_smart_backward_compat(tmp_path: Path) -> None: + """CropMode.SMART still produces crop + smart crop.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART) + chain, _ = build_filter_chain(cfg, zoom_path=zp) + assert "scale=-2:1920:flags=lanczos" in chain + 
assert "crop=w=" in chain + assert "scale=1080:1920:flags=lanczos" in chain + + +def test_build_filter_chain_deprecated_smart_pad_backward_compat(tmp_path: Path) -> None: + """CropMode.SMART_PAD still produces pad + smart pad with height-based scale.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.4),), + source_width=1920, + source_height=1080, + ) + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART_PAD) + chain, _ = build_filter_chain(cfg, zoom_path=zp) + # Smart pad scales by height + assert "scale=-2:1920:flags=lanczos" in chain + assert "color=c=black:s=1080x1920" in chain + assert "[_bg][_fg]overlay=" in chain + + +def test_build_filter_chain_deprecated_smart_no_zoom_path_raises(tmp_path: Path) -> None: + """CropMode.SMART without zoom_path raises.""" + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART) + with pytest.raises(RenderError, match="Smart crop mode requires a zoom path"): + build_filter_chain(cfg) + + +def test_build_filter_chain_deprecated_smart_pad_no_zoom_path_raises(tmp_path: Path) -> None: + """CropMode.SMART_PAD without zoom_path raises.""" + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART_PAD) + with pytest.raises(RenderError, match="Smart crop mode requires a zoom path"): + build_filter_chain(cfg) + + +def test_build_filter_chain_pad_smart_with_scale(tmp_path: Path) -> None: + """Pad + smart + scale>1.0: scales by height*scale, no overflow crop.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.4),), + source_width=1920, + source_height=1080, + ) + cfg = _cfg(tmp_path, smart=True, scale=1.3) + chain, _ = build_filter_chain(cfg, zoom_path=zp) + # Smart pad scales by height * scale = 1920 * 1.3 = 2496 + assert "scale=-2:2496:flags=lanczos" in chain + # No overflow crop in smart pad — the overlay handles clipping + assert "min(iw," not in chain + assert 
"[_bg][_fg]overlay=" in chain # smart pad via overlay + + # --------------------------------------------------------------------------- # plan_short # --------------------------------------------------------------------------- @@ -399,3 +767,494 @@ def test_plan_preview_even_rounding(tmp_path: Path) -> None: plan = plan_preview(cfg) assert plan.width == 540 assert plan.height == 542 # 1082 // 2 = 541, + 1 = 542 + + +def test_plan_preview_propagates_scale(tmp_path: Path) -> None: + cfg = _cfg(tmp_path, scale=1.3) + plan = plan_preview(cfg) + assert plan.filter_complex is not None + # Preview is half-res (540), scaled by 1.3 = 702 + assert "scale=702:-2:flags=lanczos" in plan.filter_complex + + +def test_plan_preview_propagates_smart(tmp_path: Path) -> None: + """Preview with smart=True but no zoom_path raises.""" + cfg = _cfg(tmp_path, smart=True) + with pytest.raises(RenderError, match="Smart crop mode requires a zoom path"): + plan_preview(cfg) + + +def test_plan_preview_propagates_speed_segments(tmp_path: Path) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs) + plan = plan_preview(cfg) + assert plan.filter_complex is not None + assert "split=2" in plan.filter_complex + assert "asplit=2" in plan.filter_complex + assert plan.audio_filter is None + + +# --------------------------------------------------------------------------- +# validate_speed_segments +# --------------------------------------------------------------------------- + + +def test_compute_speed_segments_duration_basic() -> None: + """Output duration accounts for speed changes in each segment.""" + segs = ( + SpeedSegment(speed=1.0, until=5.0), + SpeedSegment(speed=0.5, until=8.0), + SpeedSegment(speed=1.0), + ) + # 5s@1x + 3s@0.5x + 2s@1x = 5 + 6 + 2 = 13 + assert compute_speed_segments_duration(segs, 10.0) == pytest.approx(13.0) + + +def test_compute_speed_segments_duration_all_normal() -> None: + """All-1x segments produce 
same duration as source.""" + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=1.0)) + assert compute_speed_segments_duration(segs, 10.0) == pytest.approx(10.0) + + +def test_compute_speed_segments_duration_all_slow() -> None: + """All-0.5x produces double the source duration.""" + segs = (SpeedSegment(speed=0.5, until=5.0), SpeedSegment(speed=0.5)) + assert compute_speed_segments_duration(segs, 10.0) == pytest.approx(20.0) + + +def test_validate_speed_segments_valid_two() -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + validate_speed_segments(segs) + + +def test_validate_speed_segments_valid_three() -> None: + segs = ( + SpeedSegment(speed=1.0, until=5.0), + SpeedSegment(speed=0.5, until=8.0), + SpeedSegment(speed=1.0), + ) + validate_speed_segments(segs) + + +def test_validate_speed_segments_single_segment() -> None: + with pytest.raises(RenderError, match="at least 2 segments"): + validate_speed_segments((SpeedSegment(speed=0.5),)) + + +def test_validate_speed_segments_last_has_until() -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5, until=10.0)) + with pytest.raises(RenderError, match="last segment must have until=None"): + validate_speed_segments(segs) + + +def test_validate_speed_segments_non_last_missing_until() -> None: + segs = (SpeedSegment(speed=1.0), SpeedSegment(speed=0.5)) + with pytest.raises(RenderError, match="must have 'until' set"): + validate_speed_segments(segs) + + +def test_validate_speed_segments_until_not_increasing() -> None: + segs = ( + SpeedSegment(speed=1.0, until=8.0), + SpeedSegment(speed=0.5, until=5.0), + SpeedSegment(speed=1.0), + ) + with pytest.raises(RenderError, match="strictly increasing"): + validate_speed_segments(segs) + + +def test_validate_speed_segments_until_zero() -> None: + segs = (SpeedSegment(speed=1.0, until=0.0), SpeedSegment(speed=0.5)) + with pytest.raises(RenderError, match="must be positive"): + validate_speed_segments(segs) + + 
+def test_validate_speed_segments_until_negative() -> None: + segs = (SpeedSegment(speed=1.0, until=-1.0), SpeedSegment(speed=0.5)) + with pytest.raises(RenderError, match="must be positive"): + validate_speed_segments(segs) + + +def test_validate_speed_segments_speed_too_low() -> None: + segs = (SpeedSegment(speed=0.1, until=5.0), SpeedSegment(speed=1.0)) + with pytest.raises(RenderError, match=r"speed must be 0\.25-4\.0"): + validate_speed_segments(segs) + + +def test_validate_speed_segments_speed_too_high() -> None: + segs = (SpeedSegment(speed=5.0, until=5.0), SpeedSegment(speed=1.0)) + with pytest.raises(RenderError, match=r"speed must be 0\.25-4\.0"): + validate_speed_segments(segs) + + +def test_validate_speed_segments_speed_at_bounds() -> None: + segs = (SpeedSegment(speed=0.25, until=5.0), SpeedSegment(speed=4.0)) + validate_speed_segments(segs) + + +def test_validate_short_config_speed_and_speed_segments_mutual_exclusion(tmp_path: Path) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed=0.5, speed_segments=segs) + with pytest.raises(RenderError, match="mutually exclusive"): + validate_short_config(cfg) + + +def test_validate_short_config_speed_segments_validated(tmp_path: Path) -> None: + """speed_segments on ShortConfig are validated during validate_short_config.""" + segs = (SpeedSegment(speed=0.5),) # single segment → error + cfg = _cfg(tmp_path, speed_segments=segs) + with pytest.raises(RenderError, match="at least 2 segments"): + validate_short_config(cfg) + + +# --------------------------------------------------------------------------- +# _build_atempo_chain +# --------------------------------------------------------------------------- + + +def test_build_atempo_chain_normal() -> None: + assert _build_atempo_chain(1.0) == "atempo=1.0" + + +def test_build_atempo_chain_half() -> None: + assert _build_atempo_chain(0.5) == "atempo=0.5" + + +def test_build_atempo_chain_quarter() -> None: + result 
= _build_atempo_chain(0.25) + assert result == "atempo=0.5,atempo=0.5" + + +def test_build_atempo_chain_fast() -> None: + assert _build_atempo_chain(2.0) == "atempo=2.0" + + +# --------------------------------------------------------------------------- +# build_speed_segments_filters +# --------------------------------------------------------------------------- + + +def test_build_speed_segments_filters_two_segments() -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + video, audio = build_speed_segments_filters(segs) + + # Video: split, trim segments, concat + assert "[_vsrc]split=2[v0][v1]" in video + assert "[v0]trim=0.0:5.0,setpts=PTS-STARTPTS[sv0]" in video + assert "[v1]trim=5.0,setpts=PTS-STARTPTS,setpts=PTS/0.5[sv1]" in video + assert "[sv0][sv1]concat=n=2:v=1:a=0[_vout]" in video + + # Audio: asplit, atrim segments, concat + assert "[_asrc]asplit=2[a0][a1]" in audio + assert "[a0]atrim=0.0:5.0,asetpts=PTS-STARTPTS[sa0]" in audio + assert "[a1]atrim=5.0,asetpts=PTS-STARTPTS,atempo=0.5[sa1]" in audio + assert "[sa0][sa1]concat=n=2:v=0:a=1[_aout]" in audio + + +def test_build_speed_segments_filters_three_segments() -> None: + segs = ( + SpeedSegment(speed=1.0, until=5.0), + SpeedSegment(speed=0.5, until=8.0), + SpeedSegment(speed=1.0), + ) + video, audio = build_speed_segments_filters(segs) + assert "split=3" in video + assert "asplit=3" in audio + assert "concat=n=3:v=1:a=0" in video + assert "concat=n=3:v=0:a=1" in audio + # Middle segment has both trim boundaries + assert "trim=5.0:8.0" in video + assert "atrim=5.0:8.0" in audio + + +def test_build_speed_segments_filters_speed_1_no_extra_setpts() -> None: + """Segments with speed=1.0 omit the speed setpts/atempo.""" + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + video, audio = build_speed_segments_filters(segs) + # v0 (speed=1.0) should NOT have setpts=PTS/1.0 + v0_part = video.split(";")[1] # [v0]trim=... 
+ assert "setpts=PTS/1.0" not in v0_part + # a0 (speed=1.0) should NOT have atempo + a0_part = audio.split(";")[1] + assert "atempo" not in a0_part + + +def test_build_speed_segments_filters_very_slow_speed() -> None: + """Speed < 0.5 uses chained atempo.""" + segs = (SpeedSegment(speed=0.25, until=5.0), SpeedSegment(speed=1.0)) + _, audio = build_speed_segments_filters(segs) + assert "atempo=0.5,atempo=0.5" in audio + + +# --------------------------------------------------------------------------- +# Filter chain with speed_segments +# --------------------------------------------------------------------------- + + +def test_build_filter_chain_speed_segments_pad(tmp_path: Path) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs) + chain, audio = build_filter_chain(cfg) + # Video graph uses filter_complex with split/concat + assert "split=2" in chain + assert "concat=n=2:v=1:a=0" in chain + # Post-speed filters: height-based scale (like smart pad), overflow crop, pad + assert "scale=-2:1920:flags=lanczos" in chain + assert "min(iw,1080)" in chain # overflow crop + assert "pad=1080:1920" in chain + # Audio also in filter_complex + assert "asplit=2" in chain + assert "concat=n=2:v=0:a=1" in chain + # audio_filter is None (audio in filter_complex) + assert audio is None + + +def test_build_filter_chain_speed_segments_crop(tmp_path: Path) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, crop_mode=CropMode.CROP, speed_segments=segs) + chain, audio = build_filter_chain(cfg) + assert "crop=w=ih*1080/1920" in chain + assert "scale=1080:1920:flags=lanczos" in chain + assert audio is None + + +def test_build_filter_chain_speed_segments_with_lut(tmp_path: Path) -> None: + lut = tmp_path / "grade.cube" + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs, lut=lut) + chain, _ = 
build_filter_chain(cfg) + # LUT goes before split + lut_pos = chain.index("lut3d=") + split_pos = chain.index("split=2") + assert lut_pos < split_pos + + +def test_build_filter_chain_speed_segments_with_subtitle(tmp_path: Path) -> None: + sub = tmp_path / "subs.ass" + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs, subtitle=sub) + chain, _ = build_filter_chain(cfg) + # Subtitle goes after concat + concat_pos = chain.index("concat=n=2:v=1:a=0") + sub_pos = chain.index("subtitles=") + assert sub_pos > concat_pos + + +def test_build_filter_chain_speed_segments_with_smart_pad(tmp_path: Path) -> None: + """speed_segments + smart pad uses overlay on colour background after concat.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + zp = ZoomPath( + duration=10.0, + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + ) + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs, smart=True) + chain, audio = build_filter_chain(cfg, zoom_path=zp, source_fps=60.0) + # Audio is embedded in filter_complex + assert audio is None + # Should contain colour source, overlay with t-based expression, and stream labels + assert "color=c=black:s=1080x1920" in chain + assert "overlay=" in chain + assert "[vfinal]" in chain + assert "[afinal]" in chain + # Speed segments split/concat present + assert "split=2" in chain + assert "trim=" in chain + assert "concat=n=2:v=1:a=0" in chain + # Height-based scale for pad mode + assert "scale=-2:" in chain + + +def test_build_filter_chain_speed_segments_smart_pad_with_subtitle(tmp_path: Path) -> None: + """speed_segments + smart pad + subtitle routes subtitle through post-overlay.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + sub = tmp_path / "overlay.ass" + sub.write_text("[Script Info]\n") 
+ zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs, smart=True, subtitle=sub) + chain, audio = build_filter_chain(cfg, zoom_path=zp, source_fps=30.0) + assert audio is None + assert "overlay=" in chain + assert "subtitles=" in chain + assert "format=yuv420p" in chain + assert "[_ov]" in chain + + +def test_build_filter_chain_speed_segments_smart_pad_with_lut(tmp_path: Path) -> None: + """speed_segments + smart pad + LUT wires LUT as pre-filter before split.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + lut = tmp_path / "color.cube" + lut.write_text("LUT\n") + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs, smart=True, lut=lut) + chain, audio = build_filter_chain(cfg, zoom_path=zp, source_fps=30.0) + assert audio is None + assert "lut3d=" in chain + assert "overlay=" in chain + # LUT should appear before split + lut_pos = chain.index("lut3d=") + split_pos = chain.index("split=") + assert lut_pos < split_pos + + +def test_build_filter_chain_speed_segments_smart_crop(tmp_path: Path) -> None: + """speed_segments + smart crop uses smart crop filter after concat.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs, crop_mode=CropMode.CROP, smart=True) + chain, audio = build_filter_chain(cfg, zoom_path=zp, source_fps=30.0) + assert audio is None + assert "[vfinal]" in chain + assert 
"[afinal]" in chain + # Smart crop filter after concat + assert "crop=w=" in chain + assert "scale=1080:1920:flags=lanczos" in chain + + +def test_build_filter_chain_speed_segments_pad_with_scale(tmp_path: Path) -> None: + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs, scale=1.3) + chain, _ = build_filter_chain(cfg) + # Pad + scale > 1.0: height-based scale + overflow crop + assert "min(iw,1080)" in chain + assert "scale=-2:2496:flags=lanczos" in chain + + +# --------------------------------------------------------------------------- +# Branding in filter chains +# --------------------------------------------------------------------------- + + +def test_validate_branding_bad_extension(tmp_path: Path) -> None: + branding = tmp_path / "brand.txt" + branding.write_text("hi") + with pytest.raises(RenderError, match=r"Branding file must be \.ass"): + validate_short_config(_cfg(tmp_path, branding=branding)) + + +def test_validate_branding_good_extension(tmp_path: Path) -> None: + branding = tmp_path / "brand.ass" + branding.write_text("[Script Info]\n") + validate_short_config(_cfg(tmp_path, branding=branding)) + + +def test_build_filter_chain_with_branding_pad(tmp_path: Path) -> None: + """Path 1 (simple pad): branding appended after subtitle.""" + brand = tmp_path / "brand.ass" + brand.write_text("[Script Info]\n") + cfg = _cfg(tmp_path, branding=brand) + chain, _ = build_filter_chain(cfg) + escaped = str(brand).replace(":", "\\:").replace(",", "\\,") + assert f"subtitles=f={escaped}" in chain + + +def test_build_filter_chain_with_branding_crop(tmp_path: Path) -> None: + """Path 1 (simple crop): branding appended after subtitle.""" + brand = tmp_path / "brand.ass" + brand.write_text("[Script Info]\n") + cfg = _cfg(tmp_path, crop_mode=CropMode.CROP, branding=brand) + chain, _ = build_filter_chain(cfg) + assert "subtitles=" in chain + + +def test_build_filter_chain_branding_after_subtitle(tmp_path: Path) 
-> None: + """Path 1: branding filter appears after subtitle filter.""" + sub = tmp_path / "overlay.ass" + sub.write_text("[Script Info]\n") + brand = tmp_path / "brand.ass" + brand.write_text("[Script Info]\n") + cfg = _cfg(tmp_path, subtitle=sub, branding=brand) + chain, _ = build_filter_chain(cfg) + sub_escaped = str(sub).replace(":", "\\:").replace(",", "\\,") + brand_escaped = str(brand).replace(":", "\\:").replace(",", "\\,") + sub_pos = chain.index(f"subtitles=f={sub_escaped}") + brand_pos = chain.index(f"subtitles=f={brand_escaped}") + assert brand_pos > sub_pos + + +def test_build_filter_chain_smart_pad_with_branding(tmp_path: Path) -> None: + """Path 2 (smart pad): branding in post_filters.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + brand = tmp_path / "brand.ass" + brand.write_text("[Script Info]\n") + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + cfg = _cfg(tmp_path, smart=True, branding=brand) + chain, _ = build_filter_chain(cfg, zoom_path=zp) + assert "overlay=" in chain + assert "subtitles=" in chain + + +def test_build_filter_chain_speed_segments_static_with_branding(tmp_path: Path) -> None: + """Path 3 (speed segments static): branding in post list.""" + brand = tmp_path / "brand.ass" + brand.write_text("[Script Info]\n") + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs, branding=brand) + chain, audio = build_filter_chain(cfg) + assert audio is None + assert "subtitles=" in chain + assert "[vfinal]" in chain + + +def test_build_filter_chain_speed_segments_smart_pad_with_branding(tmp_path: Path) -> None: + """Path 4 (speed segments + smart pad): branding in post_overlay.""" + from reeln.models.zoom import ZoomPath, ZoomPoint + + brand = tmp_path / "brand.ass" + brand.write_text("[Script Info]\n") + zp = ZoomPath( + duration=10.0, + points=(ZoomPoint(timestamp=0.0, 
center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + ) + segs = (SpeedSegment(speed=1.0, until=5.0), SpeedSegment(speed=0.5)) + cfg = _cfg(tmp_path, speed_segments=segs, smart=True, branding=brand) + chain, audio = build_filter_chain(cfg, zoom_path=zp, source_fps=30.0) + assert audio is None + assert "overlay=" in chain + assert "subtitles=" in chain + assert "format=yuv420p" in chain + assert "[_ov]" in chain + + +def test_plan_preview_passes_branding(tmp_path: Path) -> None: + """plan_preview passes branding through to the preview config.""" + brand = tmp_path / "brand.ass" + brand.write_text("[Script Info]\n") + cfg = _cfg(tmp_path, branding=brand) + plan = plan_preview(cfg) + assert "subtitles=" in plan.filter_complex diff --git a/tests/unit/core/test_teams.py b/tests/unit/core/test_teams.py index 40518ff..f5a2017 100644 --- a/tests/unit/core/test_teams.py +++ b/tests/unit/core/test_teams.py @@ -1,4 +1,4 @@ -"""Tests for team profile management.""" +"""Tests for team profile management, roster loading, and player lookup.""" from __future__ import annotations @@ -14,11 +14,15 @@ delete_team_profile, list_levels, list_team_profiles, + load_roster, load_team_profile, + lookup_players, + resolve_scoring_team, save_team_profile, slugify, ) -from reeln.models.team import TeamProfile +from reeln.models.game import GameInfo +from reeln.models.team import RosterEntry, TeamProfile # --------------------------------------------------------------------------- # slugify @@ -58,8 +62,8 @@ def test_slugify_numbers() -> None: # --------------------------------------------------------------------------- -def test_teams_base_dir_uses_config_dir(tmp_path: Path) -> None: - with patch("reeln.core.teams.config_dir", return_value=tmp_path): +def test_teams_base_dir_uses_config_base_dir(tmp_path: Path) -> None: + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): assert _teams_base_dir() == tmp_path / "teams" @@ -70,7 +74,7 @@ def 
test_teams_base_dir_uses_config_dir(tmp_path: Path) -> None: def test_save_team_profile_creates_file(tmp_path: Path) -> None: profile = TeamProfile(team_name="Roseville", short_name="ROS", level="bantam") - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = save_team_profile(profile, "roseville") assert result == tmp_path / "teams" / "bantam" / "roseville.json" @@ -84,7 +88,7 @@ def test_save_team_profile_creates_file(tmp_path: Path) -> None: def test_save_team_profile_creates_directories(tmp_path: Path) -> None: profile = TeamProfile(team_name="A", short_name="A", level="varsity") - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = save_team_profile(profile, "a") assert (tmp_path / "teams" / "varsity").is_dir() @@ -94,7 +98,7 @@ def test_save_team_profile_creates_directories(tmp_path: Path) -> None: def test_save_team_profile_overwrites_existing(tmp_path: Path) -> None: profile1 = TeamProfile(team_name="Old", short_name="OLD", level="jv") profile2 = TeamProfile(team_name="New", short_name="NEW", level="jv") - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): save_team_profile(profile1, "team") save_team_profile(profile2, "team") @@ -106,7 +110,7 @@ def test_save_team_profile_overwrites_existing(tmp_path: Path) -> None: def test_save_team_profile_atomic_write_cleanup(tmp_path: Path) -> None: """If write fails, temp file is cleaned up.""" profile = TeamProfile(team_name="A", short_name="A", level="bantam") - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): # Create the directory first (tmp_path / "teams" / "bantam").mkdir(parents=True) with ( @@ -133,7 +137,7 @@ def 
test_load_team_profile_success(tmp_path: Path) -> None: level="bantam", colors=["red"], ) - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): save_team_profile(profile, "roseville") loaded = load_team_profile("bantam", "roseville") @@ -142,7 +146,7 @@ def test_load_team_profile_success(tmp_path: Path) -> None: def test_load_team_profile_missing_raises(tmp_path: Path) -> None: with ( - patch("reeln.core.teams.config_dir", return_value=tmp_path), + patch("reeln.core.teams._config_base_dir", return_value=tmp_path), pytest.raises(ConfigError, match="not found"), ): load_team_profile("bantam", "nonexistent") @@ -154,7 +158,7 @@ def test_load_team_profile_invalid_json(tmp_path: Path) -> None: (level_dir / "bad.json").write_text("not json!", encoding="utf-8") with ( - patch("reeln.core.teams.config_dir", return_value=tmp_path), + patch("reeln.core.teams._config_base_dir", return_value=tmp_path), pytest.raises(ConfigError, match="Failed to read"), ): load_team_profile("bantam", "bad") @@ -166,7 +170,7 @@ def test_load_team_profile_not_a_dict(tmp_path: Path) -> None: (level_dir / "list.json").write_text('["not", "a", "dict"]', encoding="utf-8") with ( - patch("reeln.core.teams.config_dir", return_value=tmp_path), + patch("reeln.core.teams._config_base_dir", return_value=tmp_path), pytest.raises(ConfigError, match="must be a JSON object"), ): load_team_profile("bantam", "list") @@ -179,7 +183,7 @@ def test_load_team_profile_level_fallback(tmp_path: Path) -> None: data = {"team_name": "Roseville", "short_name": "ROS"} (level_dir / "roseville.json").write_text(json.dumps(data), encoding="utf-8") - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): loaded = load_team_profile("bantam", "roseville") assert loaded.level == "bantam" @@ -197,7 +201,7 @@ def 
test_list_team_profiles_returns_sorted_slugs(tmp_path: Path) -> None: (level_dir / "mahtomedi.json").write_text("{}") (level_dir / "white_bear.json").write_text("{}") - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = list_team_profiles("bantam") assert result == ["mahtomedi", "roseville", "white_bear"] @@ -207,14 +211,14 @@ def test_list_team_profiles_empty_dir(tmp_path: Path) -> None: level_dir = tmp_path / "teams" / "bantam" level_dir.mkdir(parents=True) - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = list_team_profiles("bantam") assert result == [] def test_list_team_profiles_missing_dir(tmp_path: Path) -> None: - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = list_team_profiles("nonexistent") assert result == [] @@ -227,7 +231,7 @@ def test_list_team_profiles_ignores_non_json(tmp_path: Path) -> None: (level_dir / "notes.txt").write_text("some notes") (level_dir / "subdir").mkdir() - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = list_team_profiles("bantam") assert result == ["roseville"] @@ -244,14 +248,14 @@ def test_list_levels_returns_sorted(tmp_path: Path) -> None: (base / "bantam").mkdir() (base / "jv").mkdir() - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = list_levels() assert result == ["bantam", "jv", "varsity"] def test_list_levels_empty(tmp_path: Path) -> None: - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = list_levels() assert result == [] @@ 
-263,7 +267,7 @@ def test_list_levels_ignores_files(tmp_path: Path) -> None: (base / "bantam").mkdir() (base / "config.json").write_text("{}") - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = list_levels() assert result == ["bantam"] @@ -279,7 +283,7 @@ def test_delete_team_profile_success(tmp_path: Path) -> None: level_dir.mkdir(parents=True) (level_dir / "roseville.json").write_text("{}") - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = delete_team_profile("bantam", "roseville") assert result is True @@ -287,7 +291,199 @@ def test_delete_team_profile_success(tmp_path: Path) -> None: def test_delete_team_profile_not_found(tmp_path: Path) -> None: - with patch("reeln.core.teams.config_dir", return_value=tmp_path): + with patch("reeln.core.teams._config_base_dir", return_value=tmp_path): result = delete_team_profile("bantam", "nonexistent") assert result is False + + +# --------------------------------------------------------------------------- +# load_roster +# --------------------------------------------------------------------------- + + +def test_load_roster_happy_path(tmp_path: Path) -> None: + roster_csv = tmp_path / "roster.csv" + roster_csv.write_text("number,name,position\n48,John Smith,C\n24,Jane Doe,D\n2,Bob Jones,RW\n") + + roster = load_roster(roster_csv) + + assert len(roster) == 3 + assert roster["48"] == RosterEntry(number="48", name="John Smith", position="C") + assert roster["24"] == RosterEntry(number="24", name="Jane Doe", position="D") + assert roster["2"] == RosterEntry(number="2", name="Bob Jones", position="RW") + + +def test_load_roster_missing_file(tmp_path: Path) -> None: + with pytest.raises(ConfigError, match="Roster file not found"): + load_roster(tmp_path / "nonexistent.csv") + + +def test_load_roster_empty_file(tmp_path: Path) -> 
None: + roster_csv = tmp_path / "empty.csv" + roster_csv.write_text("") + + with pytest.raises(ConfigError, match="empty or has no header"): + load_roster(roster_csv) + + +def test_load_roster_missing_columns(tmp_path: Path) -> None: + roster_csv = tmp_path / "bad.csv" + roster_csv.write_text("number,name\n48,John Smith\n") + + with pytest.raises(ConfigError, match="missing required columns"): + load_roster(roster_csv) + + +def test_load_roster_skips_empty_numbers(tmp_path: Path) -> None: + roster_csv = tmp_path / "roster.csv" + roster_csv.write_text("number,name,position\n48,John Smith,C\n,,\n24,Jane Doe,D\n") + + roster = load_roster(roster_csv) + + assert len(roster) == 2 + assert "48" in roster + assert "24" in roster + + +def test_load_roster_unreadable_file(tmp_path: Path) -> None: + roster_csv = tmp_path / "unreadable.csv" + roster_csv.write_text("number,name,position\n48,John Smith,C\n") + with ( + patch.object(Path, "read_text", side_effect=OSError("permission denied")), + pytest.raises(ConfigError, match="Failed to read roster file"), + ): + load_roster(roster_csv) + + +def test_load_roster_strips_whitespace(tmp_path: Path) -> None: + roster_csv = tmp_path / "roster.csv" + roster_csv.write_text("number , name , position\n 48 , John Smith , C \n") + + roster = load_roster(roster_csv) + + assert roster["48"] == RosterEntry(number="48", name="John Smith", position="C") + + +# --------------------------------------------------------------------------- +# lookup_players +# --------------------------------------------------------------------------- + + +def test_lookup_players_happy_path() -> None: + roster = { + "48": RosterEntry(number="48", name="John Smith", position="C"), + "24": RosterEntry(number="24", name="Jane Doe", position="D"), + "2": RosterEntry(number="2", name="Bob Jones", position="RW"), + } + + scorer, assists = lookup_players(roster, ["48", "24", "2"], "Eagles") + + assert scorer == "#48 John Smith" + assert assists == ["#24 Jane Doe", "#2 
Bob Jones"] + + +def test_lookup_players_missing_number() -> None: + roster = { + "48": RosterEntry(number="48", name="John Smith", position="C"), + } + + scorer, assists = lookup_players(roster, ["48", "99"], "Eagles") + + assert scorer == "#48 John Smith" + assert assists == ["#99"] + + +def test_lookup_players_single_number() -> None: + roster = { + "48": RosterEntry(number="48", name="John Smith", position="C"), + } + + scorer, assists = lookup_players(roster, ["48"], "Eagles") + + assert scorer == "#48 John Smith" + assert assists == [] + + +def test_lookup_players_empty_roster() -> None: + scorer, assists = lookup_players({}, ["48", "24"], "Eagles") + + assert scorer == "#48" + assert assists == ["#24"] + + +def test_lookup_players_empty_numbers() -> None: + roster = {"48": RosterEntry(number="48", name="John Smith", position="C")} + + scorer, assists = lookup_players(roster, [], "Eagles") + + assert scorer == "" + assert assists == [] + + +def test_lookup_players_all_missing() -> None: + scorer, assists = lookup_players({}, ["99", "88"], "Eagles") + + assert scorer == "#99" + assert assists == ["#88"] + + +# --------------------------------------------------------------------------- +# resolve_scoring_team +# --------------------------------------------------------------------------- + + +def _game_info_with_teams() -> GameInfo: + return GameInfo( + date="2026-03-04", + home_team="Eagles", + away_team="Bears", + sport="hockey", + level="bantam", + home_slug="eagles", + away_slug="bears", + ) + + +def test_resolve_scoring_team_home_goal() -> None: + gi = _game_info_with_teams() + name, slug, level = resolve_scoring_team("HOME_GOAL", gi) + assert name == "Eagles" + assert slug == "eagles" + assert level == "bantam" + + +def test_resolve_scoring_team_away_goal() -> None: + gi = _game_info_with_teams() + name, slug, level = resolve_scoring_team("AWAY_GOAL", gi) + assert name == "Bears" + assert slug == "bears" + assert level == "bantam" + + +def 
test_resolve_scoring_team_lowercase_home() -> None: + gi = _game_info_with_teams() + name, slug, _level = resolve_scoring_team("home_goal", gi) + assert name == "Eagles" + assert slug == "eagles" + + +def test_resolve_scoring_team_lowercase_away() -> None: + gi = _game_info_with_teams() + name, slug, _level = resolve_scoring_team("away_goal", gi) + assert name == "Bears" + assert slug == "bears" + + +def test_resolve_scoring_team_generic_defaults_home() -> None: + gi = _game_info_with_teams() + name, slug, _level = resolve_scoring_team("goal", gi) + assert name == "Eagles" + assert slug == "eagles" + + +def test_resolve_scoring_team_empty_defaults_home() -> None: + gi = _game_info_with_teams() + name, slug, _level = resolve_scoring_team("", gi) + assert name == "Eagles" + assert slug == "eagles" diff --git a/tests/unit/core/test_templates.py b/tests/unit/core/test_templates.py index 92b718c..a968fec 100644 --- a/tests/unit/core/test_templates.py +++ b/tests/unit/core/test_templates.py @@ -120,6 +120,8 @@ def test_build_base_context_game_info_only() -> None: assert ctx.get("game_number") == "1" assert ctx.get("game_time") == "" assert ctx.get("period_length") == "0" + assert ctx.get("level") == "" + assert ctx.get("tournament") == "" assert ctx.get("event_type") == "" # not present @@ -135,6 +137,20 @@ def test_build_base_context_with_game_time() -> None: assert ctx.get("game_time") == "7:00 PM" +def test_build_base_context_with_level_and_tournament() -> None: + info = GameInfo( + date="2026-03-21", + home_team="North", + away_team="South", + sport="hockey", + level="2016", + tournament="2026 Stars of Tomorrow", + ) + ctx = build_base_context(info) + assert ctx.get("level") == "2016" + assert ctx.get("tournament") == "2026 Stars of Tomorrow" + + def test_build_base_context_with_event() -> None: info = GameInfo( date="2026-02-28", diff --git a/tests/unit/core/test_zoom.py b/tests/unit/core/test_zoom.py new file mode 100644 index 0000000..f043492 --- /dev/null +++ 
b/tests/unit/core/test_zoom.py @@ -0,0 +1,519 @@ +"""Tests for piecewise lerp and smart crop filter builders.""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from reeln.core.errors import RenderError +from reeln.core.shorts import build_filter_chain +from reeln.core.zoom import _downsample, build_piecewise_lerp, build_smart_crop_filter +from reeln.models.short import CropMode, ShortConfig +from reeln.models.zoom import ZoomPath, ZoomPoint + +# --------------------------------------------------------------------------- +# build_piecewise_lerp +# --------------------------------------------------------------------------- + + +def test_lerp_empty_values() -> None: + assert build_piecewise_lerp([], 10.0) == "0" + + +def test_lerp_single_point() -> None: + result = build_piecewise_lerp([(5.0, 0.5)], 10.0) + assert result == "0.5" + + +def test_lerp_two_points() -> None: + result = build_piecewise_lerp([(0.0, 0.3), (10.0, 0.7)], 10.0) + # Flat: lt(t,10)*lerp + gte(t,10)*0.7 + assert "lt(t,10)" in result + assert "0.3" in result + assert "0.7" in result + # No nested if() calls — uses sum-of-products + assert "if(" not in result + + +def test_lerp_three_points() -> None: + result = build_piecewise_lerp([(0.0, 0.2), (5.0, 0.8), (10.0, 0.5)], 10.0) + # Three terms: lt(t,5)*lerp01 + gte(t,5)*lt(t,10)*lerp12 + gte(t,10)*0.5 + assert "lt(t,5)" in result + assert "gte(t,5)*lt(t,10)" in result + assert "gte(t,10)*0.5" in result + + +def test_lerp_custom_time_expr() -> None: + result = build_piecewise_lerp([(0.0, 0.0), (10.0, 1.0)], 10.0, time_expr="T") + # Time expression should use T + assert "lt(T," in result + assert "gte(T," in result + + +def test_lerp_zero_duration_segment() -> None: + """Two points at the same timestamp should not divide by zero.""" + result = build_piecewise_lerp([(5.0, 0.3), (5.0, 0.7)], 10.0) + # With dt=0, should use the first point's value + assert "0.3" in result + + +def test_lerp_values_clamp_to_last() -> 
None: + """After the last point, expression should return the last value.""" + result = build_piecewise_lerp([(0.0, 0.1), (5.0, 0.9)], 10.0) + # Last term clamps to 0.9 + assert "gte(t,5)*0.9" in result + + +def test_lerp_many_points_downsampled() -> None: + """Many points are downsampled to stay within ffmpeg parser limits.""" + values = [(float(i), float(i) / 20.0) for i in range(20)] + result = build_piecewise_lerp(values, 20.0) + # Should not use any nested if() — all flat sum-of-products + assert "if(" not in result + # Downsampled to 9 points = 8 segments: 1 lt() + 7 gte()*lt() + 1 gte() clamp + assert result.count("lt(t,") == 8 # first term + 7 middle terms + assert result.count("gte(t,") == 8 # 7 middle terms + 1 clamp term + + +def test_lerp_nine_points_not_downsampled() -> None: + """Nine points (8 segments) should not be downsampled.""" + values = [(float(i), float(i) / 9.0) for i in range(9)] + result = build_piecewise_lerp(values, 9.0) + assert result.count("lt(t,") == 8 + assert result.count("gte(t,") == 8 + + +# --------------------------------------------------------------------------- +# _downsample +# --------------------------------------------------------------------------- + + +def test_downsample_under_limit() -> None: + values = [(0.0, 0.1), (1.0, 0.5), (2.0, 0.9)] + assert _downsample(values, 5) is values # same object, not a copy + + +def test_downsample_at_limit() -> None: + values = [(float(i), float(i)) for i in range(5)] + assert _downsample(values, 5) is values + + +def test_downsample_preserves_endpoints() -> None: + values = [(float(i), float(i)) for i in range(20)] + result = _downsample(values, 5) + assert len(result) == 5 + assert result[0] == values[0] + assert result[-1] == values[-1] + + +def test_downsample_evenly_spaced() -> None: + values = [(float(i), float(i)) for i in range(10)] + result = _downsample(values, 4) + assert len(result) == 4 + assert result[0] == (0.0, 0.0) + assert result[-1] == (9.0, 9.0) + # Inner points 
should be roughly evenly distributed + for pt in result[1:-1]: + assert pt in values + + +# --------------------------------------------------------------------------- +# build_smart_crop_filter +# --------------------------------------------------------------------------- + + +def _make_zoom_path( + points: list[tuple[float, float, float]] | None = None, +) -> ZoomPath: + """Create a ZoomPath from (timestamp, center_x, center_y) tuples.""" + if points is None: + points = [(0.0, 0.5, 0.5), (10.0, 0.5, 0.5)] + return ZoomPath( + points=tuple(ZoomPoint(timestamp=t, center_x=cx, center_y=cy) for t, cx, cy in points), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + +def test_smart_crop_filter_structure() -> None: + path = _make_zoom_path() + result = build_smart_crop_filter(path, 1080, 1920) + assert result.startswith("crop=w='") + assert "ih*1080/1920" in result + assert ":h='ih':" in result + assert ":x='" in result + assert ":y='" in result + + +def test_smart_crop_filter_static_center() -> None: + """Static center should produce constant 0.5 values in lerp.""" + path = _make_zoom_path([(0.0, 0.5, 0.5)]) + result = build_smart_crop_filter(path, 1080, 1920) + assert "0.5" in result + + +def test_smart_crop_filter_panning() -> None: + """A pan from left to right should produce lerp with 0.2 and 0.8.""" + path = _make_zoom_path([(0.0, 0.2, 0.5), (10.0, 0.8, 0.5)]) + result = build_smart_crop_filter(path, 1080, 1920) + assert "0.2" in result + assert "0.8" in result + + +def test_smart_crop_filter_clamps_x() -> None: + """X expression should be clamped with max(0,min(...)).""" + path = _make_zoom_path() + result = build_smart_crop_filter(path, 1080, 1920) + assert "max(0,min(" in result + + +def test_smart_crop_filter_clamps_y() -> None: + """Y expression should be clamped with max(0,min(...)).""" + path = _make_zoom_path([(0.0, 0.5, 0.0), (10.0, 0.5, 1.0)]) + result = build_smart_crop_filter(path, 1080, 1920) + # Should have max(0,min()) for both x 
and y + assert result.count("max(0,min(") == 2 + + +def test_smart_crop_filter_values_single_quoted() -> None: + """Crop parameter values are single-quoted to protect commas from ffmpeg's filter parser.""" + path = _make_zoom_path() + result = build_smart_crop_filter(path, 1080, 1920) + assert "w='" in result + assert "h='" in result + assert "x='" in result + assert "y='" in result + + +def test_smart_crop_filter_multi_point() -> None: + """Three-point path should produce flat sum-of-products expressions.""" + path = _make_zoom_path( + [ + (0.0, 0.2, 0.5), + (5.0, 0.7, 0.5), + (10.0, 0.4, 0.5), + ] + ) + result = build_smart_crop_filter(path, 1080, 1920) + # Flat expression: lt()*lerp + gte()*lt()*lerp + gte()*clamp + assert "lt(t," in result + assert "gte(t," in result + + +# --------------------------------------------------------------------------- +# build_filter_chain with SMART mode +# --------------------------------------------------------------------------- + + +def _cfg(tmp_path: Path, **kwargs: object) -> ShortConfig: + defaults: dict[str, object] = { + "input": tmp_path / "clip.mkv", + "output": tmp_path / "out.mp4", + } + defaults.update(kwargs) + return ShortConfig(**defaults) # type: ignore[arg-type] + + +def test_filter_chain_smart_without_zoom_path_raises(tmp_path: Path) -> None: + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART) + with pytest.raises(RenderError, match="Smart crop mode requires a zoom path"): + build_filter_chain(cfg) + + +def test_filter_chain_smart_with_zoom_path(tmp_path: Path) -> None: + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART) + path = _make_zoom_path() + chain, audio = build_filter_chain(cfg, zoom_path=path) + # Should have scale, smart crop, final scale + assert "scale=-2:1920:flags=lanczos" in chain + assert "crop=w=" in chain + assert "scale=1080:1920:flags=lanczos" in chain + assert audio is None + + +def test_filter_chain_smart_with_speed(tmp_path: Path) -> None: + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART, 
speed=0.5) + path = _make_zoom_path() + chain, audio = build_filter_chain(cfg, zoom_path=path) + assert "setpts=PTS/0.5" in chain + assert audio == "atempo=0.5" + + +def test_filter_chain_smart_with_lut_and_subtitle(tmp_path: Path) -> None: + lut = tmp_path / "grade.cube" + sub = tmp_path / "subs.ass" + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART, lut=lut, subtitle=sub) + path = _make_zoom_path() + chain, _ = build_filter_chain(cfg, zoom_path=path) + assert chain.startswith(f"lut3d={lut}") + assert chain.endswith(f"subtitles=f={sub}") + assert "crop=w=" in chain + + +def test_filter_chain_pad_unaffected_by_zoom_path(tmp_path: Path) -> None: + """zoom_path should be ignored in non-smart modes.""" + cfg = _cfg(tmp_path, crop_mode=CropMode.PAD) + path = _make_zoom_path() + chain, _ = build_filter_chain(cfg, zoom_path=path) + assert "crop=w=" not in chain + assert "pad=1080:1920" in chain + + +def test_filter_chain_crop_unaffected_by_zoom_path(tmp_path: Path) -> None: + """Static crop mode should ignore zoom_path.""" + cfg = _cfg(tmp_path, crop_mode=CropMode.CROP) + path = _make_zoom_path() + chain, _ = build_filter_chain(cfg, zoom_path=path) + assert "x=(iw-ih*1080/1920)*0.5" in chain + + +# --------------------------------------------------------------------------- +# build_smart_pad_filter +# --------------------------------------------------------------------------- + + +# --------------------------------------------------------------------------- +# build_smart_pad_filter (overlay expression) +# --------------------------------------------------------------------------- + + +def test_smart_pad_filter_structure() -> None: + from reeln.core.zoom import build_smart_pad_filter + + path = _make_zoom_path() + result = build_smart_pad_filter(path, 1080, 1920) + assert result.startswith("overlay=x='") + assert ":y='(H-h)/2'" in result + assert ":eval=frame" in result + assert ":shortest=1" in result + + +def test_smart_pad_filter_x_tracks_center_x() -> None: + """X 
expression should follow center_x values via lerp.""" + from reeln.core.zoom import build_smart_pad_filter + + path = _make_zoom_path([(0.0, 0.2, 0.5), (10.0, 0.8, 0.5)]) + result = build_smart_pad_filter(path, 1080, 1920) + assert "min(0,max(W-w," in result + assert "0.2" in result + assert "0.8" in result + + +def test_smart_pad_filter_y_static_center() -> None: + """Y is always vertically centered — no dynamic tracking in pad mode.""" + from reeln.core.zoom import build_smart_pad_filter + + path = _make_zoom_path([(0.0, 0.5, 0.0), (10.0, 0.5, 1.0)]) + result = build_smart_pad_filter(path, 1080, 1920) + assert ":y='(H-h)/2'" in result + + +def test_smart_pad_filter_xy_single_quoted() -> None: + """X and Y expressions are single-quoted to protect commas.""" + from reeln.core.zoom import build_smart_pad_filter + + path = _make_zoom_path() + result = build_smart_pad_filter(path, 1080, 1920) + assert result.startswith("overlay=x='") + assert ":y='" in result + + +# --------------------------------------------------------------------------- +# build_smart_pad_graph (multi-stream overlay graph) +# --------------------------------------------------------------------------- + + +def test_smart_pad_graph_structure() -> None: + from reeln.core.zoom import build_smart_pad_graph + + path = _make_zoom_path() + result = build_smart_pad_graph( + pre_filters=["scale=1080:-2:flags=lanczos"], + zoom_path=path, + target_width=1080, + target_height=1920, + ) + # colour background source (default 30fps) + assert "color=c=black:s=1080x1920:r=30/1[_bg]" in result + # pre-filters on input + assert "[0:v]scale=1080:-2:flags=lanczos[_fg]" in result + # overlay + assert "[_bg][_fg]overlay=" in result + assert ":eval=frame" in result + + +def test_smart_pad_graph_custom_color() -> None: + from reeln.core.zoom import build_smart_pad_graph + + path = _make_zoom_path() + result = build_smart_pad_graph( + pre_filters=["scale=1080:-2:flags=lanczos"], + zoom_path=path, + target_width=1080, + 
target_height=1920, + pad_color="white", + ) + assert "color=c=white:s=1080x1920" in result + + +def test_smart_pad_graph_with_post_filters() -> None: + from reeln.core.zoom import build_smart_pad_graph + + path = _make_zoom_path() + result = build_smart_pad_graph( + pre_filters=["scale=1080:-2:flags=lanczos"], + zoom_path=path, + target_width=1080, + target_height=1920, + post_filters=["subtitles=f=subs.ass"], + ) + # subtitle in a separate filter stage via stream label with format buffer + assert "[_ov]" in result + assert result.endswith("[_ov]format=yuv420p,subtitles=f=subs.ass") + + +def test_smart_pad_graph_no_pre_filters() -> None: + from reeln.core.zoom import build_smart_pad_graph + + path = _make_zoom_path() + result = build_smart_pad_graph( + pre_filters=[], + zoom_path=path, + target_width=1080, + target_height=1920, + ) + assert "[0:v]null[_fg]" in result + + +def test_smart_pad_graph_custom_fps() -> None: + from reeln.core.zoom import build_smart_pad_graph + + path = _make_zoom_path() + result = build_smart_pad_graph( + pre_filters=["scale=1080:-2:flags=lanczos"], + zoom_path=path, + target_width=1080, + target_height=1920, + source_fps=60.0, + ) + assert "color=c=black:s=1080x1920:r=60/1[_bg]" in result + + +def test_smart_pad_graph_fractional_fps() -> None: + from reeln.core.zoom import build_smart_pad_graph + + path = _make_zoom_path() + result = build_smart_pad_graph( + pre_filters=["scale=1080:-2:flags=lanczos"], + zoom_path=path, + target_width=1080, + target_height=1920, + source_fps=59.94, + ) + assert "r=2997/50[_bg]" in result + + +def test_smart_pad_graph_ntsc_fps() -> None: + """NTSC 59.94005994… fps converts to exact 60000/1001 fraction.""" + from reeln.core.zoom import build_smart_pad_graph + + path = _make_zoom_path() + result = build_smart_pad_graph( + pre_filters=["scale=1080:-2:flags=lanczos"], + zoom_path=path, + target_width=1080, + target_height=1920, + source_fps=60000 / 1001, + ) + assert "r=60000/1001[_bg]" in result + + +def 
test_fps_to_fraction_common_rates() -> None: + """_fps_to_fraction recovers exact fractions for common video rates.""" + from reeln.core.zoom import _fps_to_fraction + + assert _fps_to_fraction(30.0) == "30/1" + assert _fps_to_fraction(60.0) == "60/1" + assert _fps_to_fraction(24.0) == "24/1" + assert _fps_to_fraction(60000 / 1001) == "60000/1001" + assert _fps_to_fraction(30000 / 1001) == "30000/1001" + assert _fps_to_fraction(24000 / 1001) == "24000/1001" + + +def test_smart_pad_graph_square() -> None: + from reeln.core.zoom import build_smart_pad_graph + + path = _make_zoom_path() + result = build_smart_pad_graph( + pre_filters=["scale=1080:-2:flags=lanczos"], + zoom_path=path, + target_width=1080, + target_height=1080, + ) + assert "s=1080x1080" in result + + +# --------------------------------------------------------------------------- +# build_filter_chain with SMART_PAD mode +# --------------------------------------------------------------------------- + + +def test_filter_chain_smart_pad_source_fps(tmp_path: Path) -> None: + """Smart pad graph uses the provided source_fps for the color source.""" + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART_PAD) + path = _make_zoom_path() + chain, _ = build_filter_chain(cfg, zoom_path=path, source_fps=60.0) + assert "r=60/1[_bg]" in chain + + +def test_filter_chain_smart_pad_without_zoom_path_raises(tmp_path: Path) -> None: + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART_PAD) + with pytest.raises(RenderError, match="Smart crop mode requires a zoom path"): + build_filter_chain(cfg) + + +def test_filter_chain_smart_pad_with_zoom_path(tmp_path: Path) -> None: + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART_PAD) + path = _make_zoom_path() + chain, audio = build_filter_chain(cfg, zoom_path=path) + # Smart pad scales by height (like crop) for horizontal panning room + assert "scale=-2:1920:flags=lanczos" in chain + assert "color=c=black:s=1080x1920" in chain + assert "[_bg][_fg]overlay=" in chain + assert audio is None + + 
+def test_filter_chain_smart_pad_with_speed(tmp_path: Path) -> None: + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART_PAD, speed=0.5) + path = _make_zoom_path() + chain, audio = build_filter_chain(cfg, zoom_path=path) + assert "setpts=PTS/0.5" in chain + assert audio == "atempo=0.5" + + +def test_filter_chain_smart_pad_uses_pad_color(tmp_path: Path) -> None: + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART_PAD, pad_color="white") + path = _make_zoom_path() + chain, _ = build_filter_chain(cfg, zoom_path=path) + assert "color=c=white" in chain + + +def test_filter_chain_smart_pad_no_final_scale(tmp_path: Path) -> None: + """SMART_PAD should NOT have a final scale filter (overlay handles sizing).""" + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART_PAD) + path = _make_zoom_path() + chain, _ = build_filter_chain(cfg, zoom_path=path) + # Should only have one scale filter (the initial pre-filter), not a final scale + assert chain.count("scale=") == 1 + + +def test_filter_chain_smart_pad_with_subtitle(tmp_path: Path) -> None: + """Subtitle should be appended after overlay in the graph.""" + sub = tmp_path / "subs.ass" + cfg = _cfg(tmp_path, crop_mode=CropMode.SMART_PAD, subtitle=sub) + path = _make_zoom_path() + chain, _ = build_filter_chain(cfg, zoom_path=path) + assert f"subtitles=f={sub}" in chain + assert chain.endswith(f"[_ov]format=yuv420p,subtitles=f={sub}") diff --git a/tests/unit/core/test_zoom_debug.py b/tests/unit/core/test_zoom_debug.py new file mode 100644 index 0000000..43ab1fa --- /dev/null +++ b/tests/unit/core/test_zoom_debug.py @@ -0,0 +1,489 @@ +"""Tests for zoom debug output writer.""" + +from __future__ import annotations + +import json +from pathlib import Path +from unittest.mock import patch + +from reeln.core.zoom_debug import ( + _annotate_frames, + _build_annotate_command, + write_zoom_debug, +) +from reeln.models.zoom import ExtractedFrames, ZoomPath, ZoomPoint + + +def _make_frames(tmp_path: Path, count: int = 2) -> ExtractedFrames: + """Create 
ExtractedFrames with actual files on disk.""" + frames_dir = tmp_path / "extracted" + frames_dir.mkdir() + paths: list[Path] = [] + timestamps: list[float] = [] + for i in range(count): + p = frames_dir / f"frame_{i:04d}.png" + p.write_bytes(b"fake png data") + paths.append(p) + timestamps.append(float(i) * 5.0) + return ExtractedFrames( + frame_paths=tuple(paths), + timestamps=tuple(timestamps), + source_width=1920, + source_height=1080, + duration=10.0, + fps=60.0, + ) + + +def _make_zoom_path() -> ZoomPath: + return ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=5.0, center_x=0.7, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.5, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + +# --------------------------------------------------------------------------- +# write_zoom_debug +# --------------------------------------------------------------------------- + + +def test_write_zoom_debug_creates_directory(tmp_path: Path) -> None: + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path) + + result = write_zoom_debug(game_dir, frames, None, 1080, 1920) + + assert result == game_dir / "debug" / "zoom" + assert result.is_dir() + + +def test_write_zoom_debug_copies_frames(tmp_path: Path) -> None: + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path, count=3) + + debug_dir = write_zoom_debug(game_dir, frames, None, 1080, 1920) + + for i in range(3): + dest = debug_dir / f"frame_{i:04d}.png" + assert dest.is_file() + assert not dest.is_symlink() + assert dest.read_bytes() == frames.frame_paths[i].read_bytes() + + +def test_write_zoom_debug_writes_json_without_zoom_path(tmp_path: Path) -> None: + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path) + + debug_dir = write_zoom_debug(game_dir, frames, None, 1080, 1920) + + json_path = debug_dir / "zoom_path.json" + assert json_path.is_file() + 
data = json.loads(json_path.read_text()) + assert data["source_width"] == 1920 + assert data["source_height"] == 1080 + assert data["duration"] == 10.0 + assert data["fps"] == 60.0 + assert data["frame_count"] == 2 + assert data["target_width"] == 1080 + assert data["target_height"] == 1920 + assert data["zoom_path"] is None + assert data["ffmpeg_expressions"] is None + + +def test_write_zoom_debug_writes_json_with_zoom_path(tmp_path: Path) -> None: + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path) + zoom_path = _make_zoom_path() + + debug_dir = write_zoom_debug(game_dir, frames, zoom_path, 1080, 1920) + + json_path = debug_dir / "zoom_path.json" + data = json.loads(json_path.read_text()) + assert data["zoom_path"] is not None + assert data["zoom_path"]["point_count"] == 3 + assert len(data["zoom_path"]["points"]) == 3 + assert data["zoom_path"]["points"][0]["center_x"] == 0.3 + assert data["zoom_path"]["points"][1]["center_x"] == 0.7 + assert data["ffmpeg_expressions"] is not None + assert "x_lerp" in data["ffmpeg_expressions"] + assert "y_lerp" in data["ffmpeg_expressions"] + assert "crop_filter" in data["ffmpeg_expressions"] + assert "crop=w=" in data["ffmpeg_expressions"]["crop_filter"] + + +def test_write_zoom_debug_idempotent(tmp_path: Path) -> None: + """Calling twice overwrites without error.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path) + zoom_path = _make_zoom_path() + + write_zoom_debug(game_dir, frames, zoom_path, 1080, 1920) + debug_dir = write_zoom_debug(game_dir, frames, zoom_path, 1080, 1920) + + assert (debug_dir / "zoom_path.json").is_file() + + +def test_write_zoom_debug_missing_frame_file(tmp_path: Path) -> None: + """Missing frame files are skipped (no copy created).""" + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = ExtractedFrames( + frame_paths=(tmp_path / "nonexistent.png",), + timestamps=(5.0,), + source_width=1920, + source_height=1080, + 
duration=10.0, + fps=30.0, + ) + + debug_dir = write_zoom_debug(game_dir, frames, None, 1080, 1920) + + link = debug_dir / "frame_0000.png" + assert not link.exists() + + +def test_write_zoom_debug_zoom_path_confidence(tmp_path: Path) -> None: + """Confidence values are included in the JSON output.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path, count=1) + zoom_path = ZoomPath( + points=(ZoomPoint(timestamp=5.0, center_x=0.5, center_y=0.5, confidence=0.85),), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + debug_dir = write_zoom_debug(game_dir, frames, zoom_path, 1080, 1920) + + data = json.loads((debug_dir / "zoom_path.json").read_text()) + assert data["zoom_path"]["points"][0]["confidence"] == 0.85 + + +# --------------------------------------------------------------------------- +# _build_annotate_command +# --------------------------------------------------------------------------- + + +def test_build_annotate_command_structure(tmp_path: Path) -> None: + """Command contains drawbox filters for crosshair and crop box.""" + frame = tmp_path / "frame.png" + out = tmp_path / "annotated.png" + point = ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5) + cmd = _build_annotate_command(Path("/usr/bin/ffmpeg"), frame, out, point, 1920, 1080, 1080, 1920) + assert cmd[0] == "/usr/bin/ffmpeg" + assert "-vf" in cmd + vf = cmd[cmd.index("-vf") + 1] + assert "drawbox" in vf + assert "green" in vf + assert "red" in vf + + +def test_build_annotate_command_clamps_crop_box(tmp_path: Path) -> None: + """Crop box position is clamped to source bounds.""" + point = ZoomPoint(timestamp=0.0, center_x=1.0, center_y=1.0) + cmd = _build_annotate_command( + Path("/usr/bin/ffmpeg"), + tmp_path / "f.png", + tmp_path / "o.png", + point, + 1920, + 1080, + 1080, + 1920, + ) + vf = cmd[cmd.index("-vf") + 1] + # Box x should be clamped, not go negative or beyond bounds + assert "drawbox" in vf + + +def 
test_build_annotate_command_zero_center(tmp_path: Path) -> None: + """Center at 0,0 still produces valid crosshair coordinates.""" + point = ZoomPoint(timestamp=0.0, center_x=0.0, center_y=0.0) + cmd = _build_annotate_command( + Path("/usr/bin/ffmpeg"), + tmp_path / "f.png", + tmp_path / "o.png", + point, + 1920, + 1080, + 1080, + 1920, + ) + vf = cmd[cmd.index("-vf") + 1] + assert "drawbox" in vf + + +# --------------------------------------------------------------------------- +# _annotate_frames +# --------------------------------------------------------------------------- + + +def test_annotate_frames_success(tmp_path: Path) -> None: + """Annotated frames are created when ffmpeg succeeds.""" + frames = _make_frames(tmp_path, count=2) + zoom_path = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=5.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + out_dir = tmp_path / "out" + out_dir.mkdir() + + with patch("reeln.core.zoom_debug.subprocess.run") as mock_run: + mock_run.return_value.returncode = 0 + result = _annotate_frames(Path("/usr/bin/ffmpeg"), frames, zoom_path, 1080, 1920, out_dir) + + assert mock_run.call_count == 2 + assert len(result) == 2 + assert all("annotated_" in p.name for p in result) + + +def test_annotate_frames_skips_missing_point(tmp_path: Path) -> None: + """Frames without a matching zoom point are skipped.""" + frames = _make_frames(tmp_path, count=2) + # Only one point at timestamp 0.0, no point at 5.0 + zoom_path = ZoomPath( + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + duration=10.0, + ) + out_dir = tmp_path / "out" + out_dir.mkdir() + + with patch("reeln.core.zoom_debug.subprocess.run") as mock_run: + mock_run.return_value.returncode = 0 + result = _annotate_frames(Path("/usr/bin/ffmpeg"), frames, zoom_path, 1080, 1920, out_dir) + + assert mock_run.call_count == 1 + 
assert len(result) == 1 + + +def test_annotate_frames_skips_missing_file(tmp_path: Path) -> None: + """Frames where the file doesn't exist are skipped.""" + frames = ExtractedFrames( + frame_paths=(tmp_path / "nonexistent.png",), + timestamps=(0.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=30.0, + ) + zoom_path = ZoomPath( + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + duration=10.0, + ) + out_dir = tmp_path / "out" + out_dir.mkdir() + + with patch("reeln.core.zoom_debug.subprocess.run") as mock_run: + result = _annotate_frames(Path("/usr/bin/ffmpeg"), frames, zoom_path, 1080, 1920, out_dir) + + mock_run.assert_not_called() + assert result == [] + + +def test_annotate_frames_handles_ffmpeg_error(tmp_path: Path) -> None: + """ffmpeg failure is logged and skipped, not raised.""" + import subprocess as sp + + frames = _make_frames(tmp_path, count=1) + zoom_path = ZoomPath( + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + duration=10.0, + ) + out_dir = tmp_path / "out" + out_dir.mkdir() + + with patch( + "reeln.core.zoom_debug.subprocess.run", + side_effect=sp.CalledProcessError(1, "ffmpeg"), + ): + result = _annotate_frames(Path("/usr/bin/ffmpeg"), frames, zoom_path, 1080, 1920, out_dir) + + assert result == [] + + +def test_annotate_frames_handles_timeout(tmp_path: Path) -> None: + """Timeout is caught and skipped.""" + import subprocess as sp + + frames = _make_frames(tmp_path, count=1) + zoom_path = ZoomPath( + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + duration=10.0, + ) + out_dir = tmp_path / "out" + out_dir.mkdir() + + with patch( + "reeln.core.zoom_debug.subprocess.run", + side_effect=sp.TimeoutExpired("ffmpeg", 30), + ): + result = _annotate_frames(Path("/usr/bin/ffmpeg"), frames, zoom_path, 1080, 1920, out_dir) + + assert result == [] + + +def 
test_annotate_frames_handles_os_error(tmp_path: Path) -> None: + """OSError (e.g. ffmpeg not found) is caught and skipped.""" + frames = _make_frames(tmp_path, count=1) + zoom_path = ZoomPath( + points=(ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5),), + source_width=1920, + source_height=1080, + duration=10.0, + ) + out_dir = tmp_path / "out" + out_dir.mkdir() + + with patch("reeln.core.zoom_debug.subprocess.run", side_effect=OSError("nope")): + result = _annotate_frames(Path("/usr/bin/ffmpeg"), frames, zoom_path, 1080, 1920, out_dir) + + assert result == [] + + +# --------------------------------------------------------------------------- +# write_zoom_debug with annotations +# --------------------------------------------------------------------------- + + +def test_write_zoom_debug_with_ffmpeg_path_generates_annotated(tmp_path: Path) -> None: + """When ffmpeg_path is provided with a zoom path, annotated frames are generated.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path, count=2) + zoom_path = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=5.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) + + with patch("reeln.core.zoom_debug.subprocess.run") as mock_run: + mock_run.return_value.returncode = 0 + write_zoom_debug(game_dir, frames, zoom_path, 1080, 1920, ffmpeg_path=Path("/usr/bin/ffmpeg")) + + assert mock_run.call_count == 2 + + +def test_write_zoom_debug_annotate_all_fail(tmp_path: Path) -> None: + """When all annotate commands fail, no 'Wrote N annotated frames' log but no crash.""" + import subprocess as sp + + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path, count=2) + zoom_path = ZoomPath( + points=( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=5.0, center_x=0.7, center_y=0.5), + ), + source_width=1920, + source_height=1080, + duration=10.0, + ) 
+ + with patch( + "reeln.core.zoom_debug.subprocess.run", + side_effect=sp.CalledProcessError(1, "ffmpeg"), + ): + debug_dir = write_zoom_debug(game_dir, frames, zoom_path, 1080, 1920, ffmpeg_path=Path("/usr/bin/ffmpeg")) + + # Should still create the directory and zoom_path.json + assert debug_dir.is_dir() + assert (debug_dir / "zoom_path.json").is_file() + + +def test_write_zoom_debug_without_ffmpeg_skips_annotations(tmp_path: Path) -> None: + """Without ffmpeg_path, no annotated frames are generated.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path, count=2) + zoom_path = _make_zoom_path() + + with patch("reeln.core.zoom_debug.subprocess.run") as mock_run: + write_zoom_debug(game_dir, frames, zoom_path, 1080, 1920) + + mock_run.assert_not_called() + + +def test_write_zoom_debug_without_zoom_path_skips_annotations(tmp_path: Path) -> None: + """Without zoom_path, no annotated frames are generated even with ffmpeg_path.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path) + + with patch("reeln.core.zoom_debug.subprocess.run") as mock_run: + write_zoom_debug(game_dir, frames, None, 1080, 1920, ffmpeg_path=Path("/usr/bin/ffmpeg")) + + mock_run.assert_not_called() + + +# --------------------------------------------------------------------------- +# write_zoom_debug with plugin_debug +# --------------------------------------------------------------------------- + + +def test_write_zoom_debug_plugin_debug(tmp_path: Path) -> None: + """Plugin debug data is written as plugin_debug.json.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path, count=1) + + debug_data = {"prompt": "analyze this frame", "model": "gpt-4o", "tokens": 150} + debug_dir = write_zoom_debug(game_dir, frames, None, 1080, 1920, plugin_debug=debug_data) + + plugin_json = debug_dir / "plugin_debug.json" + assert plugin_json.is_file() + data = json.loads(plugin_json.read_text()) + assert data["prompt"] 
== "analyze this frame" + assert data["model"] == "gpt-4o" + assert data["tokens"] == 150 + + +def test_write_zoom_debug_no_plugin_debug(tmp_path: Path) -> None: + """Without plugin_debug, no plugin_debug.json is written.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path, count=1) + + debug_dir = write_zoom_debug(game_dir, frames, None, 1080, 1920) + + assert not (debug_dir / "plugin_debug.json").exists() + + +def test_write_zoom_debug_plugin_debug_empty_dict(tmp_path: Path) -> None: + """Empty plugin_debug dict is not written.""" + game_dir = tmp_path / "game" + game_dir.mkdir() + frames = _make_frames(tmp_path, count=1) + + debug_dir = write_zoom_debug(game_dir, frames, None, 1080, 1920, plugin_debug={}) + + assert not (debug_dir / "plugin_debug.json").exists() diff --git a/tests/unit/models/test_branding.py b/tests/unit/models/test_branding.py new file mode 100644 index 0000000..c1eeeb7 --- /dev/null +++ b/tests/unit/models/test_branding.py @@ -0,0 +1,63 @@ +"""Tests for branding configuration model.""" + +from __future__ import annotations + +import pytest + +from reeln.models.branding import BrandingConfig + + +class TestBrandingConfigDefaults: + def test_enabled_default(self) -> None: + config = BrandingConfig() + assert config.enabled is True + + def test_template_default(self) -> None: + config = BrandingConfig() + assert config.template == "builtin:branding" + + def test_duration_default(self) -> None: + config = BrandingConfig() + assert config.duration == 5.0 + + +class TestBrandingConfigCustom: + def test_disabled(self) -> None: + config = BrandingConfig(enabled=False) + assert config.enabled is False + + def test_custom_template(self) -> None: + config = BrandingConfig(template="/path/to/custom.ass") + assert config.template == "/path/to/custom.ass" + + def test_custom_duration(self) -> None: + config = BrandingConfig(duration=5.0) + assert config.duration == 5.0 + + +class TestBrandingConfigFrozen: + def 
test_cannot_mutate_enabled(self) -> None: + config = BrandingConfig() + with pytest.raises(AttributeError): + config.enabled = False # type: ignore[misc] + + def test_cannot_mutate_template(self) -> None: + config = BrandingConfig() + with pytest.raises(AttributeError): + config.template = "other" # type: ignore[misc] + + def test_cannot_mutate_duration(self) -> None: + config = BrandingConfig() + with pytest.raises(AttributeError): + config.duration = 10.0 # type: ignore[misc] + + +class TestBrandingConfigEquality: + def test_equal_defaults(self) -> None: + assert BrandingConfig() == BrandingConfig() + + def test_not_equal_different_enabled(self) -> None: + assert BrandingConfig(enabled=True) != BrandingConfig(enabled=False) + + def test_not_equal_different_duration(self) -> None: + assert BrandingConfig(duration=5.0) != BrandingConfig(duration=3.0) diff --git a/tests/unit/models/test_game.py b/tests/unit/models/test_game.py index 8bebb7a..e428dc0 100644 --- a/tests/unit/models/test_game.py +++ b/tests/unit/models/test_game.py @@ -38,6 +38,10 @@ def test_game_info_defaults() -> None: assert gi.period_length == 0 assert gi.description == "" assert gi.thumbnail == "" + assert gi.level == "" + assert gi.home_slug == "" + assert gi.away_slug == "" + assert gi.tournament == "" def test_game_info_custom_fields() -> None: @@ -158,6 +162,8 @@ def test_game_state_defaults() -> None: assert gs.finished_at == "" assert gs.renders == [] assert gs.events == [] + assert gs.segment_outputs == [] + assert gs.highlights_output == "" def test_game_state_livestreams_default() -> None: @@ -212,6 +218,18 @@ def test_game_state_with_events() -> None: assert gs.events[0].event_type == "goal" +def test_game_state_with_segment_outputs() -> None: + gi = GameInfo(date="2026-02-26", home_team="a", away_team="b", sport="hockey") + gs = GameState(game_info=gi, segment_outputs=["period-1_2026-02-26.mkv", "period-2_2026-02-26.mkv"]) + assert gs.segment_outputs == ["period-1_2026-02-26.mkv", 
"period-2_2026-02-26.mkv"] + + +def test_game_state_with_highlights_output() -> None: + gi = GameInfo(date="2026-02-26", home_team="a", away_team="b", sport="hockey") + gs = GameState(game_info=gi, highlights_output="a_vs_b_2026-02-26.mkv") + assert gs.highlights_output == "a_vs_b_2026-02-26.mkv" + + # --------------------------------------------------------------------------- # Serialization: GameInfo # --------------------------------------------------------------------------- @@ -238,6 +256,10 @@ def test_game_info_to_dict() -> None: "period_length": 0, "description": "", "thumbnail": "", + "level": "", + "home_slug": "", + "away_slug": "", + "tournament": "", } @@ -280,6 +302,10 @@ def test_dict_to_game_info_defaults() -> None: assert gi.period_length == 0 assert gi.description == "" assert gi.thumbnail == "" + assert gi.level == "" + assert gi.home_slug == "" + assert gi.away_slug == "" + assert gi.tournament == "" def test_dict_to_game_info_legacy_rink_fallback() -> None: @@ -359,6 +385,48 @@ def test_dict_to_game_info_with_description_and_thumbnail() -> None: assert gi.thumbnail == "/tmp/thumb.jpg" +def test_game_info_to_dict_with_level_and_slugs() -> None: + gi = GameInfo( + date="2026-03-04", + home_team="Roseville", + away_team="Mahtomedi", + sport="hockey", + level="bantam", + home_slug="roseville", + away_slug="mahtomedi", + ) + d = game_info_to_dict(gi) + assert d["level"] == "bantam" + assert d["home_slug"] == "roseville" + assert d["away_slug"] == "mahtomedi" + + +def test_dict_to_game_info_with_level_and_slugs() -> None: + d = { + "date": "2026-03-04", + "home_team": "Roseville", + "away_team": "Mahtomedi", + "sport": "hockey", + "level": "bantam", + "home_slug": "roseville", + "away_slug": "mahtomedi", + } + gi = dict_to_game_info(d) + assert gi.level == "bantam" + assert gi.home_slug == "roseville" + assert gi.away_slug == "mahtomedi" + + +def test_dict_to_game_info_level_defaults_empty() -> None: + """Backward compatibility: missing 
level/slug/tournament fields default to empty strings.""" + d = {"date": "2026-02-26", "home_team": "a", "away_team": "b", "sport": "generic"} + gi = dict_to_game_info(d) + assert gi.level == "" + assert gi.home_slug == "" + assert gi.away_slug == "" + assert gi.tournament == "" + + def test_game_info_round_trip() -> None: gi = GameInfo( date="2026-03-01", @@ -375,6 +443,57 @@ def test_game_info_round_trip() -> None: assert dict_to_game_info(game_info_to_dict(gi)) == gi +def test_game_info_to_dict_with_tournament() -> None: + gi = GameInfo( + date="2026-03-21", + home_team="North", + away_team="South", + sport="hockey", + tournament="2026 Stars of Tomorrow", + ) + d = game_info_to_dict(gi) + assert d["tournament"] == "2026 Stars of Tomorrow" + + +def test_dict_to_game_info_with_tournament() -> None: + d = { + "date": "2026-03-21", + "home_team": "North", + "away_team": "South", + "sport": "hockey", + "tournament": "2026 Stars of Tomorrow", + } + gi = dict_to_game_info(d) + assert gi.tournament == "2026 Stars of Tomorrow" + + +def test_game_info_round_trip_with_tournament() -> None: + gi = GameInfo( + date="2026-03-21", + home_team="North", + away_team="South", + sport="hockey", + level="2016", + home_slug="north", + away_slug="south", + tournament="2026 Stars of Tomorrow", + ) + assert dict_to_game_info(game_info_to_dict(gi)) == gi + + +def test_game_info_round_trip_with_level_and_slugs() -> None: + gi = GameInfo( + date="2026-03-04", + home_team="Roseville", + away_team="Mahtomedi", + sport="hockey", + level="bantam", + home_slug="roseville", + away_slug="mahtomedi", + ) + assert dict_to_game_info(game_info_to_dict(gi)) == gi + + # --------------------------------------------------------------------------- # Serialization: RenderEntry # --------------------------------------------------------------------------- @@ -570,6 +689,22 @@ def test_game_state_to_dict() -> None: assert d["finished_at"] == "" assert d["renders"] == [] assert d["events"] == [] + assert 
d["segment_outputs"] == [] + assert d["highlights_output"] == "" + + +def test_game_state_to_dict_with_segment_outputs() -> None: + gi = GameInfo(date="2026-02-26", home_team="a", away_team="b", sport="hockey") + gs = GameState(game_info=gi, segment_outputs=["period-1_2026-02-26.mkv"]) + d = game_state_to_dict(gs) + assert d["segment_outputs"] == ["period-1_2026-02-26.mkv"] + + +def test_game_state_to_dict_with_highlights_output() -> None: + gi = GameInfo(date="2026-02-26", home_team="a", away_team="b", sport="hockey") + gs = GameState(game_info=gi, highlights_output="a_vs_b_2026-02-26.mkv") + d = game_state_to_dict(gs) + assert d["highlights_output"] == "a_vs_b_2026-02-26.mkv" def test_game_state_to_dict_with_livestreams() -> None: @@ -662,6 +797,64 @@ def test_dict_to_game_state_defaults() -> None: assert gs.finished_at == "" assert gs.renders == [] assert gs.events == [] + assert gs.segment_outputs == [] + assert gs.highlights_output == "" + + +def test_dict_to_game_state_with_segment_outputs() -> None: + d = { + "game_info": { + "date": "2026-02-26", + "home_team": "a", + "away_team": "b", + "sport": "generic", + }, + "segment_outputs": ["period-1_2026-02-26.mkv", "period-2_2026-02-26.mkv"], + } + gs = dict_to_game_state(d) + assert gs.segment_outputs == ["period-1_2026-02-26.mkv", "period-2_2026-02-26.mkv"] + + +def test_dict_to_game_state_with_highlights_output() -> None: + d = { + "game_info": { + "date": "2026-02-26", + "home_team": "a", + "away_team": "b", + "sport": "generic", + }, + "highlights_output": "a_vs_b_2026-02-26.mkv", + } + gs = dict_to_game_state(d) + assert gs.highlights_output == "a_vs_b_2026-02-26.mkv" + + +def test_dict_to_game_state_segment_outputs_missing() -> None: + """Backward compatibility: missing segment_outputs defaults to empty list.""" + d = { + "game_info": { + "date": "2026-02-26", + "home_team": "a", + "away_team": "b", + "sport": "generic", + }, + } + gs = dict_to_game_state(d) + assert gs.segment_outputs == [] + + +def 
test_dict_to_game_state_highlights_output_missing() -> None: + """Backward compatibility: missing highlights_output defaults to empty string.""" + d = { + "game_info": { + "date": "2026-02-26", + "home_team": "a", + "away_team": "b", + "sport": "generic", + }, + } + gs = dict_to_game_state(d) + assert gs.highlights_output == "" def test_dict_to_game_state_with_livestreams() -> None: @@ -794,5 +987,7 @@ def test_game_state_round_trip() -> None: renders=[entry], events=[ev], livestreams={"google": "https://youtube.com/live/abc123"}, + segment_outputs=["quarter-1_2026-03-01.mkv", "quarter-2_2026-03-01.mkv"], + highlights_output="x_vs_y_2026-03-01.mkv", ) assert dict_to_game_state(game_state_to_dict(gs)) == gs diff --git a/tests/unit/models/test_profile.py b/tests/unit/models/test_profile.py index 4b6684d..39f4aef 100644 --- a/tests/unit/models/test_profile.py +++ b/tests/unit/models/test_profile.py @@ -7,6 +7,7 @@ from reeln.models.profile import ( IterationConfig, RenderProfile, + SpeedSegment, dict_to_iteration_config, dict_to_render_profile, iteration_config_to_dict, @@ -24,7 +25,10 @@ def test_render_profile_defaults() -> None: assert profile.width is None assert profile.height is None assert profile.crop_mode is None + assert profile.scale is None + assert profile.smart is None assert profile.speed is None + assert profile.speed_segments is None assert profile.lut is None assert profile.subtitle_template is None assert profile.codec is None @@ -49,6 +53,12 @@ def test_render_profile_with_values() -> None: assert profile.subtitle_template == "goal.ass" +def test_render_profile_scale_and_smart() -> None: + profile = RenderProfile(name="zoomed", scale=1.5, smart=True) + assert profile.scale == 1.5 + assert profile.smart is True + + def test_render_profile_all_fields() -> None: profile = RenderProfile( name="full", @@ -58,6 +68,8 @@ def test_render_profile_all_fields() -> None: anchor_x=0.3, anchor_y=0.7, pad_color="white", + scale=1.3, + smart=True, speed=2.0, 
lut="cinematic.cube", subtitle_template="overlay.ass", @@ -72,6 +84,8 @@ def test_render_profile_all_fields() -> None: assert profile.anchor_x == 0.3 assert profile.anchor_y == 0.7 assert profile.pad_color == "white" + assert profile.scale == 1.3 + assert profile.smart is True assert profile.preset == "slow" assert profile.audio_codec == "opus" assert profile.audio_bitrate == "192k" @@ -83,6 +97,39 @@ def test_render_profile_is_frozen() -> None: profile.name = "other" # type: ignore[misc] +# --------------------------------------------------------------------------- +# SpeedSegment +# --------------------------------------------------------------------------- + + +def test_speed_segment_creation() -> None: + seg = SpeedSegment(speed=0.5, until=5.0) + assert seg.speed == 0.5 + assert seg.until == 5.0 + + +def test_speed_segment_defaults() -> None: + seg = SpeedSegment(speed=1.0) + assert seg.until is None + + +def test_speed_segment_is_frozen() -> None: + seg = SpeedSegment(speed=1.0) + with pytest.raises(AttributeError): + seg.speed = 2.0 # type: ignore[misc] + + +def test_render_profile_with_speed_segments() -> None: + segs = ( + SpeedSegment(speed=1.0, until=5.0), + SpeedSegment(speed=0.5, until=8.0), + SpeedSegment(speed=1.0), + ) + profile = RenderProfile(name="variable", speed_segments=segs) + assert profile.speed_segments == segs + assert profile.speed is None + + # --------------------------------------------------------------------------- # IterationConfig # --------------------------------------------------------------------------- @@ -158,6 +205,19 @@ def test_render_profile_to_dict_with_values() -> None: assert d == {"speed": 0.5, "codec": "libx265", "crf": 22} +def test_render_profile_to_dict_scale_and_smart() -> None: + profile = RenderProfile(name="zoomed", scale=1.5, smart=True) + d = render_profile_to_dict(profile) + assert d == {"scale": 1.5, "smart": True} + + +def test_render_profile_to_dict_smart_false_included() -> None: + """smart=False is not 
None, so it should be serialized.""" + profile = RenderProfile(name="no-smart", smart=False) + d = render_profile_to_dict(profile) + assert d == {"smart": False} + + def test_render_profile_to_dict_all_fields() -> None: profile = RenderProfile( name="full", @@ -167,6 +227,8 @@ def test_render_profile_to_dict_all_fields() -> None: anchor_x=0.5, anchor_y=0.5, pad_color="black", + scale=1.3, + smart=True, speed=1.0, lut="warm.cube", subtitle_template="overlay.ass", @@ -177,7 +239,7 @@ def test_render_profile_to_dict_all_fields() -> None: audio_bitrate="128k", ) d = render_profile_to_dict(profile) - assert len(d) == 14 # all fields except name + assert len(d) == 16 # all fields except name def test_dict_to_render_profile_minimal() -> None: @@ -195,6 +257,21 @@ def test_dict_to_render_profile_with_values() -> None: assert profile.codec == "libx265" assert profile.crf == 22 assert profile.width is None + assert profile.scale is None + assert profile.smart is None + + +def test_dict_to_render_profile_scale_and_smart() -> None: + data = {"scale": 1.3, "smart": True} + profile = dict_to_render_profile("zoomed", data) + assert profile.scale == 1.3 + assert profile.smart is True + + +def test_dict_to_render_profile_smart_false() -> None: + data = {"smart": False} + profile = dict_to_render_profile("no-smart", data) + assert profile.smart is False def test_render_profile_round_trip() -> None: @@ -206,6 +283,8 @@ def test_render_profile_round_trip() -> None: anchor_x=0.5, anchor_y=0.5, pad_color="black", + scale=1.3, + smart=True, speed=0.5, lut="warm.cube", subtitle_template="overlay.ass", @@ -220,6 +299,72 @@ def test_render_profile_round_trip() -> None: assert restored == original +# --------------------------------------------------------------------------- +# Serialization: SpeedSegment / speed_segments +# --------------------------------------------------------------------------- + + +def test_render_profile_to_dict_speed_segments() -> None: + segs = ( + 
SpeedSegment(speed=1.0, until=5.0), + SpeedSegment(speed=0.5, until=8.0), + SpeedSegment(speed=1.0), + ) + profile = RenderProfile(name="var", speed_segments=segs) + d = render_profile_to_dict(profile) + assert d == { + "speed_segments": [ + {"speed": 1.0, "until": 5.0}, + {"speed": 0.5, "until": 8.0}, + {"speed": 1.0}, + ] + } + + +def test_render_profile_to_dict_speed_segments_none() -> None: + profile = RenderProfile(name="no-segs") + d = render_profile_to_dict(profile) + assert "speed_segments" not in d + + +def test_dict_to_render_profile_speed_segments() -> None: + data = { + "speed_segments": [ + {"speed": 1.0, "until": 5.0}, + {"speed": 0.5, "until": 8.0}, + {"speed": 1.0}, + ] + } + profile = dict_to_render_profile("var", data) + assert profile.speed_segments is not None + assert len(profile.speed_segments) == 3 + assert profile.speed_segments[0] == SpeedSegment(speed=1.0, until=5.0) + assert profile.speed_segments[1] == SpeedSegment(speed=0.5, until=8.0) + assert profile.speed_segments[2] == SpeedSegment(speed=1.0, until=None) + + +def test_dict_to_render_profile_speed_segments_none() -> None: + profile = dict_to_render_profile("no-segs", {}) + assert profile.speed_segments is None + + +def test_dict_to_render_profile_speed_segments_non_list_ignored() -> None: + profile = dict_to_render_profile("bad", {"speed_segments": "invalid"}) + assert profile.speed_segments is None + + +def test_speed_segments_round_trip() -> None: + segs = ( + SpeedSegment(speed=1.0, until=5.0), + SpeedSegment(speed=0.5, until=8.0), + SpeedSegment(speed=1.0), + ) + original = RenderProfile(name="var", speed_segments=segs) + d = render_profile_to_dict(original) + restored = dict_to_render_profile("var", d) + assert restored == original + + # --------------------------------------------------------------------------- # Serialization: IterationConfig # --------------------------------------------------------------------------- diff --git a/tests/unit/models/test_short.py 
b/tests/unit/models/test_short.py index 24dbd8c..0abec54 100644 --- a/tests/unit/models/test_short.py +++ b/tests/unit/models/test_short.py @@ -28,9 +28,19 @@ def test_crop_mode_crop() -> None: assert CropMode.CROP.value == "crop" +def test_crop_mode_smart() -> None: + assert CropMode.SMART.value == "smart" + + +def test_crop_mode_smart_pad() -> None: + assert CropMode.SMART_PAD.value == "smart_pad" + + def test_crop_mode_from_string() -> None: assert CropMode("pad") == CropMode.PAD assert CropMode("crop") == CropMode.CROP + assert CropMode("smart") == CropMode.SMART + assert CropMode("smart_pad") == CropMode.SMART_PAD def test_output_format_vertical() -> None: @@ -104,6 +114,8 @@ def test_short_config_defaults(tmp_path: Path) -> None: assert cfg.crop_mode == CropMode.PAD assert cfg.anchor_x == 0.5 assert cfg.anchor_y == 0.5 + assert cfg.scale == 1.0 + assert cfg.smart is False assert cfg.pad_color == "black" assert cfg.speed == 1.0 assert cfg.lut is None @@ -113,6 +125,7 @@ def test_short_config_defaults(tmp_path: Path) -> None: assert cfg.crf == 18 assert cfg.audio_codec == "aac" assert cfg.audio_bitrate == "128k" + assert cfg.smart_zoom_frames == 5 def test_short_config_custom_values(tmp_path: Path) -> None: @@ -146,6 +159,32 @@ def test_short_config_custom_values(tmp_path: Path) -> None: assert cfg.codec == "libx265" +def test_short_config_scale_custom(tmp_path: Path) -> None: + cfg = ShortConfig(input=tmp_path / "clip.mkv", output=tmp_path / "out.mp4", scale=1.5) + assert cfg.scale == 1.5 + + +def test_short_config_smart_custom(tmp_path: Path) -> None: + cfg = ShortConfig(input=tmp_path / "clip.mkv", output=tmp_path / "out.mp4", smart=True) + assert cfg.smart is True + + +def test_short_config_smart_zoom_frames_custom(tmp_path: Path) -> None: + cfg = ShortConfig(input=tmp_path / "clip.mkv", output=tmp_path / "out.mp4", smart_zoom_frames=10) + assert cfg.smart_zoom_frames == 10 + + +def test_short_config_branding_default_none(tmp_path: Path) -> None: + cfg = 
ShortConfig(input=tmp_path / "clip.mkv", output=tmp_path / "out.mp4") + assert cfg.branding is None + + +def test_short_config_branding_custom(tmp_path: Path) -> None: + brand = tmp_path / "brand.ass" + cfg = ShortConfig(input=tmp_path / "clip.mkv", output=tmp_path / "out.mp4", branding=brand) + assert cfg.branding == brand + + def test_short_config_is_frozen(tmp_path: Path) -> None: cfg = ShortConfig(input=tmp_path / "clip.mkv", output=tmp_path / "out.mp4") with pytest.raises(AttributeError): diff --git a/tests/unit/models/test_team.py b/tests/unit/models/test_team.py index 0dd2bb3..c2d26d9 100644 --- a/tests/unit/models/test_team.py +++ b/tests/unit/models/test_team.py @@ -3,6 +3,7 @@ from __future__ import annotations from reeln.models.team import ( + RosterEntry, TeamProfile, dict_to_team_profile, team_profile_to_dict, @@ -164,3 +165,21 @@ def test_team_profile_round_trip() -> None: def test_team_profile_round_trip_defaults() -> None: tp = TeamProfile(team_name="X", short_name="X", level="jv") assert dict_to_team_profile(team_profile_to_dict(tp)) == tp + + +# --------------------------------------------------------------------------- +# RosterEntry +# --------------------------------------------------------------------------- + + +def test_roster_entry_fields() -> None: + entry = RosterEntry(number="48", name="John Smith", position="C") + assert entry.number == "48" + assert entry.name == "John Smith" + assert entry.position == "C" + + +def test_roster_entry_equality() -> None: + a = RosterEntry(number="10", name="Jane Doe", position="D") + b = RosterEntry(number="10", name="Jane Doe", position="D") + assert a == b diff --git a/tests/unit/models/test_zoom.py b/tests/unit/models/test_zoom.py new file mode 100644 index 0000000..f8a03ef --- /dev/null +++ b/tests/unit/models/test_zoom.py @@ -0,0 +1,134 @@ +"""Tests for smart target zoom data models.""" + +from __future__ import annotations + +from pathlib import Path + +import pytest + +from reeln.models.zoom 
import ExtractedFrames, ZoomPath, ZoomPoint + +# --------------------------------------------------------------------------- +# ZoomPoint +# --------------------------------------------------------------------------- + + +def test_zoom_point_defaults() -> None: + p = ZoomPoint(timestamp=1.0, center_x=0.5, center_y=0.5) + assert p.timestamp == 1.0 + assert p.center_x == 0.5 + assert p.center_y == 0.5 + assert p.confidence == 1.0 + + +def test_zoom_point_custom_confidence() -> None: + p = ZoomPoint(timestamp=2.5, center_x=0.3, center_y=0.7, confidence=0.85) + assert p.confidence == 0.85 + + +def test_zoom_point_is_frozen() -> None: + p = ZoomPoint(timestamp=1.0, center_x=0.5, center_y=0.5) + with pytest.raises(AttributeError): + p.center_x = 0.6 # type: ignore[misc] + + +def test_zoom_point_boundary_values() -> None: + p = ZoomPoint(timestamp=0.0, center_x=0.0, center_y=0.0, confidence=0.0) + assert p.timestamp == 0.0 + assert p.center_x == 0.0 + assert p.center_y == 0.0 + assert p.confidence == 0.0 + + +def test_zoom_point_max_boundary() -> None: + p = ZoomPoint(timestamp=300.0, center_x=1.0, center_y=1.0, confidence=1.0) + assert p.center_x == 1.0 + assert p.center_y == 1.0 + + +# --------------------------------------------------------------------------- +# ZoomPath +# --------------------------------------------------------------------------- + + +def test_zoom_path_single_point() -> None: + pt = ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5) + path = ZoomPath(points=(pt,), source_width=1920, source_height=1080, duration=10.0) + assert len(path.points) == 1 + assert path.source_width == 1920 + assert path.source_height == 1080 + assert path.duration == 10.0 + + +def test_zoom_path_multiple_points() -> None: + pts = ( + ZoomPoint(timestamp=0.0, center_x=0.3, center_y=0.5), + ZoomPoint(timestamp=5.0, center_x=0.7, center_y=0.5), + ZoomPoint(timestamp=10.0, center_x=0.5, center_y=0.5), + ) + path = ZoomPath(points=pts, source_width=1920, source_height=1080, 
duration=10.0) + assert len(path.points) == 3 + assert path.points[0].center_x == 0.3 + assert path.points[2].center_x == 0.5 + + +def test_zoom_path_is_frozen() -> None: + pt = ZoomPoint(timestamp=0.0, center_x=0.5, center_y=0.5) + path = ZoomPath(points=(pt,), source_width=1920, source_height=1080, duration=10.0) + with pytest.raises(AttributeError): + path.duration = 20.0 # type: ignore[misc] + + +def test_zoom_path_empty_points() -> None: + path = ZoomPath(points=(), source_width=1920, source_height=1080, duration=10.0) + assert len(path.points) == 0 + + +# --------------------------------------------------------------------------- +# ExtractedFrames +# --------------------------------------------------------------------------- + + +def test_extracted_frames_basic(tmp_path: Path) -> None: + frames = (tmp_path / "frame_0.png", tmp_path / "frame_1.png") + timestamps = (0.0, 5.0) + ef = ExtractedFrames( + frame_paths=frames, + timestamps=timestamps, + source_width=1920, + source_height=1080, + duration=10.0, + fps=59.94, + ) + assert len(ef.frame_paths) == 2 + assert len(ef.timestamps) == 2 + assert ef.source_width == 1920 + assert ef.source_height == 1080 + assert ef.duration == 10.0 + assert ef.fps == 59.94 + + +def test_extracted_frames_is_frozen(tmp_path: Path) -> None: + ef = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(0.0,), + source_width=1920, + source_height=1080, + duration=10.0, + fps=30.0, + ) + with pytest.raises(AttributeError): + ef.fps = 60.0 # type: ignore[misc] + + +def test_extracted_frames_single_frame(tmp_path: Path) -> None: + ef = ExtractedFrames( + frame_paths=(tmp_path / "f.png",), + timestamps=(5.0,), + source_width=3840, + source_height=2160, + duration=30.0, + fps=60.0, + ) + assert len(ef.frame_paths) == 1 + assert ef.timestamps[0] == 5.0 diff --git a/tests/unit/plugins/test_hooks.py b/tests/unit/plugins/test_hooks.py index ec0aeb7..9883839 100644 --- a/tests/unit/plugins/test_hooks.py +++ 
b/tests/unit/plugins/test_hooks.py @@ -22,11 +22,12 @@ def test_hook_values() -> None: assert Hook.ON_HIGHLIGHTS_MERGED.value == "on_highlights_merged" assert Hook.ON_SEGMENT_START.value == "on_segment_start" assert Hook.ON_SEGMENT_COMPLETE.value == "on_segment_complete" + assert Hook.ON_FRAMES_EXTRACTED.value == "on_frames_extracted" assert Hook.ON_ERROR.value == "on_error" def test_hook_enum_count() -> None: - assert len(Hook) == 13 + assert len(Hook) == 14 def test_hook_members_unique() -> None: diff --git a/tests/unit/plugins/test_loader.py b/tests/unit/plugins/test_loader.py index b3d5c5b..3cd4c0d 100644 --- a/tests/unit/plugins/test_loader.py +++ b/tests/unit/plugins/test_loader.py @@ -19,9 +19,11 @@ _parse_allowed_hooks, _register_plugin_hooks, activate_plugins, + collect_doctor_checks, discover_plugins, load_enabled_plugins, load_plugin, + set_enforce_hooks_override, ) from reeln.plugins.registry import HookRegistry, get_registry, reset_registry @@ -280,10 +282,7 @@ def test_load_enabled_plugins_not_installed_logs_debug(caplog: pytest.LogCapture assert "missing" not in result # Should appear in debug log, not warning - assert any( - "not installed" in r.message and r.levelno == logging.DEBUG - for r in caplog.records - ) + assert any("not installed" in r.message and r.levelno == logging.DEBUG for r in caplog.records) def test_load_enabled_plugins_with_settings() -> None: @@ -489,9 +488,7 @@ def test_register_plugin_hooks_with_allowed_hooks_blocks_undeclared() -> None: """Auto-discovered hooks not in allowed set are blocked.""" registry = HookRegistry() plugin = _AutoDiscoverPlugin() - _register_plugin_hooks( - "auto", plugin, registry, allowed_hooks={Hook.ON_GAME_INIT} - ) + _register_plugin_hooks("auto", plugin, registry, allowed_hooks={Hook.ON_GAME_INIT}) # ON_GAME_INIT is allowed assert registry.has_handlers(Hook.ON_GAME_INIT) @@ -503,9 +500,7 @@ def test_register_plugin_hooks_with_allowed_hooks_explicit_register() -> None: """Explicit register() with 
allowed_hooks wraps registry in FilteredRegistry.""" registry = HookRegistry() plugin = _ExplicitRegisterPlugin() - _register_plugin_hooks( - "explicit", plugin, registry, allowed_hooks={Hook.ON_GAME_INIT} - ) + _register_plugin_hooks("explicit", plugin, registry, allowed_hooks={Hook.ON_GAME_INIT}) assert registry.has_handlers(Hook.ON_GAME_INIT) @@ -522,13 +517,9 @@ def test_activate_plugins_enforce_hooks_disabled() -> None: with ( patch("reeln.plugins.loader.importlib.metadata.entry_points", return_value=[ep]), - patch( - "reeln.plugins.loader._fetch_registry_capabilities" - ) as mock_fetch, + patch("reeln.plugins.loader._fetch_registry_capabilities") as mock_fetch, ): - result = activate_plugins( - PluginsConfig(enabled=["auto"], enforce_hooks=False) - ) + result = activate_plugins(PluginsConfig(enabled=["auto"], enforce_hooks=False)) # Registry should NOT have been fetched mock_fetch.assert_not_called() @@ -541,9 +532,7 @@ def test_activate_plugins_enforce_hooks_default_true() -> None: ep = _make_entry_point("test", _NoConfigPlugin) with ( patch("reeln.plugins.loader.importlib.metadata.entry_points", return_value=[ep]), - patch( - "reeln.plugins.loader._fetch_registry_capabilities", return_value={} - ) as mock_fetch, + patch("reeln.plugins.loader._fetch_registry_capabilities", return_value={}) as mock_fetch, ): activate_plugins(PluginsConfig(enabled=["test"])) @@ -563,3 +552,157 @@ def test_activate_plugins_idempotent() -> None: handlers = registry._handlers.get(Hook.ON_GAME_INIT, []) assert len(handlers) == 1 reset_registry() + + +# --------------------------------------------------------------------------- +# collect_doctor_checks +# --------------------------------------------------------------------------- + + +def test_collect_doctor_checks_from_plugin() -> None: + """Collects DoctorCheck instances from plugins that expose doctor_checks().""" + from reeln.models.doctor import CheckResult, CheckStatus, DoctorCheck + + class MyCheck: + name = "my_check" + + 
def run(self) -> list[CheckResult]: + return [CheckResult(name="my_check", status=CheckStatus.PASS, message="ok")] + + class PluginWithDoctor: + name = "test-plugin" + + def doctor_checks(self) -> list[DoctorCheck]: + return [MyCheck()] + + loaded = {"test-plugin": PluginWithDoctor()} + checks = collect_doctor_checks(loaded) + + assert len(checks) == 1 + results = checks[0].run() + assert len(results) == 1 + assert results[0].status == CheckStatus.PASS + + +def test_collect_doctor_checks_skips_plugins_without() -> None: + """Plugins without doctor_checks() are silently skipped.""" + + class PlainPlugin: + name = "plain" + + loaded = {"plain": PlainPlugin()} + checks = collect_doctor_checks(loaded) + + assert checks == [] + + +def test_collect_doctor_checks_handles_failure(caplog: pytest.LogCaptureFixture) -> None: + """Failures in doctor_checks() are logged and skipped.""" + + class BadPlugin: + name = "bad" + + def doctor_checks(self) -> list[object]: + raise RuntimeError("boom") + + loaded = {"bad": BadPlugin()} + with caplog.at_level(logging.WARNING): + checks = collect_doctor_checks(loaded) + + assert checks == [] + assert "bad" in caplog.text + assert "doctor_checks()" in caplog.text + + +def test_collect_doctor_checks_multiple_plugins() -> None: + """Collects checks from multiple plugins.""" + from reeln.models.doctor import CheckResult, CheckStatus + + class CheckA: + name = "check_a" + + def run(self) -> list[CheckResult]: + return [CheckResult(name="check_a", status=CheckStatus.PASS, message="a ok")] + + class CheckB: + name = "check_b" + + def run(self) -> list[CheckResult]: + return [CheckResult(name="check_b", status=CheckStatus.WARN, message="b warn")] + + class PluginA: + name = "plugin-a" + + def doctor_checks(self) -> list[object]: + return [CheckA()] + + class PluginB: + name = "plugin-b" + + def doctor_checks(self) -> list[object]: + return [CheckB()] + + loaded = {"plugin-a": PluginA(), "plugin-b": PluginB()} + checks = 
collect_doctor_checks(loaded) + + assert len(checks) == 2 + + +def test_collect_doctor_checks_empty() -> None: + """Empty loaded plugins returns empty list.""" + assert collect_doctor_checks({}) == [] + + +# --------------------------------------------------------------------------- +# set_enforce_hooks_override (CLI --no-enforce-hooks) +# --------------------------------------------------------------------------- + + +def test_set_enforce_hooks_override_disables_enforcement() -> None: + """CLI override disables hook enforcement even when config says True.""" + ep = _make_entry_point("auto", _AutoDiscoverPlugin) + + set_enforce_hooks_override(disable=True) + try: + with ( + patch("reeln.plugins.loader.importlib.metadata.entry_points", return_value=[ep]), + patch("reeln.plugins.loader._fetch_registry_capabilities") as mock_fetch, + ): + result = activate_plugins(PluginsConfig(enabled=["auto"], enforce_hooks=True)) + + # Registry fetch should NOT have been called despite enforce_hooks=True + mock_fetch.assert_not_called() + assert "auto" in result + finally: + set_enforce_hooks_override(disable=False) + reset_registry() + + +def test_set_enforce_hooks_override_reset_restores_enforcement() -> None: + """Re-enabling enforcement restores the default behavior.""" + ep = _make_entry_point("test", _NoConfigPlugin) + + set_enforce_hooks_override(disable=True) + set_enforce_hooks_override(disable=False) + + with ( + patch("reeln.plugins.loader.importlib.metadata.entry_points", return_value=[ep]), + patch("reeln.plugins.loader._fetch_registry_capabilities", return_value={}) as mock_fetch, + ): + activate_plugins(PluginsConfig(enabled=["test"], enforce_hooks=True)) + + mock_fetch.assert_called_once() + reset_registry() + + +def test_detect_capabilities_includes_doctor() -> None: + """Plugins with doctor_checks() are detected as having the doctor capability.""" + + class PluginWithDoctor: + name = "test" + + def doctor_checks(self) -> list[object]: + return [] + + caps = 
_detect_capabilities(PluginWithDoctor()) + assert "doctor" in caps From 1cdd24c5fe147f49cafa54143eeefe104dfd8658 Mon Sep 17 00:00:00 2001 From: jremitz Date: Tue, 24 Mar 2026 07:24:17 -0500 Subject: [PATCH 2/4] fix: add py.typed marker for mypy type checking in CI Co-Authored-By: Claude --- reeln/py.typed | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 reeln/py.typed diff --git a/reeln/py.typed b/reeln/py.typed new file mode 100644 index 0000000..e69de29 From 87d3faa901402da540c0f3574ed8a7f36bdcca35 Mon Sep 17 00:00:00 2001 From: jremitz Date: Tue, 24 Mar 2026 07:24:45 -0500 Subject: [PATCH 3/4] fix: use text lexer for CSV block in docs (csv not recognized by Pygments) Co-Authored-By: Claude --- docs/guide/configuration.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guide/configuration.md b/docs/guide/configuration.md index 7da3b32..7d65d0f 100644 --- a/docs/guide/configuration.md +++ b/docs/guide/configuration.md @@ -451,7 +451,7 @@ Team profiles are stored as JSON files in the config directory under `teams/{lev 2. 
**Create the roster CSV** with `number`, `name`, and `position` columns: -```csv +```text number,name,position 48,John Smith,C 24,Jane Doe,D From a55c7b61ef718aaae4dff506048a8fd527efc1c3 Mon Sep 17 00:00:00 2001 From: jremitz Date: Tue, 24 Mar 2026 07:30:57 -0500 Subject: [PATCH 4/4] fix: include branding module and template that were missed in staging Co-Authored-By: Claude --- reeln/core/branding.py | 62 +++++++++++++++++ reeln/data/templates/branding.ass | 12 ++++ tests/unit/core/test_branding.py | 108 ++++++++++++++++++++++++++++++ 3 files changed, 182 insertions(+) create mode 100644 reeln/core/branding.py create mode 100644 reeln/data/templates/branding.ass create mode 100644 tests/unit/core/test_branding.py diff --git a/reeln/core/branding.py b/reeln/core/branding.py new file mode 100644 index 0000000..0c1e6b9 --- /dev/null +++ b/reeln/core/branding.py @@ -0,0 +1,62 @@ +"""Branding overlay resolution and context building.""" + +from __future__ import annotations + +import logging +import os +import tempfile +from pathlib import Path + +import reeln +from reeln.core.errors import RenderError +from reeln.core.log import get_logger +from reeln.core.templates import format_ass_time, render_template_file +from reeln.models.branding import BrandingConfig +from reeln.models.template import TemplateContext + +log: logging.Logger = get_logger(__name__) + + +def build_branding_context(duration: float) -> TemplateContext: + """Build template context for the branding overlay. + + Provides ``version`` (from ``reeln.__version__``) and + ``branding_end`` (ASS-formatted end timestamp). + """ + return TemplateContext( + variables={ + "version": f"v{reeln.__version__}", + "branding_end": format_ass_time(duration), + } + ) + + +def resolve_branding(config: BrandingConfig, output_dir: Path) -> Path | None: + """Resolve and render the branding template to a temp ``.ass`` file. + + Returns ``None`` when branding is disabled. 
The caller is + responsible for cleaning up the returned temp file. + """ + if not config.enabled: + return None + + ctx = build_branding_context(config.duration) + + if config.template.startswith("builtin:"): + from reeln.core.overlay import resolve_builtin_template + + template_name = config.template.removeprefix("builtin:") + template_path = resolve_builtin_template(template_name) + else: + template_path = Path(config.template).expanduser() + + rendered = render_template_file(template_path, ctx) + fd, tmp_path = tempfile.mkstemp(suffix=".ass", dir=str(output_dir)) + os.close(fd) + tmp = Path(tmp_path) + try: + tmp.write_text(rendered, encoding="utf-8") + except OSError as exc: + tmp.unlink(missing_ok=True) + raise RenderError(f"Failed to write rendered branding: {exc}") from exc + return tmp diff --git a/reeln/data/templates/branding.ass b/reeln/data/templates/branding.ass new file mode 100644 index 0000000..3ea3b1a --- /dev/null +++ b/reeln/data/templates/branding.ass @@ -0,0 +1,12 @@ +[Script Info] +ScriptType: v4.00+ +PlayResX: 1080 +PlayResY: 1920 + +[V4+ Styles] +Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding +Style: BrandText, Inter, 26, &H00FFFFFF, &H00FFFFFF, &H00000000, &H80000000, 1,0,0,0,100,100,1,0,1,3,2,8,0,0,0,1 + +[Events] +Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text +Dialogue: 0,0:00:00.00,{{branding_end}},BrandText,,0,0,0,,{\fad(300,800)\an8\pos(540,100)}reeln {{version}} by https://streamn.dad diff --git a/tests/unit/core/test_branding.py b/tests/unit/core/test_branding.py new file mode 100644 index 0000000..834987d --- /dev/null +++ b/tests/unit/core/test_branding.py @@ -0,0 +1,108 @@ +"""Tests for branding overlay resolution and context building.""" + +from __future__ import annotations + +from pathlib import Path +from 
unittest.mock import patch + +import pytest + +import reeln +from reeln.core.branding import build_branding_context, resolve_branding +from reeln.core.errors import RenderError +from reeln.core.templates import format_ass_time +from reeln.models.branding import BrandingConfig + +# --------------------------------------------------------------------------- +# build_branding_context +# --------------------------------------------------------------------------- + + +class TestBuildBrandingContext: + def test_version_included(self) -> None: + ctx = build_branding_context(3.0) + assert ctx.get("version") == f"v{reeln.__version__}" + + def test_branding_end_format(self) -> None: + ctx = build_branding_context(3.0) + assert ctx.get("branding_end") == format_ass_time(3.0) + + def test_custom_duration(self) -> None: + ctx = build_branding_context(5.0) + assert ctx.get("branding_end") == format_ass_time(5.0) + + def test_zero_duration(self) -> None: + ctx = build_branding_context(0.0) + assert ctx.get("branding_end") == format_ass_time(0.0) + + +# --------------------------------------------------------------------------- +# resolve_branding +# --------------------------------------------------------------------------- + + +class TestResolveBranding: + def test_disabled_returns_none(self, tmp_path: Path) -> None: + config = BrandingConfig(enabled=False) + result = resolve_branding(config, tmp_path) + assert result is None + + def test_builtin_template_renders(self, tmp_path: Path) -> None: + config = BrandingConfig() + result = resolve_branding(config, tmp_path) + assert result is not None + assert result.is_file() + assert result.suffix == ".ass" + content = result.read_text(encoding="utf-8") + assert f"v{reeln.__version__}" in content + result.unlink() + + def test_builtin_template_contains_fade(self, tmp_path: Path) -> None: + config = BrandingConfig() + result = resolve_branding(config, tmp_path) + assert result is not None + content = result.read_text(encoding="utf-8") 
+        assert "\\fad(300,800)" in content
+        result.unlink()
+
+    def test_builtin_template_contains_branding_end(self, tmp_path: Path) -> None:
+        config = BrandingConfig(duration=5.0)
+        result = resolve_branding(config, tmp_path)
+        assert result is not None
+        content = result.read_text(encoding="utf-8")
+        assert format_ass_time(5.0) in content
+        result.unlink()
+
+    def test_custom_template(self, tmp_path: Path) -> None:
+        template = tmp_path / "custom.ass"
+        template.write_text(
+            "[Script Info]\nScriptType: v4.00+\n\n[Events]\n"
+            "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n"
+            "Dialogue: 0,0:00:00.00,{{branding_end}},Default,,0,0,0,,custom {{version}}\n",
+            encoding="utf-8",
+        )
+        config = BrandingConfig(template=str(template), duration=5.0)
+        result = resolve_branding(config, tmp_path)
+        assert result is not None
+        content = result.read_text(encoding="utf-8")
+        assert f"v{reeln.__version__}" in content
+        assert format_ass_time(5.0) in content
+        result.unlink()
+
+    def test_missing_custom_template(self, tmp_path: Path) -> None:
+        config = BrandingConfig(template="/nonexistent/branding.ass")
+        with pytest.raises(RenderError, match="Template file not found"):
+            resolve_branding(config, tmp_path)
+
+    def test_missing_builtin_template(self, tmp_path: Path) -> None:
+        config = BrandingConfig(template="builtin:nonexistent")
+        with pytest.raises(RenderError, match="Builtin template not found"):
+            resolve_branding(config, tmp_path)
+
+    def test_write_failure(self, tmp_path: Path) -> None:
+        config = BrandingConfig()
+        with (
+            patch("reeln.core.branding.Path.write_text", side_effect=OSError("disk full")),
+            pytest.raises(RenderError, match="Failed to write rendered branding"),
+        ):
+            resolve_branding(config, tmp_path)