diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 0000000..3459047
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,49 @@
+# This file was created automatically with `jupyter-book init --gh-pages` 🪄 💚
+# Ensure your GitHub Pages settings for this repository are set to deploy with **GitHub Actions**.
+
+name: Jupyter Book (via myst) GitHub Pages Deploy
+on:
+  push:
+    # Runs on pushes targeting the default branch
+    branches: [master]
+env:
+  # `BASE_URL` determines, relative to the root of the domain, the URL that your site is served from.
+  # E.g., if your site lives at `https://mydomain.org/myproject`, set `BASE_URL=/myproject`.
+  # If, instead, your site lives at the root of the domain, at `https://mydomain.org`, set `BASE_URL=''`.
+  BASE_URL: /${{ github.event.repository.name }}
+
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+permissions:
+  contents: read
+  pages: write
+  id-token: write
+# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
+concurrency:
+  group: 'pages'
+  cancel-in-progress: false
+jobs:
+  deploy:
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Setup Pages
+        uses: actions/configure-pages@v3
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 18.x
+      - name: Install Jupyter Book (via myst)
+        run: npm install -g jupyter-book
+      - name: Build HTML Assets
+        working-directory: ./docs  # myst.yml lives in docs/, so build from there
+        run: jupyter-book build --html
+      - name: Upload artifact
+        uses: actions/upload-pages-artifact@v3
+        with:
+          path: './docs/_build/html'
+      - name: Deploy to GitHub Pages
+        id: deployment
+        uses: actions/deploy-pages@v4
diff --git a/.gitignore b/.gitignore
index 7c5b18d..7480fdc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -113,3 +113,6 @@ venv.bak/
 !dlclivegui/config.py
 # uv package files
 uv.lock
+
+# Built jupyter-book
+docs/_build/
diff --git a/docs/_static/custom.css b/docs/_static/custom.css
new file mode 100644
index 0000000..63f13ac
--- /dev/null
+++ b/docs/_static/custom.css
@@ -0,0 +1,138 @@
+/* ------------- Header background + text ------------- */
+:root {
+  --dlc-header-bg: rgba(149, 72, 156, 0.85); /* #95489c with alpha */
+}
+html.dark {
+  --dlc-header-bg: rgba(69, 83, 100, 0.85); /* #455364 with alpha */
+}
+/* Header background (frosted) */
+.myst-top-nav {
+  background-color: var(--dlc-header-bg) !important;
+  backdrop-filter: blur(8px) !important;
+  -webkit-backdrop-filter: blur(8px) !important;
+}
+
+/* Dark mode (Tailwind-style: .dark on an ancestor is common in MyST themes) */
+html.dark .myst-top-nav {
+  background-color: var(--dlc-header-bg) !important;
+  backdrop-filter: blur(8px) !important;
+  -webkit-backdrop-filter: blur(8px) !important;
+}
+
+/* Make header links inherit header text color */
+.myst-top-nav a {
+  color: inherit !important;
+}
+
+/* 1) Keep header as-is; only fix the logo container */
+html.dark .myst-top-nav nav a > div {
+  background: transparent !important;
+  box-shadow: none !important;
+}
+
+/* Optional: also remove padding/border-radius if the wrapper looks like a badge */
+html.dark .myst-top-nav nav a > div {
+  border: 0 !important;
+}
+
+/* =========================================================
+   MyST code blocks (global)
+   Targets the structure:
+   div.myst-code > pre.myst-code-body.hljs > code.language-*
+   ========================================================= */
+
+:root {
+  /* Light mode */
+  --dlc-code-bg: rgba(149, 72, 156, 0.05);
+  --dlc-code-fg: #111827;
+  --dlc-code-border: #e5e7eb;
+  --dlc-code-shadow: 0 1px 3px rgba(0,0,0,0.08);
+
+  /* Accents */
+  --dlc-code-accent: #95489c;
+  --dlc-code-radius: 12px;
+}
+
+html.dark {
+  /* Dark mode */
+  --dlc-code-bg: rgba(69, 83, 100, 0.45);
+  --dlc-code-fg: #e5e7eb;
+  --dlc-code-border: rgba(229,231,235,0.14);
+  --dlc-code-shadow: 0 8px 30px rgba(0,0,0,0.45);
+
+  --dlc-code-accent: #c77dd1;
+}
+
+/* The outer wrapper (controls spacing + frame) */
+.myst-code {
+  border: 1px solid var(--dlc-code-border);
+  border-radius: var(--dlc-code-radius);
+  background: var(--dlc-code-bg);
+  box-shadow: var(--dlc-code-shadow);
+  overflow: hidden; /* makes rounded corners clip */
+}
+
+/* The <pre> element: pre.myst-code-body.hljs */
+.myst-code-body.hljs {
+  background-color: var(--dlc-code-bg) !important; /* overrides inline style "unset" */
+  color: var(--dlc-code-fg) !important;
+
+  margin: 0 !important;
+  padding: 0.9rem 1rem !important;
+  line-height: 1.45;
+  tab-size: 4;
+
+  /* Horizontal scroll for long lines */
+  overflow: auto;
+}
+
+/* Avoid any extra styling on the <code> tag */
+.myst-code-body code {
+  background: transparent !important;
+  color: inherit !important;
+  padding: 0 !important;
+}
+
+/* Base text inside code blocks */
+/* .myst-code-body.hljs {
+  color: var(--dlc-code-fg) !important;
+} */
+
+/* Token colors (HLJS) */
+/* .myst-code-body .hljs-built_in,
+.myst-code-body .hljs-keyword,
+.myst-code-body .hljs-selector-tag,
+.myst-code-body .hljs-literal {
+  color: #95489c;
+}
+
+
+.myst-code-body .hljs-string,
+.myst-code-body .hljs-title,
+.myst-code-body .hljs-attr,
+.myst-code-body .hljs-attribute {
+  color: #059669;
+}
+
+
+
+.myst-code-body .hljs-number,
+.myst-code-body .hljs-symbol,
+.myst-code-body .hljs-bullet {
+  color: #f59e0b;
+}
+
+.myst-code-body .hljs-comment,
+.myst-code-body .hljs-quote {
+  color: rgba(100,116,139,0.85);
+  font-style: italic;
+}
+
+html.dark .myst-code-body .hljs-comment,
+html.dark .myst-code-body .hljs-quote {
+  color: rgba(148,163,184,0.75);
+}
+*/
diff --git a/docs/_static/images/logo.png b/docs/_static/images/logo.png
new file mode 100644
index 0000000..ec77b4a
Binary files /dev/null and b/docs/_static/images/logo.png differ
diff --git a/docs/_static/images/main_window/main_window_startup.png b/docs/_static/images/main_window/main_window_startup.png
new file mode 100644
index 0000000..a59e38e
Binary files /dev/null and b/docs/_static/images/main_window/main_window_startup.png differ
diff --git a/docs/aravis_backend.md b/docs/cameras_backends/aravis_backend.md
similarity index 100%
rename from docs/aravis_backend.md
rename to docs/cameras_backends/aravis_backend.md
diff --git a/docs/camera_support.md b/docs/cameras_backends/camera_support.md
similarity index 100%
rename from docs/camera_support.md
rename to docs/cameras_backends/camera_support.md
diff --git a/docs/index.md b/docs/index.md
new file mode 100644
index 0000000..e899bb1
--- /dev/null
+++ b/docs/index.md
@@ -0,0 +1,85 @@
+# DeepLabCut Live GUI
+
+A graphical application for **real-time pose estimation with DeepLabCut** using one or more cameras.
+
+This GUI is designed for **scientists and experimenters** who want to preview, run inference, and record synchronized video with pose overlays, all without writing code.
+
+## Table of Contents
+
+:::{toc}
+:::
+
+---
+
+## What this software does
+
+- **Live camera preview** from one or multiple cameras
+- **Real-time pose inference** using DeepLabCut Live models
+- **Multi-camera support** with tiled display
+- **Video recording** (raw or with pose and bounding-box overlays)
+- **Session-based data organization** with reproducible naming
+- **Optional processor plugins** to extend behavior (e.g. remote control, triggers)
+
+The application is built with **PySide6 (Qt)** and is intended for interactive experimental use rather than offline batch processing.
+
+---
+
+## Typical workflow
+
+1. **Install** the application and required camera backends
+2. **Configure cameras** (single or multi-camera)
+3. **Select a DeepLabCut Live model**
+4. **Start preview** and verify frame rate
+5. **Run pose inference** on a selected camera
+6. **Record video** (optionally with overlays)
+7. **Organize results** by session and run
+
+Each of these steps is covered in the *Quickstart* and *User Guide* sections of this documentation.
+
+---
+
+## Who this is for
+
+- Neuroscience and behavior labs
+- Experimentalists running real-time tracking
+- Users who want a **GUI-first** workflow for DeepLabCut Live
+
+You do **not** need to be a software developer to use this tool.
+
+---
+
+## What this documentation covers
+
+- Installation and first-run setup
+- Camera configuration and supported backends
+- Pose inference settings and visualization
+- Recording options and file organization
+- Known limitations of the current release
+
+This documentation intentionally focuses on **end-user operation**.
+Developer-oriented material (APIs, internals, extension points) is out of scope for now.
+
+---
+
+## Current limitations (high-level)
+
+Before getting started, be aware of the following constraints:
+
+- Pose inference runs on **one selected camera at a time** (even in multi-camera mode)
+- Camera synchronization depends on backend capabilities and hardware
+- DeepLabCut Live models must be **exported and compatible** with the selected backend
+- Performance depends on camera resolution, frame rate, GPU availability, and codec choice
+
+A detailed and up-to-date list is maintained in the **Limitations** section.
+
+
+---
+
+## About DeepLabCut Live
+
+DeepLabCut Live enables low-latency, real-time pose estimation using models trained with DeepLabCut.
+This GUI provides an accessible interface on top of that ecosystem for interactive experiments.
+
+---
+
+*This project is under active development. Feedback from real experimental use is highly valued.*
diff --git a/docs/myst.yml b/docs/myst.yml
new file mode 100644
index 0000000..fa9fe0f
--- /dev/null
+++ b/docs/myst.yml
@@ -0,0 +1,50 @@
+# See: https://mystmd.org/guide/frontmatter
+version: 1
+
+project:
+  title: DeepLabCut Live GUI
+  description: PySide6-based GUI for real-time DeepLabCut experiments
+  authors:
+    - name: A. & M. Mathis Labs
+  license: LGPL-3.0
+  github: DeepLabCut/DeepLabCut-live-GUI
+  toc:
+    - file: index.md
+    - title: Quickstart
+      children:
+        - file: quickstart/install
+    - title: User Guide
+      children:
+        - file: user_guide/overview
+      #     - file: quickstart/camera_setup
+      #     - file: quickstart/inference_setup
+      #     - file: quickstart/first_run
+
+      #     - file: user_guide/cameras/index
+      #       children:
+      #         - file: user_guide/cameras/configure_dialog
+      #         - file: user_guide/cameras/supported_backends
+
+      #     - file: user_guide/inference/index
+      #       children:
+      #         - file: user_guide/inference/model_selection
+      #         - file: user_guide/inference/inference_camera
+      #         - file: user_guide/inference/processor_plugins
+
+      #     - file: user_guide/visualization/index
+      #       children:
+      #         - file: user_guide/visualization/preview_and_overlays
+
+      #     - file: user_guide/recording/index
+      #       children:
+      #         - file: user_guide/recording/output_structure
+
+      # - title: Limitations
+      #   children:
+      #     - file: limitations/current_limitations
+
+site:
+  template: book-theme
+  options:
+    logo: _static/images/logo.png
+    style: _static/custom.css
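+
+# Local preview while editing (assuming the same jupyter-book 2.x CLI that the
+# CI workflow installs via `npm install -g jupyter-book`; `start` launches the
+# MyST dev server):
+#   cd docs
+#   jupyter-book start          # serve with live reload
+#   jupyter-book build --html   # static site in docs/_build/html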
diff --git a/docs/quickstart/install.md b/docs/quickstart/install.md
new file mode 100644
index 0000000..63f458d
--- /dev/null
+++ b/docs/quickstart/install.md
@@ -0,0 +1,93 @@
+# Installation
+
+This page explains how to install **DeepLabCut Live GUI** for interactive, real‑time experiments.
+
+We **recommend `uv`** for most users because it is fast, reliable, and handles optional dependencies cleanly.
+**Conda is also supported**, especially if you already use it for DeepLabCut or GPU workflows.
+
+---
+
+## System requirements
+
+- **Python ≥ 3.10**
+- A working camera backend (see *User Guide → Cameras*)
+- Optional but recommended:
+  - **GPU with CUDA** (for real‑time inference)
+  - NVIDIA drivers compatible with your PyTorch/TensorFlow version
+
+---
+
+## Recommended: Install with `uv`
+
+We recommend installing with [`uv`](https://github.com/astral-sh/uv),
+but installation with `pip` or Conda also works (see the Conda alternative at the end of this page).
+
+
+### Create and activate a new environment
+
+::::{tab-set}
+:::{tab-item} Linux / macOS
+```bash
+uv venv dlclivegui
+source dlclivegui/bin/activate
+```
+:::
+
+:::{tab-item} Windows (Command Prompt)
+```cmd
+uv venv dlclivegui
+.\dlclivegui\Scripts\activate.bat
+```
+:::
+
+:::{tab-item} Windows (PowerShell)
+```powershell
+uv venv dlclivegui
+.\dlclivegui\Scripts\Activate.ps1
+```
+:::
+::::
+
+### Install DeepLabCut-Live-GUI
+
+As the package is not currently on PyPI, install directly from GitHub:
+
+
+```bash
+git clone https://github.com/DeepLabCut/DeepLabCut-live-GUI.git
+cd DeepLabCut-live-GUI
+```
+
+We offer two distinct inference backends:
+
+:::::{tab-set}
+
+::::{tab-item} PyTorch
+```bash
+uv pip install -e ".[pytorch]"
+```
+:::{note}
+For detailed installation instructions, please refer to the [official PyTorch installation guide](https://pytorch.org/get-started/locally/).
+:::
+::::
+
+::::{tab-item} TensorFlow
+:::{caution}
+Please note that TensorFlow is no longer available on native Windows for Python > 3.10.
+:::
+```bash
+uv pip install -e ".[tf]"
+```
+:::{note}
+For detailed installation instructions, please refer to the [official TensorFlow installation guide](https://www.tensorflow.org/install/pip).
+:::
+::::
+:::::
+
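+If you installed the PyTorch backend, you can quickly confirm that a GPU is visible to PyTorch (the equivalent check for TensorFlow differs):
+
+```bash
+python -c "import torch; print(torch.cuda.is_available())"
+```
+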
+### Run the application
+
+After installation, you can start the DeepLabCut Live GUI application with:
+
+```bash
+uv run dlclivegui
+```
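+
+## Alternative: Conda
+
+If you prefer Conda (for example, because your existing DeepLabCut workflow already uses it), an equivalent setup looks like the following minimal sketch; the environment name and Python version are illustrative:
+
+```bash
+# run from the cloned DeepLabCut-live-GUI directory
+conda create -n dlclivegui python=3.10
+conda activate dlclivegui
+pip install -e ".[pytorch]"  # or ".[tf]" for the TensorFlow backend
+```
+
+With the environment active, launch the GUI with `dlclivegui`.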
diff --git a/docs/user_guide/overview.md b/docs/user_guide/overview.md
new file mode 100644
index 0000000..8476072
--- /dev/null
+++ b/docs/user_guide/overview.md
@@ -0,0 +1,212 @@
+# Overview
+
+DeepLabCut Live GUI is a **PySide6-based desktop application** for running real-time DeepLabCut pose estimation experiments with **one or multiple cameras**, optional **processor plugins**, and **video recording** (with or without overlays).
+
+This page gives you a **guided tour of the main window**, explains the **core workflow**, and introduces the key concepts used throughout the user guide.
+
+---
+
+## Main Window at a Glance
+
+When you launch the application (`dlclivegui`), you will see:
+
+- A **Controls panel** (left) for configuring cameras, inference, recording, and overlays
+- A **Video panel** (right) showing the live preview (single or tiled multi-camera)
+- A **Stats area** (below the video) summarizing camera, inference, and recorder performance
+- A **Status bar** (bottom) for short messages and warnings
+
+---
+
+## Typical Workflow (Recommended)
+
+Most users will follow this sequence:
+
+1. **Configure Cameras**
+   Use **Configure Cameras…** to select one or more cameras and their parameters.
+
+2. **Start Preview**
+   Click **Start Preview** to begin streaming.
+   - If multiple cameras are active, the preview becomes a **tiled view**.
+
+3. *(If ready)* **Start Pose Inference**
+   Choose a **Model file**, optional **Processor**, select the **Inference Camera**, then click **Start pose inference**.
+   - Toggle **Display pose predictions** to show or hide overlays.
+
+4. *(If ready)* **Start Recording**
+   Choose an **Output directory**, session/run naming options, and encoding settings, then click **Start recording**.
+   - Recording supports **all active cameras** in multi-camera mode.
+
+5. **Stop**
+   Use **Stop Preview**, **Stop pose inference**, and/or **Stop recording** as needed.
+
+```{note}
+Pose inference requires the camera preview to be running.
+If you start pose inference while preview is stopped, the GUI will automatically start the preview first.
+```
+
+---
+
+## Main control panel
+
+:::{figure} ../_static/images/main_window/main_window_startup.png
+:label: fig:main_window_startup
+:alt: Screenshot of the main window
+:width: 100%
+:align: center
+
+   The main window on startup, showing the Controls panel (left), Video panel (right), and Stats area (below video).
+:::
+
+
+### Camera settings
+
+**Purpose:** Define which cameras are available and active.
+
+- **Configure Cameras…**
+  Opens the camera configuration dialog where you can:
+  - Add, enable, or disable cameras
+  - Select backend and index
+  - Adjust camera-specific properties
+  - Switch between single- and multi-camera setups
+
+- **Active**
+  Displays a summary of configured cameras:
+  - **Single camera:** `Name [backend:index] @ fps`
+  - **Multiple cameras:** `N cameras: camA, camB, …`
+
+```{important}
+In multi-camera mode, pose inference runs on **one selected camera at a time** (the *Inference Camera*),
+even though preview and recording may include multiple cameras.
+```
+
+---
+
+### DLCLive settings
+
+**Purpose:** Configure and run pose inference on the live stream.
+
+- **Model file**
+  Path to an exported DeepLabCut-Live model file (e.g. `.pt`, `.pb`).
+
+- **Processor folder / Processor** *(optional)*
+  Processor plugins extend the GUI with custom behavior, such as experiment logic or control of external devices.
+  This enables, for example, closed-loop experiments driven by pose estimates (see the sketch after this list).
+
+- **Inference Camera**
+  Select which active camera is used for pose inference.
+  In multi-camera preview, pose overlays are drawn only on the corresponding tile.
+
+- **Start pose inference / Stop pose inference**
+  The button indicates inference state:
+  - *Initializing DLCLive!* → Model loading
+  - *DLCLive running!* → Inference active
+
+- **Display pose predictions**
+  Toggle visualization of predicted keypoints.
+
+- **Auto-record video on processor command** *(optional)*
+  Allows compatible processors to start and stop recording automatically.
+
+- **Processor Status**
+  Displays processor-specific status information when available.
+
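+Processor plugins implement the DeepLabCut-live `Processor` interface. A minimal sketch, assuming the `dlclive` package and its `Processor` base class are available; the class name, threshold parameter, and trigger logic are purely illustrative:
+
+```python
+from dlclive.processor import Processor
+
+
+class LedTrigger(Processor):
+    """Hypothetical processor: react when a keypoint crosses a pixel threshold."""
+
+    def __init__(self, x_threshold=320):
+        super().__init__()
+        self.x_threshold = x_threshold  # illustrative threshold, in pixels
+
+    def process(self, pose, **kwargs):
+        # `pose` holds the predicted keypoints for the current frame
+        # as an array of (x, y, likelihood) rows.
+        if pose[0, 0] > self.x_threshold:
+            pass  # trigger external hardware here, e.g. send a serial command
+        return pose
+```
+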
+---
+
+### Recording
+
+**Purpose:** Save videos from the active cameras.
+
+Core settings:
+- **Output directory**: Base directory for all recordings
+- **Session name**: Logical grouping of runs (e.g. `mouseA_day1`)
+- **Use timestamp for run folder name**:
+  - Enabled → `run_YYYYMMDD_HHMMSS_mmm`
+  - Disabled → `run_0001`, `run_0002`, …
+
+A live preview label shows the *approximate* output path, including camera placeholders.
+
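+For example, with session `mouseA_day1`, timestamped run folders, and two cameras, the output might be organized as follows (file names are illustrative; they depend on your camera names and container choice):
+
+```text
+<output directory>/
+└── mouseA_day1/
+    └── run_20250101_120000_000/
+        ├── camA.mp4
+        └── camB.mp4
+```
+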
+Encoding options:
+- **Container** (e.g. `mp4`, `avi`, `mov`)
+- **Codec** (availability depends on OS and hardware)
+- **CRF** (quality/compression tradeoff; lower values give higher quality and larger files)
+
+Additional controls:
+- **Record video with overlays**
+  Include pose predictions and/or bounding boxes directly in the recorded video.
+  :::{caution}
+  This **cannot be easily undone** once the recording is saved.
+  Use with caution if you want to preserve raw footage.
+  :::
+- **Start recording / Stop recording**
+- **Open recording folder**
+
+---
+
+### Bounding Box Visualization
+
+**Purpose:** Show a bounding box around the detected subject.
+
+- **Show bounding box**: Enable or disable overlay
+- **Coordinates**: `x0`, `y0`, `x1`, `y1`
+
+In multi-camera mode, the bounding box is applied relative to the **inference camera tile**, ensuring correct alignment in tiled previews.
+
+---
+
+## Video Panel and Stats
+
+### Video preview
+
+- Displays a logo screen when idle
+- Shows live video when preview is running
+- Uses a tiled layout automatically when multiple cameras are active
+
+### Stats panel
+
+Three continuously updated sections:
+
+- **Camera**: Per-camera measured frame rate
+- **DLC Processor**: Inference throughput, latency, queue depth, and dropped frames
+- **Recorder**: Recording status and write performance
+
+```{tip}
+Stats text can be selected and copied directly, which is useful for debugging or reporting performance issues.
+```
+
+---
+
+## Menus
+
+### File
+- Load configuration…
+- Save configuration
+- Save configuration as…
+- Open recording folder
+- Close window
+
+### View → Appearance
+- System theme
+- Dark theme
+
+---
+
+## Configuration and Persistence
+
+The GUI can restore settings across sessions using:
+
+- Explicitly saved **JSON configuration files**
+- A stored snapshot of the most recent configuration
+- Remembered paths (e.g. last-used model directory)
+
+```{tip}
+For reproducible experiments, prefer saving and versioning JSON configuration files
+rather than relying only on remembered application state.
+```
+
+---
+
+## Next Steps
+
+- **Camera Setup**: Add and validate cameras, start preview
+- **Inference Setup**: Select a model, start pose inference, interpret performance metrics
+- **First Recording**: Understand session/run structure and verify recorded output
diff --git a/pyproject.toml b/pyproject.toml
index 0f5e02e..09e09cb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -46,6 +46,9 @@ pytorch = [
 tf = [
     "deeplabcut-live[tf]",
 ]
+docs = [
+    "jupyter-book>=2.0"
+]
 dev = [
     "pytest>=7.0",
     "pytest-cov>=4.0",
+ Use with caution if you want to preserve raw footage. + ::: +- **Start recording / Stop recording** +- **Open recording folder** + +--- + +### Bounding Box Visualization + +**Purpose:** Show a bounding box around the detected subject. + +- **Show bounding box**: Enable or disable overlay +- **Coordinates**: `x0`, `y0`, `x1`, `y1` + +In multi-camera mode, the bounding box is applied relative to the **inference camera tile**, ensuring correct alignment in tiled previews. + +--- + +## Video Panel and Stats + +### Video preview + +- Displays a logo screen when idle +- Shows live video when preview is running +- Uses a tiled layout automatically when multiple cameras are active + +### Stats panel + +Three continuously updated sections: + +- **Camera**: Per-camera measured frame rate +- **DLC Processor**: Inference throughput, latency, queue depth, and dropped frames +- **Recorder**: Recording status and write performance + +```{tip} +Stats text can be selected and copied directly, which is useful for debugging or reporting performance issues. +``` + +--- + +## Menus + +### File +- Load configuration… +- Save configuration +- Save configuration as… +- Open recording folder +- Close window + +### View → Appearance +- System theme +- Dark theme + +--- + +## Configuration and Persistence + +The GUI can restore settings across sessions using: + +- Explicitly saved **JSON configuration files** +- A stored snapshot of the most recent configuration +- Remembered paths (e.g. last-used model directory) + +```{tip} +For reproducible experiments, prefer saving and versioning JSON configuration files +rather than relying only on remembered application state. +``` + +--- + +## Next Steps + +- **Camera Setup**: Add and validate cameras, start preview +- **Inference Setup**: Select a model, start pose inference, interpret performance metrics +- **First Recording**: Understand session/run structure and verify recorded output diff --git a/pyproject.toml b/pyproject.toml index 0f5e02e..09e09cb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,9 @@ pytorch = [ tf = [ "deeplabcut-live[tf]", ] +docs = [ + "jupyter-book>=2.0" +] dev = [ "pytest>=7.0", "pytest-cov>=4.0",