diff --git a/.github/workflows/playwright-tests.yml b/.github/workflows/playwright-tests.yml new file mode 100644 index 0000000..551b775 --- /dev/null +++ b/.github/workflows/playwright-tests.yml @@ -0,0 +1,34 @@ +name: Playwright E2E Tests + +on: + workflow_dispatch: + +jobs: + playwright-e2e: + runs-on: ubuntu-latest + permissions: + contents: read + + steps: + - uses: actions/checkout@v4 + + - name: Build Docker images + run: docker compose build + + - name: Run Playwright E2E tests + # `docker compose run` starts the declared dependencies (homeassistant, + # ssh_docker_test) and then runs the playwright-tests container. + # The exit code of the run command mirrors the test container's exit code. + run: docker compose run --rm playwright-tests + + - name: Stop services + if: always() + run: docker compose down -v + + - name: Upload test results + if: always() + uses: actions/upload-artifact@v4 + with: + name: playwright-e2e-results + path: playwright-results/ + if-no-files-found: ignore diff --git a/.gitignore b/.gitignore index 588ff39..bf8b07e 100644 --- a/.gitignore +++ b/.gitignore @@ -3,3 +3,4 @@ __pycache__/ /htmlcov/ /.coverage custom_components/ +playwright-results/ diff --git a/coordinator.py b/coordinator.py index 04fcc88..1aac8ee 100644 --- a/coordinator.py +++ b/coordinator.py @@ -71,6 +71,7 @@ async def async_execute(self, data: dict[str, Any]) -> dict[str, Any]: CONF_PASSWORD: password, CONF_CLIENT_KEYS: key_file, CONF_KNOWN_HOSTS: await self._resolve_known_hosts(check_known_hosts, known_hosts), + "connect_timeout": timeout, } run_kwargs: dict[str, Any] = { diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..0d3e298 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,88 @@ +services: + + # ── Home Assistant ────────────────────────────────────────────────────────── + homeassistant: + image: ghcr.io/home-assistant/home-assistant:stable + container_name: homeassistant_test + volumes: + # Persistent HA config 
(survives container restarts; start fresh with + # `docker compose down -v`). + - ha_config:/config + # Mount the integration source as a custom component so HA loads it on + # startup without any extra copy step. + - ./:/config/custom_components/ssh_command:ro + # Startup wrapper that pre-populates /etc/hosts before launching HA. + # Alpine Linux (musl libc) cannot resolve Docker container hostnames via + # Python's socket module because of iptables/UDP limitations in this + # environment. The wrapper uses busybox nslookup (which works) to add + # entries to /etc/hosts so that all resolver calls succeed via the + # "files" nsswitch path. + - ./tests/playwright/ha-init-wrapper.sh:/ha-init-wrapper.sh:ro + # SSH test-key data written by ssh_docker_test_1 at startup. + # Provides the user auth private key and a known_hosts file so that + # key-file and known-hosts E2E tests can reference them by path. + - ssh_test_init:/ssh-test-keys:ro + environment: + - TZ=UTC + entrypoint: ["/bin/sh", "/ha-init-wrapper.sh"] + # Clear the external search domain that musl's resolver would try first, + # which causes timeouts in this Azure-hosted environment. + dns_search: "." + restart: unless-stopped + + # ── SSH test servers ──────────────────────────────────────────────────────── + # Two identical Ubuntu-based containers each run a single sshd on port 22 + # (the SSH default). The Home Assistant integration connects to port 22 by + # default, so no port mapping is required. + # Credentials: user=foo password=pass + ssh_docker_test_1: + build: + context: tests/playwright + dockerfile: Dockerfile.ssh + container_name: ssh_docker_test_1 + environment: + # Injected into the startup script so the known_hosts entry uses the + # correct hostname rather than the container's random short hostname. + - CONTAINER_NAME=ssh_docker_test_1 + volumes: + # Shared with the HA container (read-only) at /ssh-test-keys so tests + # can reference /ssh-test-keys/id_ed25519 and /ssh-test-keys/known_hosts. 
+ - ssh_test_init:/ssh-init-data + + ssh_docker_test_2: + build: + context: tests/playwright + dockerfile: Dockerfile.ssh + container_name: ssh_docker_test_2 + + # ── Playwright E2E test runner ────────────────────────────────────────────── + # Not started by default (`docker compose up`); invoke explicitly: + # docker compose run --rm playwright-tests + playwright-tests: + build: + context: . + dockerfile: tests/playwright/Dockerfile + environment: + - HOMEASSISTANT_URL=http://homeassistant:8123 + - SSH_HOST_1=ssh_docker_test_1 + - SSH_HOST_2=ssh_docker_test_2 + - SSH_USER=foo + - SSH_PASSWORD=pass + - HA_USERNAME=admin + - HA_PASSWORD=admin + volumes: + # Test results (JUnit XML) written here are available on the host after + # the container exits, e.g. for CI artifact upload. + - ./playwright-results:/app/playwright-results + depends_on: + - homeassistant + - ssh_docker_test_1 + - ssh_docker_test_2 + +volumes: + ha_config: + # Populated by ssh_docker_test_1 at container startup; mounted read-only + # into the HA container at /ssh-test-keys so that key-file and known-hosts + # E2E tests can access the credentials by path. + ssh_test_init: + diff --git a/run_playwright_tests.sh b/run_playwright_tests.sh new file mode 100755 index 0000000..480c47b --- /dev/null +++ b/run_playwright_tests.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash +# run_playwright_tests.sh +# +# Runs the Playwright E2E test suite in a fully isolated Docker environment. +# No local Python environment or browser installation is required. +# +# The suite spins up Home Assistant, two SSH test servers, and the Playwright +# test runner via docker compose, then tears everything down on exit. 
+# +# Usage: +# ./run_playwright_tests.sh + +set -euo pipefail + +# ── Colour helpers ──────────────────────────────────────────────────────────── +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +BOLD='\033[1m' +NC='\033[0m' + +info() { echo -e "${BLUE}[INFO]${NC} $*"; } +success() { echo -e "${GREEN}[PASS]${NC} $*"; } +warn() { echo -e "${YELLOW}[WARN]${NC} $*"; } +error() { echo -e "${RED}[FAIL]${NC} $*"; } +header() { echo -e "\n${BOLD}$*${NC}"; } + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +COMPOSE_FILE="$SCRIPT_DIR/docker-compose.yaml" + +# ── Resolve docker compose command ─────────────────────────────────────────── +get_compose_cmd() { + if command -v docker &>/dev/null && sudo docker compose version &>/dev/null 2>&1; then + echo "sudo docker compose" + else + error "docker compose is not available. Please install Docker with the Compose plugin." + exit 1 + fi +} + +# ── Main ────────────────────────────────────────────────────────────────────── +main() { + if [[ $# -gt 0 ]]; then + error "This script takes no arguments." + echo "Usage: $0" + exit 1 + fi + + if [[ ! -f "$COMPOSE_FILE" ]]; then + error "docker-compose.yaml not found at $COMPOSE_FILE" + exit 1 + fi + + header "════════════════════════════════════════════════════" + header " Playwright E2E tests (docker compose)" + header "════════════════════════════════════════════════════" + + local compose_cmd + compose_cmd="$(get_compose_cmd)" + + info "Building Docker images…" + $compose_cmd -f "$COMPOSE_FILE" build + + info "Running test container (this may take several minutes on first run)…" + local exit_code=0 + $compose_cmd -f "$COMPOSE_FILE" run --rm playwright-tests || exit_code=$? + + info "Stopping services…" + $compose_cmd -f "$COMPOSE_FILE" down -v || true + + if [[ $exit_code -eq 0 ]]; then + echo "" + success "All Playwright E2E tests passed." + exit 0 + else + echo "" + error "Playwright E2E tests failed (exit code ${exit_code})." 
+ exit "${exit_code}" + fi +} + +main "$@" diff --git a/run_tests.sh b/run_tests.sh deleted file mode 100755 index e63db2f..0000000 --- a/run_tests.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -coverage run --omit='tests/unit_tests/*' -m unittest discover -s tests/unit_tests; coverage html diff --git a/run_workflows_locally.sh b/run_workflows_locally.sh index d4b4304..26ebaab 100755 --- a/run_workflows_locally.sh +++ b/run_workflows_locally.sh @@ -108,8 +108,29 @@ run_workflow() { fi } +# ── Playwright E2E tests via docker compose ─────────────────────────────────── +# The playwright-tests.yml workflow uses `docker compose run` internally, which +# requires a real Docker daemon. act (Docker-in-Docker) cannot reliably run +# that workflow, so we delegate to the dedicated run_playwright_tests.sh script. +run_playwright_tests() { + local script="$SCRIPT_DIR/run_playwright_tests.sh" + + if [[ ! -f "$script" ]]; then + warn "run_playwright_tests.sh not found – skipping Playwright E2E tests." + return 1 + fi + + if bash "$script"; then + success "playwright-tests.yml passed" + return 0 + else + error "playwright-tests.yml failed" + return 1 + fi +} + run_all_workflows() { - # Only workflows that run entirely locally (tests and linting). + # Only act-compatible workflows (no Docker-in-Docker requirement). # Workflows that depend on GitHub infrastructure (hassfest, HACS validation, # release) are silently omitted. 
local workflow_files=( @@ -144,6 +165,13 @@ run_all_workflows() { fi done + # ── Playwright E2E tests (docker compose, not act) ──────────────────────── + if run_playwright_tests; then + passed+=("playwright-tests.yml") + else + failed+=("playwright-tests.yml") + fi + # ── Summary ─────────────────────────────────────────────────────────────── header "══════════════════════════════════════════════" header " Results" diff --git a/tests/playwright/Dockerfile b/tests/playwright/Dockerfile new file mode 100644 index 0000000..cfd782f --- /dev/null +++ b/tests/playwright/Dockerfile @@ -0,0 +1,27 @@ +# Playwright E2E test-runner image. +# +# Build context: the repository root (so all test files and the component +# source are available inside the container). +FROM python:3.12-slim + +WORKDIR /app + +# System packages needed by Playwright's bundled Chromium +RUN apt-get update && apt-get install -y --no-install-recommends \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Python dependencies (test suite) +COPY tests/playwright/requirements.txt ./playwright-requirements.txt +RUN pip install --no-cache-dir -r playwright-requirements.txt && \ + playwright install chromium && \ + playwright install-deps chromium + +# Copy the full repository so the component source and all test files +# are available at /app (component root) and /app/tests/playwright/. +COPY . /app + +COPY tests/playwright/entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/tests/playwright/Dockerfile.ssh b/tests/playwright/Dockerfile.ssh new file mode 100644 index 0000000..9f3cb59 --- /dev/null +++ b/tests/playwright/Dockerfile.ssh @@ -0,0 +1,49 @@ +# Minimal SSH test server with a single user (foo / pass). +# One sshd daemon runs on the standard port 22 so the Home Assistant +# integration (which defaults to port 22) can connect without any +# port-number configuration. 
+FROM ubuntu:24.04 + +RUN apt-get update -qq && \ + DEBIAN_FRONTEND=noninteractive apt-get install -qq -y --no-install-recommends \ + openssh-server \ + && rm -rf /var/lib/apt/lists/* + +# Create the test user +RUN useradd -m -s /bin/sh foo && \ + echo "foo:pass" | chpasswd + +# Write an sshd config +RUN printf '%s\n' \ + 'HostKey /etc/ssh/ssh_host_rsa_key' \ + 'HostKey /etc/ssh/ssh_host_ecdsa_key' \ + 'HostKey /etc/ssh/ssh_host_ed25519_key' \ + 'AuthorizedKeysFile .ssh/authorized_keys' \ + 'PasswordAuthentication yes' \ + 'PubkeyAuthentication yes' \ + 'KbdInteractiveAuthentication no' \ + 'UsePAM no' \ + 'PrintMotd no' \ + 'PrintLastLog no' \ + 'Subsystem sftp /usr/lib/openssh/sftp-server' \ + > /etc/ssh/sshd_config.d/test.conf + +# Generate host keys, create the privilege-separation directory, and create +# the test user's ed25519 auth key pair (used by key-file authentication tests). +RUN ssh-keygen -A && \ + mkdir -p /run/sshd && \ + mkdir -p /home/foo/.ssh && \ + chmod 700 /home/foo/.ssh && \ + ssh-keygen -t ed25519 -f /home/foo/.ssh/id_ed25519 -N "" && \ + cat /home/foo/.ssh/id_ed25519.pub > /home/foo/.ssh/authorized_keys && \ + chmod 600 /home/foo/.ssh/id_ed25519 && \ + chmod 644 /home/foo/.ssh/id_ed25519.pub /home/foo/.ssh/authorized_keys && \ + chown -R foo:foo /home/foo/.ssh + +# Startup script: populates the shared init volume then starts sshd. +COPY ssh-init-entrypoint.sh /ssh-init-entrypoint.sh +RUN chmod +x /ssh-init-entrypoint.sh + +EXPOSE 22 + +CMD ["/ssh-init-entrypoint.sh"] diff --git a/tests/playwright/README.md b/tests/playwright/README.md new file mode 100644 index 0000000..8ee2cee --- /dev/null +++ b/tests/playwright/README.md @@ -0,0 +1,122 @@ +# SSH Command Playwright E2E Tests + +End-to-end tests for the **SSH Command** Home Assistant custom component using +[Playwright](https://playwright.dev/python/). 
+## Running with Docker (recommended)
+
+The repository ships a `docker-compose.yaml` that starts Home Assistant, the
+SSH test servers, and a self-contained Playwright test-runner — no local Python
+environment or browser installation required.
+
+```bash
+# From the repository root:
+
+# First run: build the images (only needed once, or after code changes)
+docker compose build
+
+# Run the full E2E suite
+docker compose run --rm playwright-tests
+
+# Stop background services and remove volumes when done
+docker compose down -v
+```
+
+On the **first run** the test-runner container automatically creates the HA
+admin user via the onboarding API, so no manual UI interaction is needed.
+
+Test results (JUnit XML) are written to `playwright-results/` in the repository
+root and can be used by CI or inspected locally.
+
+## Running the full CI suite locally
+
+`run_workflows_locally.sh` now includes the Playwright E2E tests. It calls
+`docker compose run` directly instead of going through `act`:
+
+```bash
+./run_workflows_locally.sh
+```
+
+## Running without Docker (advanced)
+
+If you prefer to run outside the container (e.g. against a pre-existing HA
+instance), install dependencies on the host and point the env vars at your
+services:
+
+```bash
+# Install dependencies
+pip install -r tests/playwright/requirements.txt
+playwright install chromium
+
+# Point at your services. conftest.py reads SSH_HOST_1/SSH_HOST_2 (no port
+# variables — the servers must listen on the SSH default, port 22).
+export HOMEASSISTANT_URL=http://localhost:8123
+export SSH_HOST_1=ssh_docker_test_1
+export SSH_HOST_2=ssh_docker_test_2
+export SSH_USER=foo
+export HA_USERNAME=admin
+export HA_PASSWORD=admin
+
+pytest tests/playwright/ -v
+```
+
+## GitHub Actions
+
+The `.github/workflows/playwright-tests.yml` workflow runs the full suite when
+triggered manually (`workflow_dispatch`). It builds the images, calls
+`docker compose run playwright-tests`, and uploads `playwright-results/` as a workflow artifact.
+## Environment Variables
+
+| Variable | Default | Description |
+|---|---|---|
+| `HOMEASSISTANT_URL` | `http://homeassistant:8123` | Home Assistant base URL |
+| `SSH_HOST_1` | `ssh_docker_test_1` | Hostname of SSH Test Server 1 |
+| `SSH_HOST_2` | `ssh_docker_test_2` | Hostname of SSH Test Server 2 |
+| `SSH_KEY_FILE` | `/ssh-test-keys/id_ed25519` | Path to the test user's private key |
+| `SSH_USER` | `foo` | SSH username |
+| `SSH_PASSWORD` | `pass` | SSH password |
+| `HA_USERNAME` | `admin` | Home Assistant admin username |
+| `HA_PASSWORD` | `admin` | Home Assistant admin password |
+
+## Docker image layout
+
+| File | Purpose |
+|---|---|
+| `Dockerfile` | Playwright test-runner (Python 3.12 + Chromium) |
+| `Dockerfile.ssh` | SSH test server (Ubuntu 24.04, single sshd on port 22) |
+| `entrypoint.sh` | Container startup: wait for HA → onboard → run pytest |
+| `docker-compose.yaml` | (repo root) Orchestrates all three services |
+
+## Test Modules
+
+| File | What it tests |
+|---|---|
+| `test_integration_setup.py` | Add/assert duplicate blocked/remove lifecycle |
+| `test_command_execution.py` | Executing SSH commands against real test servers |
+| `test_services.py` | The `ssh_command.execute` HA service interface |
+| `test_frontend.py` | Home Assistant frontend pages and UI interactions |
+| `test_configuration.py` | Configuration options (timeout, auth, known hosts, …) |
+| `test_security.py` | Security properties (auth validation, unauthenticated access, …) |
+
+## Fixtures (`conftest.py`)
+
+| Fixture | Scope | Description |
+|---|---|---|
+| `playwright_instance` | session | Playwright instance |
+| `browser` | session | Headless Chromium browser |
+| `ha_base_url` | session | Configured HA URL |
+| `ha_token` | session | Long-lived HA access token |
+| `context` | function | Authenticated browser context |
+| `page` | function | Fresh page within the authenticated context |
+| `ssh_server_1` | session | Connection params for SSH
server 1 | +| `ssh_server_2` | session | Connection params for SSH server 2 | +| `ha_api` | function | `requests.Session` for the HA REST API | +| `ensure_integration` | function | Ensures SSH Command is set up; fully restores state after test | + +## Notes + +- Tests are **idempotent** – each test cleans up after itself. +- Tests do **not** depend on each other. +- Browser-based tests use a headless Chromium instance. +- API-based tests call Home Assistant's REST API directly for speed. + diff --git a/tests/playwright/conftest.py b/tests/playwright/conftest.py new file mode 100644 index 0000000..9f3fc69 --- /dev/null +++ b/tests/playwright/conftest.py @@ -0,0 +1,341 @@ +"""Pytest configuration and fixtures for SSH Command Playwright E2E tests.""" + +from __future__ import annotations + +import json +import os +import time +from typing import Any, Generator + +import pytest +import requests +from playwright.sync_api import Browser, BrowserContext, Page, Playwright, sync_playwright + +# --------------------------------------------------------------------------- +# Environment-variable driven configuration +# --------------------------------------------------------------------------- + +HA_URL: str = os.environ.get("HOMEASSISTANT_URL", "http://homeassistant:8123") +# Each SSH test server is a separate container (both on port 22, the default). +SSH_HOST_1: str = os.environ.get("SSH_HOST_1", "ssh_docker_test_1") +SSH_HOST_2: str = os.environ.get("SSH_HOST_2", "ssh_docker_test_2") +SSH_USER: str = os.environ.get("SSH_USER", "foo") +SSH_PASSWORD: str = os.environ.get("SSH_PASSWORD", "pass") + +HA_USERNAME: str = os.environ.get("HA_USERNAME", "admin") +HA_PASSWORD: str = os.environ.get("HA_PASSWORD", "admin") + +# Paths on the HA container's filesystem populated by ssh_docker_test_1's +# startup script (see tests/playwright/ssh-init-entrypoint.sh). 
+# ssh_test_init volume is mounted read-only at /ssh-test-keys in the HA +# container, providing a user auth key and a known_hosts file for tests. +SSH_KEY_FILE: str = os.environ.get("SSH_KEY_FILE", "/ssh-test-keys/id_ed25519") +SSH_KNOWN_HOSTS: str = os.environ.get("SSH_KNOWN_HOSTS", "/ssh-test-keys/known_hosts") + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +_HA_TOKEN: str | None = None + + +def get_ha_token() -> str: + """Obtain a Home Assistant access token via the login flow. + + On the first call the token is fetched and cached for the remainder of + the test session. Retries up to 5 times with a short delay to handle + the window immediately after HA onboarding completes. + """ + global _HA_TOKEN # noqa: PLW0603 + if _HA_TOKEN: + return _HA_TOKEN + + last_exc: Exception | None = None + for attempt in range(5): + if attempt: + time.sleep(5) + try: + session = requests.Session() + + # 1. Initiate the login flow + flow_resp = session.post( + f"{HA_URL}/auth/login_flow", + json={ + "client_id": f"{HA_URL}/", + "handler": ["homeassistant", None], + "redirect_uri": f"{HA_URL}/", + }, + timeout=30, + ) + flow_resp.raise_for_status() + flow_id = flow_resp.json()["flow_id"] + + # 2. Submit credentials + cred_resp = session.post( + f"{HA_URL}/auth/login_flow/{flow_id}", + json={ + "username": HA_USERNAME, + "password": HA_PASSWORD, + "client_id": f"{HA_URL}/", + }, + timeout=30, + ) + cred_resp.raise_for_status() + cred_data = cred_resp.json() + if cred_data.get("type") != "create_entry": + raise RuntimeError( + f"Login flow did not complete: type={cred_data.get('type')!r}, " + f"errors={cred_data.get('errors')}" + ) + auth_code = cred_data["result"] + + # 3. 
Exchange code for token + token_resp = session.post( + f"{HA_URL}/auth/token", + data={ + "grant_type": "authorization_code", + "code": auth_code, + "client_id": f"{HA_URL}/", + }, + timeout=30, + ) + token_resp.raise_for_status() + _HA_TOKEN = token_resp.json()["access_token"] + return _HA_TOKEN + except Exception as exc: # noqa: BLE001 + last_exc = exc + + raise RuntimeError(f"Failed to obtain HA token after 5 attempts: {last_exc}") from last_exc + + +def wait_for_ha(timeout: int = 300) -> None: + """Block until Home Assistant is fully started and accepts API requests. + + Polls GET /api/onboarding which requires no authentication and therefore + cannot trigger HA's IP-ban mechanism. The endpoint returns HTTP 200 even + during onboarding, so it is safe to use as a startup indicator. + + A second pass waits for the integration to be loadable (the custom + component may still be installing its requirements). + """ + deadline = time.time() + timeout + + # Phase 1: wait for the web server to respond at all + while time.time() < deadline: + try: + resp = requests.get(f"{HA_URL}/api/onboarding", timeout=5) + if resp.status_code == 200: + break + except requests.RequestException: + pass + time.sleep(3) + else: + raise RuntimeError(f"Home Assistant did not become ready within {timeout}s") + + # Phase 2: wait for the config-entries API to be usable (integrations loaded) + # We use a small fixed delay to let HA finish loading custom components and + # installing their requirements (asyncssh etc.) after the web server is up. 
+ time.sleep(15) + + + +# --------------------------------------------------------------------------- +# Session-scoped Playwright fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture(scope="session") +def playwright_instance() -> Generator[Playwright, None, None]: + """Provide a session-scoped Playwright instance.""" + with sync_playwright() as pw: + yield pw + + +@pytest.fixture(scope="session") +def browser(playwright_instance: Playwright) -> Generator[Browser, None, None]: + """Provide a session-scoped Chromium browser.""" + browser = playwright_instance.chromium.launch(headless=True) + yield browser + browser.close() + + +@pytest.fixture(scope="session") +def ha_base_url() -> str: + """Return the configured Home Assistant base URL.""" + return HA_URL + + +@pytest.fixture(scope="session") +def ha_token() -> str: + """Provide a valid Home Assistant long-lived access token.""" + wait_for_ha() + return get_ha_token() + + +# --------------------------------------------------------------------------- +# Per-test browser context with an authenticated HA session +# --------------------------------------------------------------------------- + + +@pytest.fixture() +def context(browser: Browser, ha_token: str) -> Generator[BrowserContext, None, None]: + """Provide an authenticated browser context for Home Assistant. + + The HA frontend reads ``hassTokens`` from ``localStorage`` to determine + whether the user is authenticated. Using Playwright's ``storage_state`` + pre-populates ``localStorage`` *before* the first navigation, which is + more reliable than ``add_init_script`` (the latter can lose a race with + HA's own auth-check code and cause a redirect to ``/onboarding.html``). 
+ """ + hass_tokens = json.dumps({ + "access_token": ha_token, + "token_type": "Bearer", + "expires_in": 1800, + "hassUrl": HA_URL, + "clientId": f"{HA_URL}/", + "expires": int(time.time() * 1000) + 1_800_000, + "refresh_token": "", + }) + ctx = browser.new_context( + base_url=HA_URL, + storage_state={ + "cookies": [], + "origins": [ + { + "origin": HA_URL, + "localStorage": [ + {"name": "hassTokens", "value": hass_tokens}, + ], + } + ], + }, + ) + yield ctx + ctx.close() + + +@pytest.fixture() +def page(context: BrowserContext) -> Generator[Page, None, None]: + """Provide a fresh page within the authenticated browser context.""" + pg = context.new_page() + yield pg + pg.close() + + +# --------------------------------------------------------------------------- +# SSH server fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture(scope="session") +def ssh_server_1() -> dict: + """Return connection parameters for SSH Test Server 1. + + The server runs sshd on the standard port 22, which the Home Assistant + integration uses by default. + """ + return { + "host": SSH_HOST_1, + "username": SSH_USER, + "password": SSH_PASSWORD, + } + + +@pytest.fixture(scope="session") +def ssh_server_2() -> dict: + """Return connection parameters for SSH Test Server 2. + + A separate container from ssh_server_1 so the two servers are genuinely + independent (different hostnames). 
+ """ + return { + "host": SSH_HOST_2, + "username": SSH_USER, + "password": SSH_PASSWORD, + } + + +# --------------------------------------------------------------------------- +# Integration setup / teardown helper +# --------------------------------------------------------------------------- + + +@pytest.fixture() +def ha_api(ha_token: str) -> requests.Session: + """Return a requests Session pre-configured to call the HA REST API.""" + session = requests.Session() + session.headers["Authorization"] = f"Bearer {ha_token}" + session.headers["Content-Type"] = "application/json" + return session + + +@pytest.fixture() +def ensure_integration(ha_api: requests.Session) -> Generator[None, None, None]: + """Ensure the SSH Command integration is set up before a test runs. + + After the test the environment is restored to its exact pre-test state: + - Any entries added during the test are removed. + - Any entries that were present before but removed during the test are + re-added, so subsequent test runs start from the same baseline. 
+    """
+    # Snapshot state before the test
+    resp = ha_api.get(f"{HA_URL}/api/config/config_entries/entry")
+    resp.raise_for_status()
+    entries_before: set[str] = {
+        e["entry_id"]
+        for e in resp.json()
+        if e.get("domain") == "ssh_command"
+    }
+
+    # There should be no entry
+    assert not entries_before
+
+    # If the integration is not yet configured, add it now
+    flow_resp = ha_api.post(
+        f"{HA_URL}/api/config/config_entries/flow",
+        json={"handler": "ssh_command"},
+    )
+    flow_resp.raise_for_status()
+
+    yield
+
+    # --- Teardown: restore pre-test state ---
+    resp = ha_api.get(f"{HA_URL}/api/config/config_entries/entry")
+    resp.raise_for_status()
+    entries_after: set[str] = {
+        e["entry_id"]
+        for e in resp.json()
+        if e.get("domain") == "ssh_command"
+    }
+
+    # Remove entries that were added during the test
+    for entry_id in entries_after - entries_before:
+        ha_api.delete(f"{HA_URL}/api/config/config_entries/entry/{entry_id}")
+
+def _get_ssh_command_entry_ids(ha_api: requests.Session) -> set[str]:
+    """Return the set of current ssh_command config-entry IDs."""
+    resp = ha_api.get(f"{HA_URL}/api/config/config_entries/entry")
+    resp.raise_for_status()
+    return {e["entry_id"] for e in resp.json() if e.get("domain") == "ssh_command"}
+
+
+def _add_integration(ha_api: requests.Session) -> None:
+    """Initiate the SSH Command config flow.
+
+    The call starts the config flow; Home Assistant will immediately complete
+    it and create the single config entry (SSH Command has no form fields and
+    single_instance_allowed=True). The HTTP response status is validated but
+    the caller is responsible for confirming the resulting entry state when
+    strict verification is needed.
+ """ + resp = ha_api.post( + f"{HA_URL}/api/config/config_entries/flow", + json={"handler": "ssh_command"}, + ) + resp.raise_for_status() + + +def _remove_all_ssh_command_entries(ha_api: requests.Session) -> None: + """Delete every ssh_command config entry from Home Assistant.""" + for entry_id in _get_ssh_command_entry_ids(ha_api): + ha_api.delete(f"{HA_URL}/api/config/config_entries/entry/{entry_id}") diff --git a/tests/playwright/entrypoint.sh b/tests/playwright/entrypoint.sh new file mode 100644 index 0000000..305ab9e --- /dev/null +++ b/tests/playwright/entrypoint.sh @@ -0,0 +1,122 @@ +#!/usr/bin/env bash +# entrypoint.sh — startup script for the Playwright E2E test-runner container. +# +# 1. Waits for Home Assistant to become reachable. +# 2. Runs the full HA onboarding flow to create the admin user and complete all +# onboarding steps (if they haven't been completed yet). +# 3. Hands off to pytest. + +set -euo pipefail + +HA_URL="${HOMEASSISTANT_URL:-http://homeassistant:8123}" +HA_USER="${HA_USERNAME:-admin}" +HA_PASS="${HA_PASSWORD:-admin}" +RESULTS_DIR="/app/playwright-results" + +log() { echo "[entrypoint] $*"; } + +mkdir -p "${RESULTS_DIR}" + +# ── 1. Wait for Home Assistant to respond ──────────────────────────────────── +log "Waiting for Home Assistant at ${HA_URL} …" +ATTEMPT=0 +MAX_ATTEMPTS=120 +until HTTP=$(curl -s -o /dev/null -w "%{http_code}" "${HA_URL}/api/onboarding" 2>/dev/null) && \ + [[ "${HTTP}" =~ ^[2-4][0-9]{2}$ ]]; do + ATTEMPT=$(( ATTEMPT + 1 )) + if [[ "${ATTEMPT}" -ge "${MAX_ATTEMPTS}" ]]; then + log "ERROR: Home Assistant did not become ready after ${MAX_ATTEMPTS} attempts." + exit 1 + fi + log " Attempt ${ATTEMPT}/${MAX_ATTEMPTS} (HTTP ${HTTP:-000}), retrying in 5 s …" + sleep 5 +done +log "Home Assistant is responding." + +# ── 2. Onboarding (complete all steps on first start) ───────────────────────── +ONBOARDING=$(curl -sf "${HA_URL}/api/onboarding" 2>/dev/null || echo '[]') + +# Check whether the "user" step is already done. 
+USER_DONE=$(_ONBOARDING="${ONBOARDING}" python3 - <<'PYEOF' +import json, os, sys +try: + data = json.loads(os.environ.get("_ONBOARDING", "[]")) + if not isinstance(data, list): + raise ValueError("unexpected onboarding format") + print("true" if any(s.get("step") == "user" and s.get("done") for s in data) else "false") +except Exception as e: + # Unknown format – assume NOT done so we attempt onboarding + print("false") +PYEOF +) + +if [[ "${USER_DONE}" == "false" ]]; then + log "Running HA onboarding — creating admin user '${HA_USER}' …" + + # Step 1: Create user; returns {"auth_code": "...", "client_id": "..."} + PAYLOAD="{\"client_id\":\"${HA_URL}/\",\"name\":\"Admin\",\"username\":\"${HA_USER}\",\"password\":\"${HA_PASS}\",\"language\":\"en\"}" + USER_RESPONSE=$(curl -sf -X POST "${HA_URL}/api/onboarding/users" \ + -H "Content-Type: application/json" \ + -d "${PAYLOAD}" 2>&1) || { + log "WARNING: Onboarding/users request failed. HA may already be fully onboarded." + USER_RESPONSE="" + } + + if [[ -n "${USER_RESPONSE}" ]]; then + # Step 2: Exchange the auth_code for a bearer token + AUTH_TOKEN=$(_RESP="${USER_RESPONSE}" HA_URL="${HA_URL}" python3 - <<'PYEOF' +import json, os, sys, urllib.request, urllib.parse + +resp = os.environ.get("_RESP", "") +ha_url = os.environ.get("HA_URL", "") + +try: + auth_code = json.loads(resp)["auth_code"] +except Exception as e: + print("") + sys.exit(0) + +data = urllib.parse.urlencode({ + "grant_type": "authorization_code", + "code": auth_code, + "client_id": ha_url + "/", +}).encode() +req = urllib.request.Request(f"{ha_url}/auth/token", data=data, + method="POST") +try: + with urllib.request.urlopen(req, timeout=30) as r: + print(json.loads(r.read())["access_token"]) +except Exception as e: + print("") +PYEOF + ) + + if [[ -n "${AUTH_TOKEN}" ]]; then + # Step 3: Complete remaining onboarding steps with the new token + for STEP in core_config analytics integration; do + log " Completing onboarding step: ${STEP} …" + curl -sf 
-X POST "${HA_URL}/api/onboarding/${STEP}" \ + -H "Authorization: Bearer ${AUTH_TOKEN}" \ + -H "Content-Type: application/json" \ + -d '{}' > /dev/null 2>&1 || \ + log " WARNING: step '${STEP}' returned an error (may be harmless)." + done + fi + fi + + log "Onboarding complete." + # Give HA a moment to settle after onboarding + sleep 10 +fi + +# ── 3. Run the test suite ───────────────────────────────────────────────────── +log "Starting Playwright E2E test suite …" + +# Run from tests/playwright/ so pytest does not traverse up into the HA +# component package (which would try to import voluptuous etc.). +cd /app/tests/playwright +exec pytest . \ + --tb=short \ + -v \ + --junitxml="${RESULTS_DIR}/junit.xml" \ + "$@" diff --git a/tests/playwright/ha-init-wrapper.sh b/tests/playwright/ha-init-wrapper.sh new file mode 100755 index 0000000..566165a --- /dev/null +++ b/tests/playwright/ha-init-wrapper.sh @@ -0,0 +1,35 @@ +#!/bin/sh +# ha-init-wrapper.sh — pre-populates /etc/hosts before handing off to the +# real Home Assistant init script (/init). +# +# Alpine Linux (musl libc) cannot resolve Docker container hostnames via +# Python's socket module because musl's DNS resolver fails against Docker's +# embedded DNS server (127.0.0.11) in some CI environments, even though +# busybox's nslookup (which makes direct UDP queries) works fine. +# +# By adding /etc/hosts entries via nslookup first, Python's resolver uses the +# "files" path from nsswitch.conf and succeeds without touching DNS at all. + +set -u + +add_host() { + local name="$1" + local ip + ip=$(nslookup "$name" 127.0.0.11 2>/dev/null | sed -n 's/^Address: //p' | tail -1) + if [ -n "$ip" ]; then + # Avoid duplicate entries on container restart + if ! 
grep -q "[[:space:]]$name" /etc/hosts 2>/dev/null; then + printf '%s\t%s\n' "$ip" "$name" >> /etc/hosts + echo "[ha-init-wrapper] Added /etc/hosts entry: $ip $name" + fi + else + echo "[ha-init-wrapper] WARNING: could not resolve $name via nslookup" + fi +} + +for host in ssh_docker_test_1 ssh_docker_test_2; do + add_host "$host" +done + +# Hand off to the original HA init process +exec /init diff --git a/tests/playwright/pytest.ini b/tests/playwright/pytest.ini new file mode 100644 index 0000000..f254211 --- /dev/null +++ b/tests/playwright/pytest.ini @@ -0,0 +1,7 @@ +[pytest] +# Rootdir anchor for the Playwright E2E test suite. +# +# This file pins pytest's rootdir to tests/playwright/ so that pytest does NOT +# traverse up into the HA component package (/app/__init__.py), which imports +# HA-specific packages (voluptuous etc.) that are not installed in the test +# runner image. diff --git a/tests/playwright/requirements.txt b/tests/playwright/requirements.txt new file mode 100644 index 0000000..e0b541e --- /dev/null +++ b/tests/playwright/requirements.txt @@ -0,0 +1,4 @@ +pytest>=8.0.0,<9.0.0 +pytest-playwright>=0.5.0,<1.0.0 +playwright>=1.44.0,<2.0.0 +requests>=2.32.0,<3.0.0 diff --git a/tests/playwright/ssh-init-entrypoint.sh b/tests/playwright/ssh-init-entrypoint.sh new file mode 100644 index 0000000..664bfe3 --- /dev/null +++ b/tests/playwright/ssh-init-entrypoint.sh @@ -0,0 +1,37 @@ +#!/bin/sh +# SSH test server startup script. +# +# If the /ssh-init-data directory is mounted (a shared volume also mounted +# read-only into the HA container at /ssh-test-keys), this script writes two +# files into it before starting sshd: +# +# id_ed25519 — the test user's ed25519 private key (generated at image +# build time); lets the HA integration connect with +# key_file="/ssh-test-keys/id_ed25519" in tests. 
+# +# known_hosts — one line in OpenSSH known_hosts format containing this +# container's ed25519 host public key; used by tests that +# set check_known_hosts=True and known_hosts="/ssh-test-keys/known_hosts". +# +# The container name is injected by docker-compose via the CONTAINER_NAME +# environment variable and is used as the hostname in the known_hosts line. + +set -e + +if [ -d /ssh-init-data ]; then + printf '[ssh-init] Populating /ssh-init-data/ ...\n' + + # User auth private key (generated at image build time, same across all + # containers that share this image). + cp /home/foo/.ssh/id_ed25519 /ssh-init-data/id_ed25519 + chmod 644 /ssh-init-data/id_ed25519 + + # known_hosts line: + HOST="${CONTAINER_NAME:-$(hostname)}" + awk -v h="${HOST}" '{print h " " $1 " " $2}' \ + /etc/ssh/ssh_host_ed25519_key.pub > /ssh-init-data/known_hosts + + printf '[ssh-init] Done (host=%s).\n' "${HOST}" +fi + +exec /usr/sbin/sshd -D diff --git a/tests/playwright/test_command_execution.py b/tests/playwright/test_command_execution.py new file mode 100644 index 0000000..9645b15 --- /dev/null +++ b/tests/playwright/test_command_execution.py @@ -0,0 +1,184 @@ +"""Playwright E2E tests: SSH command execution against real SSH test servers.""" + +from __future__ import annotations + +from typing import Any + +import requests + +from conftest import HA_URL + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def execute(ha_api: requests.Session, payload: dict) -> requests.Response: + """Call the ssh_command.execute service and return the raw response.""" + return ha_api.post( + f"{HA_URL}/api/services/ssh_command/execute?return_response", + json=payload, + ) + + +def svc_data(resp: requests.Response) -> dict: + """Extract the ssh_command service response dict from an HA API response. 
+ + HA wraps service responses in ``{"service_response": {...}, "changed_states": [...]}``. + """ + return resp.json().get("service_response", resp.json()) + + +def base_payload(ssh_server: dict, command: str, **kwargs) -> dict: + """Build a minimal execute payload from a server fixture and a command. + + Extra keyword arguments are merged into the payload, allowing callers to + override any field (e.g. ``timeout``, ``check_known_hosts``). + """ + payload = { + "host": ssh_server["host"], + "username": ssh_server["username"], + "password": ssh_server["password"], + "command": command, + "check_known_hosts": False, + } + payload.update(kwargs) + return payload + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestCommandExecution: + """End-to-end tests that execute real commands on the SSH test servers.""" + + def test_echo_command(self, ha_api: requests.Session, ensure_integration: Any, ssh_server_1: dict) -> None: + """A simple echo command returns the expected string on stdout.""" + resp = execute(ha_api, base_payload(ssh_server_1, "echo hello")) + assert resp.status_code == 200, resp.text + data = svc_data(resp) + assert "hello" in data.get("output", "") + assert data.get("exit_status") == 0 + + def test_pwd_command(self, ha_api: requests.Session, ensure_integration: Any, ssh_server_1: dict) -> None: + """The pwd command returns a non-empty path.""" + resp = execute(ha_api, base_payload(ssh_server_1, "pwd")) + assert resp.status_code == 200, resp.text + data = svc_data(resp) + assert data.get("output", "").strip() != "" + assert data.get("exit_status") == 0 + + def test_command_stdout_captured(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Multiline output is fully captured.""" + resp = execute(ha_api, base_payload(ssh_server_1, "printf 'line1\\nline2\\nline3\\n'")) + assert 
resp.status_code == 200, resp.text + output = svc_data(resp).get("output", "") + assert "line1" in output + assert "line2" in output + assert "line3" in output + + def test_command_stderr_captured(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Output written to stderr is captured in the 'error' field.""" + resp = execute(ha_api, base_payload(ssh_server_1, "echo error_message >&2")) + assert resp.status_code == 200, resp.text + data = svc_data(resp) + assert "error_message" in data.get("error", "") + + def test_nonzero_exit_status(self, ha_api: requests.Session, ensure_integration: Any, ssh_server_1: dict) -> None: + """A failing command returns a non-zero exit status.""" + resp = execute(ha_api, base_payload(ssh_server_1, "exit 42")) + assert resp.status_code == 200, resp.text + assert svc_data(resp).get("exit_status") == 42 + + def test_zero_exit_status(self, ha_api: requests.Session, ensure_integration: Any, ssh_server_1: dict) -> None: + """A successful command returns exit status 0.""" + resp = execute(ha_api, base_payload(ssh_server_1, "true")) + assert resp.status_code == 200, resp.text + assert svc_data(resp).get("exit_status") == 0 + + def test_command_with_env_variable(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Environment variable expansion works inside commands.""" + resp = execute(ha_api, base_payload(ssh_server_1, "echo $HOME")) + assert resp.status_code == 200, resp.text + assert svc_data(resp).get("output", "").strip() != "" + + def test_second_ssh_server(self, ha_api: requests.Session, ensure_integration: Any, ssh_server_2: dict) -> None: + """Commands can be executed against the second SSH test server.""" + resp = execute(ha_api, base_payload(ssh_server_2, "echo server2")) + assert resp.status_code == 200, resp.text + assert "server2" in svc_data(resp).get("output", "") + + def test_command_timeout_handling(self, ha_api: requests.Session, 
ensure_integration: Any, + ssh_server_1: dict) -> None: + """A command that exceeds the timeout returns a 400 error.""" + payload = base_payload(ssh_server_1, "sleep 60") + payload["timeout"] = 2 + resp = execute(ha_api, payload) + # HA raises ServiceValidationError for timeout → HTTP 400 + assert resp.status_code >= 400, resp.text + + def test_command_not_provided_requires_input(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Omitting both command and input returns a 400 validation error.""" + payload = { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "check_known_hosts": False, + } + resp = execute(ha_api, payload) + assert resp.status_code >= 400, resp.text + + def test_no_password_or_key_returns_error(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Omitting both password and key_file returns a 400 validation error.""" + payload = { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "command": "echo hi", + "check_known_hosts": False, + } + resp = execute(ha_api, payload) + assert resp.status_code >= 400, resp.text + + def test_input_parameter_stdin(self, ha_api: requests.Session, ensure_integration: Any, ssh_server_1: dict) -> None: + """The 'input' parameter pipes text to the command's stdin.""" + resp = execute( + ha_api, + base_payload(ssh_server_1, "cat", input="hello from stdin\n"), + ) + assert resp.status_code == 200, resp.text + assert "hello from stdin" in svc_data(resp)["output"] + + def test_all_optional_parameters(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Supplying every optional parameter in a single call works correctly.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "cat", + "input": "all_params\n", + 
"check_known_hosts": False, + "timeout": 20, + }, + ) + assert resp.status_code == 200, resp.text + data = svc_data(resp) + assert "all_params" in data["output"] + assert data["exit_status"] == 0 + + def test_long_output_command(self, ha_api: requests.Session, ensure_integration: Any, ssh_server_1: dict) -> None: + """A command that produces a large amount of output is handled correctly.""" + resp = execute(ha_api, base_payload(ssh_server_1, "seq 1 500")) + assert resp.status_code == 200, resp.text + output = svc_data(resp).get("output", "") + assert "500" in output diff --git a/tests/playwright/test_configuration.py b/tests/playwright/test_configuration.py new file mode 100644 index 0000000..5adb6e6 --- /dev/null +++ b/tests/playwright/test_configuration.py @@ -0,0 +1,248 @@ +"""Playwright E2E tests: SSH Command configuration management.""" + +from __future__ import annotations + +from typing import Any + +import requests + +from conftest import HA_URL, SSH_KEY_FILE, SSH_KNOWN_HOSTS + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def execute(ha_api: requests.Session, payload: dict) -> requests.Response: + """Call the ssh_command.execute service.""" + return ha_api.post( + f"{HA_URL}/api/services/ssh_command/execute?return_response", + json=payload, + ) + + +def svc_data(resp: requests.Response) -> dict: + """Extract the ssh_command service response dict from an HA API response.""" + return resp.json().get("service_response", resp.json()) + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestConfiguration: + """Tests covering configuration options of the SSH Command integration.""" + + def test_default_timeout_accepted(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + 
"""Omitting the timeout field uses the default (30 s) and the call succeeds.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo default_timeout", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + assert "default_timeout" in svc_data(resp)["output"] + + def test_custom_timeout_accepted(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """An explicit timeout value is accepted by the service schema.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo custom_timeout", + "check_known_hosts": False, + "timeout": 20, + }, + ) + assert resp.status_code == 200, resp.text + assert "custom_timeout" in svc_data(resp)["output"] + + def test_check_known_hosts_false(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Setting check_known_hosts=False bypasses host verification.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo no_host_check", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + assert "no_host_check" in svc_data(resp)["output"] + + def test_known_hosts_with_check_disabled_rejected(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Providing known_hosts while check_known_hosts=False is a validation error.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo hi", + "check_known_hosts": False, + "known_hosts": "/tmp/known_hosts", + }, + ) + assert resp.status_code >= 400, resp.text + + def test_password_auth_configuration(self, 
ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Password-based authentication is accepted and works against the test server.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo password_auth", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + assert "password_auth" in svc_data(resp)["output"] + + def test_key_file_not_found_rejected(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Providing a non-existent key_file path results in a validation error.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "key_file": "/nonexistent/id_rsa", + "command": "echo hi", + "check_known_hosts": False, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_multiple_servers_independent( + self, + ha_api: requests.Session, + ensure_integration: Any, + ssh_server_1: dict, + ssh_server_2: dict, + ) -> None: + """Commands can be sent to two different SSH servers independently.""" + resp1 = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo server1", + "check_known_hosts": False, + }, + ) + resp2 = execute( + ha_api, + { + "host": ssh_server_2["host"], + "username": ssh_server_2["username"], + "password": ssh_server_2["password"], + "command": "echo server2", + "check_known_hosts": False, + }, + ) + assert resp1.status_code == 200, resp1.text + assert resp2.status_code == 200, resp2.text + assert "server1" in svc_data(resp1)["output"] + assert "server2" in svc_data(resp2)["output"] + + def test_username_configuration(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """The username field is correctly forwarded to the SSH connection.""" + resp = 
execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "whoami", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + output = svc_data(resp)["output"].strip() + assert output == ssh_server_1["username"] + + def test_key_file_authentication(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Authenticate using a private key file instead of a password. + + The ed25519 key pair is generated at image build time and written to the + shared ssh_test_init volume by ssh_docker_test_1's startup script. The + public key is preloaded into the test user's authorized_keys, so the + HA integration can authenticate with key_file only (no password). + """ + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "key_file": SSH_KEY_FILE, + "command": "echo key_auth_ok", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + assert "key_auth_ok" in svc_data(resp)["output"] + + def test_check_known_hosts_true_valid(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """check_known_hosts=True with a matching known_hosts file succeeds. + + The ssh_docker_test_1 startup script writes the server's ed25519 host + public key to the shared volume. The test supplies that file via the + known_hosts parameter, so host verification should pass. 
+ """ + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo known_hosts_ok", + "check_known_hosts": True, + "known_hosts": SSH_KNOWN_HOSTS, + }, + ) + assert resp.status_code == 200, resp.text + assert "known_hosts_ok" in svc_data(resp)["output"] + + def test_check_known_hosts_true_unknown_server(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """check_known_hosts=True without a valid known_hosts file returns an error. + + When check_known_hosts=True and no known_hosts path is supplied, the + coordinator falls back to ~/.ssh/known_hosts, which will not contain the + test server's host key. The connection attempt must be rejected. + """ + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo hi", + "check_known_hosts": True, + # No known_hosts supplied — HA falls back to ~/.ssh/known_hosts + # which does not contain the test server's key. 
+ }, + ) + assert resp.status_code >= 400, resp.text diff --git a/tests/playwright/test_frontend.py b/tests/playwright/test_frontend.py new file mode 100644 index 0000000..2f1a877 --- /dev/null +++ b/tests/playwright/test_frontend.py @@ -0,0 +1,82 @@ +"""Playwright E2E tests: SSH Command frontend / UI interactions.""" + +from __future__ import annotations + +from typing import Any + +from playwright.sync_api import Page, expect + +from conftest import HA_URL + + +class TestFrontend: + """Tests that exercise the Home Assistant frontend with the SSH Command integration.""" + + def test_home_assistant_frontend_loads(self, page: Page) -> None: + """The Home Assistant frontend loads successfully.""" + page.goto(HA_URL) + page.wait_for_load_state("networkidle") + # HA login page or overview should load + expect(page).not_to_have_title("") + + def test_integrations_page_accessible(self, page: Page) -> None: + """The integrations settings page is accessible.""" + page.goto(f"{HA_URL}/config/integrations") + page.wait_for_load_state("networkidle") + # Page should not show a network error + assert page.url.startswith(HA_URL), f"Unexpected redirect to: {page.url}" + + def test_developer_tools_page_loads(self, page: Page) -> None: + """Developer tools page loads (used for calling services manually).""" + page.goto(f"{HA_URL}/developer-tools/service") + page.wait_for_load_state("networkidle") + assert page.url.startswith(HA_URL) + + def test_ssh_command_visible_in_integrations(self, page: Page, ensure_integration: Any) -> None: + """After setup, SSH Command appears on the integrations page.""" + page.goto(f"{HA_URL}/config/integrations") + page.wait_for_load_state("networkidle") + # Look for the integration card/name on the page + ssh_card = page.get_by_text("SSH Command", exact=False) + expect(ssh_card.first).to_be_visible() + + def test_service_call_via_developer_tools(self, page: Page, ensure_integration: Any) -> None: + """It should be possible to navigate to the service 
call UI for ssh_command.""" + page.goto(f"{HA_URL}/developer-tools/service") + page.wait_for_load_state("networkidle") + + # Open the service selector dropdown + service_selector = page.locator("ha-service-picker, [data-domain='ssh_command']").first + if service_selector.is_visible(): + service_selector.click() + page.wait_for_timeout(500) + # Look for ssh_command option + ssh_option = page.get_by_text("ssh_command", exact=False) + if ssh_option.is_visible(): + ssh_option.first.click() + + # Page should still be accessible (no crashes) + assert page.url.startswith(HA_URL) + + def test_config_page_shows_integration_info(self, page: Page, ensure_integration: Any) -> None: + """The SSH Command integration detail page shows expected information.""" + page.goto(f"{HA_URL}/config/integrations") + page.wait_for_load_state("networkidle") + + # Try to click on the SSH Command integration card + ssh_link = page.get_by_text("SSH Command", exact=False).first + if ssh_link.is_visible(): + ssh_link.click() + page.wait_for_load_state("networkidle") + # Verify we are still on a valid HA page + assert page.url.startswith(HA_URL) + + def test_no_javascript_errors_on_main_page(self, page: Page) -> None: + """The main HA page does not log critical JavaScript errors.""" + errors: list[str] = [] + page.on("pageerror", lambda exc: errors.append(str(exc))) + page.goto(HA_URL) + page.wait_for_load_state("networkidle") + # Filter out known non-critical errors; check only for unhandled exceptions + critical = [e for e in errors if "ResizeObserver" not in e] + assert len(critical) == 0, f"JavaScript errors: {critical}" diff --git a/tests/playwright/test_integration_setup.py b/tests/playwright/test_integration_setup.py new file mode 100644 index 0000000..c1e12dc --- /dev/null +++ b/tests/playwright/test_integration_setup.py @@ -0,0 +1,187 @@ +"""Playwright E2E tests: SSH Command integration setup via the config flow.""" + +from __future__ import annotations + +from typing import Any + +import 
requests + +from conftest import ( + HA_URL, + _get_ssh_command_entry_ids, + _remove_all_ssh_command_entries, +) + + +class TestIntegrationSetup: + """Tests that cover adding and removing the SSH Command integration.""" + + def test_connection_error_handling(self, ha_api: requests.Session, ensure_integration: Any) -> None: + """Calling execute with an unreachable host raises a validation error.""" + resp = ha_api.post( + f"{HA_URL}/api/services/ssh_command/execute?return_response", + json={ + "host": "192.0.2.1", # RFC 5737 TEST-NET – guaranteed unreachable + "username": "nobody", + "password": "nopass", + "command": "echo hi", + "check_known_hosts": False, + "timeout": 5, + }, + ) + # HA returns 500 for ServiceValidationError + assert resp.status_code == 500, resp.text + + def test_invalid_credentials_error(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Connecting with wrong credentials returns a permission-denied error.""" + resp = ha_api.post( + f"{HA_URL}/api/services/ssh_command/execute?return_response", + json={ + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": "wrongpassword", + "command": "echo hi", + "check_known_hosts": False, + "timeout": 10, + }, + ) + assert resp.status_code == 500, resp.text + + +class TestIntegrationLifecycle: + """Single end-to-end lifecycle test covering all five requirements: + + 1. Add the integration. + 2. Assert it cannot be added a second time. + 3. Send commands covering all service parameters. + 4. Remove the integration. + 5. Assert removal leaves the environment identical to its pre-test state + so the test can be repeated with no side effects. + """ + + def test_full_lifecycle(self, ha_api: requests.Session, ssh_server_1: dict, ssh_server_2: dict) -> None: + """Complete add → use → remove → verify-clean lifecycle.""" + + # ------------------------------------------------------------------ # + # 0. 
Precondition: start from a clean state (no integration present). # + # If a previous run left an entry behind, remove it first so this # + # test is idempotent. # + # ------------------------------------------------------------------ # + assert ssh_server_1["host"] != ssh_server_2["host"], ( + "ssh_server_1 and ssh_server_2 must be distinct servers for the multi-server scenario to be meaningful" + ) + _remove_all_ssh_command_entries(ha_api) + assert _get_ssh_command_entry_ids(ha_api) == set(), ( + "Precondition failed: ssh_command entries still present after cleanup" + ) + + # ------------------------------------------------------------------ # + # 1. Add the integration via the config flow. # + # ------------------------------------------------------------------ # + add_resp = ha_api.post( + f"{HA_URL}/api/config/config_entries/flow", + json={"handler": "ssh_command"}, + ) + assert add_resp.status_code in (200, 201), add_resp.text + assert add_resp.json().get("type") == "create_entry", ( + f"Expected 'create_entry', got: {add_resp.json().get('type')!r}" + ) + + entry_ids_after_add = _get_ssh_command_entry_ids(ha_api) + assert len(entry_ids_after_add) == 1, ( + f"Expected exactly 1 ssh_command entry, found: {len(entry_ids_after_add)}" + ) + entry_id = next(iter(entry_ids_after_add)) + + # ------------------------------------------------------------------ # + # 2. Assert the integration cannot be added a second time. 
# + # ------------------------------------------------------------------ # + second_add = ha_api.post( + f"{HA_URL}/api/config/config_entries/flow", + json={"handler": "ssh_command"}, + ) + assert second_add.status_code in (200, 201), second_add.text + assert second_add.json().get("type") == "abort", ( + f"Expected 'abort' on second add, got: {second_add.json().get('type')!r}" + ) + assert second_add.json().get("reason") == "single_instance_allowed" + # Still exactly one entry – the second attempt must not create another + assert _get_ssh_command_entry_ids(ha_api) == {entry_id} + + # ------------------------------------------------------------------ # + # 3. Send commands covering all service parameters. # + # ------------------------------------------------------------------ # + def call(payload: dict) -> dict: + r = ha_api.post( + f"{HA_URL}/api/services/ssh_command/execute?return_response", + json=payload, + ) + assert r.status_code == 200, f"Service call failed: {r.text}" + return r.json().get("service_response", r.json()) + + base = { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "check_known_hosts": False, + } + + # host + username + password + command + check_known_hosts + data = call({**base, "command": "echo hello"}) + assert "hello" in data["output"] + assert data["exit_status"] == 0 + + # timeout parameter + data = call({**base, "command": "echo timeout_ok", "timeout": 15}) + assert "timeout_ok" in data["output"] + + # command writing to stderr + data = call({**base, "command": "echo err_out >&2"}) + assert "err_out" in data["error"] + + # non-zero exit status + data = call({**base, "command": "exit 2"}) + assert data["exit_status"] == 2 + + # input parameter: send text to stdin via the 'cat' command + data = call({**base, "command": "cat", "input": "stdin_content\n"}) + assert "stdin_content" in data["output"] + + # second SSH server + base2 = { + "host": ssh_server_2["host"], + 
"username": ssh_server_2["username"], + "password": ssh_server_2["password"], + "check_known_hosts": False, + } + data = call({**base2, "command": "echo server2"}) + assert "server2" in data["output"] + + # ------------------------------------------------------------------ # + # 4. Remove the integration. # + # ------------------------------------------------------------------ # + del_resp = ha_api.delete( + f"{HA_URL}/api/config/config_entries/entry/{entry_id}" + ) + assert del_resp.status_code in (200, 204), del_resp.text + + # ------------------------------------------------------------------ # + # 5. Assert removal and environment parity with pre-test state. # + # ------------------------------------------------------------------ # + remaining = _get_ssh_command_entry_ids(ha_api) + assert remaining == set(), ( + f"Expected no ssh_command entries after removal, found: {remaining}" + ) + + # Confirm the service is no longer usable (no coordinator present) + no_integration_resp = ha_api.post( + f"{HA_URL}/api/services/ssh_command/execute?return_response", + json={**base, "command": "echo hi"}, + ) + assert no_integration_resp.status_code >= 400, ( + "Service should return 400 when the integration is not configured" + ) + + # The test started with no integration and ends with no integration – + # running it again will follow exactly the same path. 
diff --git a/tests/playwright/test_security.py b/tests/playwright/test_security.py new file mode 100644 index 0000000..8aff42f --- /dev/null +++ b/tests/playwright/test_security.py @@ -0,0 +1,173 @@ +"""Playwright E2E tests: SSH Command security properties.""" + +from __future__ import annotations + +from typing import Any + +import requests + +from conftest import HA_URL + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def execute(ha_api: requests.Session, payload: dict) -> requests.Response: + """Call the ssh_command.execute service.""" + return ha_api.post( + f"{HA_URL}/api/services/ssh_command/execute?return_response", + json=payload, + ) + + +def svc_data(resp: requests.Response) -> dict: + """Extract the ssh_command service response dict from an HA API response.""" + return resp.json().get("service_response", resp.json()) + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestSecurity: + """Tests that validate the security properties of the SSH Command integration.""" + + def test_invalid_password_rejected(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """An incorrect password results in a 400 authentication error.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": "definitely_wrong_password", + "command": "echo hi", + "check_known_hosts": False, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_invalid_username_rejected(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """An incorrect username results in a 400 authentication error.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": "nonexistent_user_xyz", + 
"password": ssh_server_1["password"], + "command": "echo hi", + "check_known_hosts": False, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_unreachable_host_rejected(self, ha_api: requests.Session, ensure_integration: Any) -> None: + """Connecting to an unreachable host results in a 400 connection error.""" + resp = execute( + ha_api, + { + "host": "192.0.2.255", # RFC 5737 TEST-NET – documentation address, typically unreachable + "username": "user", + "password": "pass", + "command": "echo hi", + "check_known_hosts": False, + "timeout": 5, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_nonexistent_host_rejected(self, ha_api: requests.Session, ensure_integration: Any) -> None: + """Connecting to a non-existent hostname results in a 400 DNS error.""" + resp = execute( + ha_api, + { + "host": "this.host.does.not.exist.invalid", # .invalid TLD is guaranteed non-resolvable (RFC 2606) + "username": "user", + "password": "pass", + "command": "echo hi", + "check_known_hosts": False, + "timeout": 5, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_nonexistent_key_file_rejected(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Referencing a key file that does not exist results in a validation error.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "key_file": "/nonexistent/path/id_rsa", + "command": "echo hi", + "check_known_hosts": False, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_api_requires_authentication(self) -> None: + """Calling the HA service API without an auth token is rejected with 401.""" + resp = requests.post( + f"{HA_URL}/api/services/ssh_command/execute?return_response", + json={ + "host": "192.0.2.1", + "username": "user", + "password": "pass", + "command": "echo hi", + "check_known_hosts": False, + }, + timeout=10, + ) + assert resp.status_code == 401, resp.text + + def 
test_known_hosts_conflict_rejected(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Supplying known_hosts with check_known_hosts=False is rejected.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo hi", + "check_known_hosts": False, + "known_hosts": "/tmp/known_hosts_conflict", + }, + ) + assert resp.status_code >= 400, resp.text + + def test_no_credentials_rejected(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """A service call that omits both password and key_file is rejected.""" + resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "command": "echo hi", + "check_known_hosts": False, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_successful_auth_uses_encrypted_connection(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """A successful SSH command is executed (implying an encrypted SSH session).""" + # asyncssh always uses encrypted connections; we verify the round-trip succeeds. 
+ resp = execute( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo encrypted_conn_ok", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + assert "encrypted_conn_ok" in svc_data(resp)["output"] diff --git a/tests/playwright/test_services.py b/tests/playwright/test_services.py new file mode 100644 index 0000000..5bf297e --- /dev/null +++ b/tests/playwright/test_services.py @@ -0,0 +1,183 @@ +"""Playwright E2E tests: ssh_command.execute service behaviour.""" + +from __future__ import annotations + +from typing import Any + +import requests + +from conftest import HA_URL + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def call_service(ha_api: requests.Session, payload: dict) -> requests.Response: + """POST to the ssh_command execute service and return the raw response.""" + return ha_api.post( + f"{HA_URL}/api/services/ssh_command/execute?return_response", + json=payload, + ) + + +def svc_data(resp: requests.Response) -> dict: + """Extract the ssh_command service response dict from an HA API response.""" + return resp.json().get("service_response", resp.json()) + + +# --------------------------------------------------------------------------- +# Tests +# --------------------------------------------------------------------------- + + +class TestServices: + """Tests focused on the HA service interface of SSH Command.""" + + def test_service_registered(self, ha_api: requests.Session, ensure_integration: Any) -> None: + """The ssh_command.execute service should appear in the HA services list.""" + resp = ha_api.get(f"{HA_URL}/api/services") + resp.raise_for_status() + services = resp.json() + domains = {svc["domain"] for svc in services} + assert "ssh_command" in domains + + ssh_services = next( + (svc for svc in 
services if svc["domain"] == "ssh_command"), None + ) + assert ssh_services is not None + assert "execute" in ssh_services.get("services", {}) + + def test_service_returns_response(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """The service returns a structured response with output/error/exit_status.""" + resp = call_service( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo response_test", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + data = svc_data(resp) + assert "output" in data + assert "error" in data + assert "exit_status" in data + + def test_service_echo_output(self, ha_api: requests.Session, ensure_integration: Any, ssh_server_1: dict) -> None: + """The service captures stdout from the remote command.""" + resp = call_service( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo service_output_check", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + assert "service_output_check" in svc_data(resp)["output"] + + def test_service_with_exit_status_error(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """A command that exits with a non-zero code is still returned as 200 with the exit code.""" + resp = call_service( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "exit 1", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + assert svc_data(resp)["exit_status"] == 1 + + def test_service_requires_integration_setup(self, ha_api: requests.Session) -> None: + """Calling the service without a configured integration returns 400.""" + # Make sure no integration is set up + entries_resp = 
ha_api.get(f"{HA_URL}/api/config/config_entries/entry") + for entry in entries_resp.json(): + if entry["domain"] == "ssh_command": + ha_api.delete( + f"{HA_URL}/api/config/config_entries/entry/{entry['entry_id']}" + ) + + resp = call_service( + ha_api, + { + "host": "192.0.2.1", + "username": "user", + "password": "pass", + "command": "echo hi", + "check_known_hosts": False, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_service_validation_missing_auth(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """The service rejects calls that lack both password and key_file.""" + resp = call_service( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "command": "echo hi", + "check_known_hosts": False, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_service_validation_missing_command_and_input(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """The service rejects calls that lack both command and input.""" + resp = call_service( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "check_known_hosts": False, + }, + ) + assert resp.status_code >= 400, resp.text + + def test_service_with_timeout_parameter(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """The timeout parameter is accepted and used by the service.""" + resp = call_service( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo timeout_test", + "check_known_hosts": False, + "timeout": 15, + }, + ) + assert resp.status_code == 200, resp.text + assert "timeout_test" in svc_data(resp)["output"] + + def test_service_stderr_in_response(self, ha_api: requests.Session, ensure_integration: Any, + ssh_server_1: dict) -> None: + """Stderr output appears in the 
'error' field of the service response.""" + resp = call_service( + ha_api, + { + "host": ssh_server_1["host"], + "username": ssh_server_1["username"], + "password": ssh_server_1["password"], + "command": "echo err_msg >&2", + "check_known_hosts": False, + }, + ) + assert resp.status_code == 200, resp.text + assert "err_msg" in svc_data(resp)["error"]