diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f54c3dc..b054a96 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -72,6 +72,19 @@ jobs: --build-arg COPILOT_API_VERSION="${{ steps.versions.outputs.copilot_api_version }}" \ . + - name: Build core and rust images via Makefile + run: | + make build-core build-rust-image \ + IMAGE_NAME=deva-smoke \ + TAG=ci \ + CORE_TAG=ci-core \ + RUST_TAG=ci-rust \ + CLAUDE_CODE_VERSION="${{ steps.versions.outputs.claude_code_version }}" \ + CODEX_VERSION="${{ steps.versions.outputs.codex_version }}" \ + GEMINI_CLI_VERSION="${{ steps.versions.outputs.gemini_cli_version }}" \ + ATLAS_CLI_VERSION="${{ steps.versions.outputs.atlas_cli_version }}" \ + COPILOT_API_VERSION="${{ steps.versions.outputs.copilot_api_version }}" + - name: Install and launch each agent without a TTY shell: bash run: | @@ -90,6 +103,50 @@ jobs: deva.sh codex -Q -- --version deva.sh gemini -Q -- --version + - name: Smoke Claude --chrome mount assembly + shell: bash + run: | + set -euo pipefail + tmp_root="$(mktemp -d)" + bridge_dir="$tmp_root/claude-mcp-browser-bridge-$(id -un)" + profile_dir="$tmp_root/chrome/Default" + + mkdir -p "$bridge_dir" + chmod 700 "$bridge_dir" + mkdir -p "$profile_dir/Extensions/fcoeoabgfenejglbffodgkkbkcdhcgfn/1.0.0" + + dry_run="$( + DEVA_DOCKER_IMAGE=deva-smoke \ + DEVA_DOCKER_TAG=ci \ + DEVA_CHROME_PROFILE_PATH="$profile_dir" \ + DEVA_HOST_CHROME_BRIDGE_DIR="$bridge_dir" \ + ./deva.sh claude --debug --dry-run -- --chrome 2>&1 + )" + + printf '%s\n' "$dry_run" + grep -F -- "$bridge_dir:/deva-host-chrome-bridge" <<<"$dry_run" + grep -F -- "$profile_dir/Extensions:/home/deva/.config/google-chrome/Default/Extensions:ro" <<<"$dry_run" + + - name: Smoke Chrome bridge entrypoint symlink + shell: bash + run: | + set -euo pipefail + tmp_root="$(mktemp -d)" + bridge_dir="$tmp_root/claude-mcp-browser-bridge-$(id -un)" + + mkdir -p "$bridge_dir" + chmod 700 "$bridge_dir" + + docker run --rm \ + -e 
DEVA_AGENT=claude \ + -e DEVA_UID="$(id -u)" \ + -e DEVA_GID="$(id -g)" \ + -e DEVA_CHROME_HOST_BRIDGE=1 \ + -e DEVA_CHROME_HOST_BRIDGE_DIR=/deva-host-chrome-bridge \ + -v "$bridge_dir:/deva-host-chrome-bridge" \ + deva-smoke:ci \ + bash -lc 'link="/tmp/claude-mcp-browser-bridge-$(id -un)"; test -L "$link"; test "$(readlink "$link")" = "/deva-host-chrome-bridge"' + docs: name: Docs Build runs-on: ubuntu-latest diff --git a/Dockerfile b/Dockerfile index 5e44a80..fb92507 100644 --- a/Dockerfile +++ b/Dockerfile @@ -165,8 +165,10 @@ ENV NPM_CONFIG_FETCH_RETRIES=5 \ NPM_CONFIG_FETCH_RETRY_FACTOR=2 \ NPM_CONFIG_FETCH_RETRY_MINTIMEOUT=10000 -# Final stage with shell setup -FROM tools AS final +# Stable agent base: user, shell, and shared runtimes. +# Keep volatile agent package installs out of this stage so downstream +# images can inherit it without rebuilding on every late-stage change. +FROM tools AS agent-base # Create non-root user for agent execution # Using 1001 as default to avoid conflicts with ubuntu user (usually 1000) @@ -208,43 +210,28 @@ RUN echo 'export ZSH="$HOME/.oh-my-zsh"' > "$DEVA_HOME/.zshrc" && \ RUN curl -LsSf https://astral.sh/uv/install.sh | sh && \ $DEVA_HOME/.local/bin/uv python install 3.14t +# Final image: install volatile agent packages on top of the stable base. 
+FROM agent-base AS final + # Declare ARGs immediately before usage to minimize cache invalidation -ARG CLAUDE_CODE_VERSION -ARG CODEX_VERSION -ARG GEMINI_CLI_VERSION=latest +ARG CLAUDE_CODE_VERSION=2.1.81 +ARG CODEX_VERSION=0.116.0 +ARG GEMINI_CLI_VERSION=0.35.0 # Record key tool versions as labels for quick inspection LABEL org.opencontainers.image.claude_code_version=${CLAUDE_CODE_VERSION} LABEL org.opencontainers.image.codex_version=${CODEX_VERSION} LABEL org.opencontainers.image.gemini_cli_version=${GEMINI_CLI_VERSION} -# Install CLI tools via npm -RUN --mount=type=cache,target=/home/deva/.npm,uid=${DEVA_UID},gid=${DEVA_GID},sharing=locked \ - set -eux && \ - npm config set prefix "$DEVA_HOME/.npm-global" && \ - npm install -g --no-audit --no-fund \ - @anthropic-ai/claude-code@${CLAUDE_CODE_VERSION} \ - @mariozechner/claude-trace \ - @openai/codex@${CODEX_VERSION} \ - @google/gemini-cli@${GEMINI_CLI_VERSION} && \ - npm cache clean --force && \ - "$DEVA_HOME/.npm-global/bin/claude" --version && \ - "$DEVA_HOME/.npm-global/bin/codex" --version && \ - "$DEVA_HOME/.npm-global/bin/gemini" --version && \ - "$DEVA_HOME/.npm-global/bin/claude-trace" --help >/dev/null && \ - (npm list -g --depth=0 @anthropic-ai/claude-code @openai/codex @google/gemini-cli || true) - -# Volatile packages: Install at the end to avoid cascading rebuilds -ARG ATLAS_CLI_VERSION=main +ARG ATLAS_CLI_VERSION=v0.1.4 LABEL org.opencontainers.image.atlas_cli_version=${ATLAS_CLI_VERSION} -# Install atlas-cli binary + skill via upstream install.sh -# - Uses prebuilt release tarball (faster than go install) -# - Falls back to go install if no prebuilt for platform -# - Installs skill with proper structure (SKILL.md + references/) -RUN curl -fsSL "https://raw.githubusercontent.com/lroolle/atlas-cli/${ATLAS_CLI_VERSION}/install.sh" \ - | bash -s -- --skill-dir $DEVA_HOME/.skills +COPY --chown=deva:deva scripts/install-agent-tooling.sh /tmp/install-agent-tooling.sh + +RUN 
--mount=type=cache,target=/home/deva/.npm,uid=${DEVA_UID},gid=${DEVA_GID},sharing=locked \ + bash /tmp/install-agent-tooling.sh && \ + rm -f /tmp/install-agent-tooling.sh USER root diff --git a/Dockerfile.rust b/Dockerfile.rust index c347e96..9f5c2c8 100644 --- a/Dockerfile.rust +++ b/Dockerfile.rust @@ -8,15 +8,26 @@ FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title="deva-rust" LABEL org.opencontainers.image.description="Rust development environment with full toolchain" +ARG CLAUDE_CODE_VERSION=2.1.81 +ARG CODEX_VERSION=0.116.0 +ARG GEMINI_CLI_VERSION=0.35.0 +ARG ATLAS_CLI_VERSION=v0.1.4 ARG RUST_TOOLCHAINS="stable" ARG RUST_TARGETS="wasm32-unknown-unknown" +LABEL org.opencontainers.image.claude_code_version=${CLAUDE_CODE_VERSION} +LABEL org.opencontainers.image.codex_version=${CODEX_VERSION} +LABEL org.opencontainers.image.gemini_cli_version=${GEMINI_CLI_VERSION} +LABEL org.opencontainers.image.atlas_cli_version=${ATLAS_CLI_VERSION} + SHELL ["/bin/bash", "-o", "pipefail", "-c"] ENV RUSTUP_HOME=/opt/rustup \ CARGO_HOME=/opt/cargo \ PATH=/opt/cargo/bin:$PATH +USER root + RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update && \ @@ -67,3 +78,26 @@ RUN echo 'export PATH="/opt/cargo/bin:$PATH"' >> "$DEVA_HOME/.zshrc" && \ echo 'alias cw="cargo watch -x check -x test -x run"' >> "$DEVA_HOME/.zshrc" && \ echo 'alias cf="cargo fmt"' >> "$DEVA_HOME/.zshrc" && \ echo 'alias cl="cargo clippy"' >> "$DEVA_HOME/.zshrc" + +USER $DEVA_USER + +COPY --chown=deva:deva scripts/install-agent-tooling.sh /tmp/install-agent-tooling.sh + +RUN --mount=type=cache,target=/home/deva/.npm,uid=${DEVA_UID},gid=${DEVA_GID},sharing=locked \ + bash /tmp/install-agent-tooling.sh && \ + rm -f /tmp/install-agent-tooling.sh + +USER root + +COPY docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh +COPY scripts/deva-bridge-tmux /usr/local/bin/deva-bridge-tmux + +RUN chmod 755 /usr/local/bin/docker-entrypoint.sh && 
\ + chmod 755 /usr/local/bin/deva-bridge-tmux && \ + chmod -R 755 /usr/local/bin/scripts || true + +WORKDIR /root + +ENTRYPOINT ["/usr/bin/tini", "--", "/usr/local/bin/docker-entrypoint.sh"] + +CMD ["claude"] diff --git a/Makefile b/Makefile index a171b1e..accae25 100644 --- a/Makefile +++ b/Makefile @@ -2,10 +2,12 @@ IMAGE_NAME := ghcr.io/thevibeworks/deva TAG := latest RUST_TAG := rust +CORE_TAG := core DOCKERFILE := Dockerfile RUST_DOCKERFILE := Dockerfile.rust MAIN_IMAGE := $(IMAGE_NAME):$(TAG) RUST_IMAGE := $(IMAGE_NAME):$(RUST_TAG) +CORE_IMAGE := $(IMAGE_NAME):$(CORE_TAG) CONTAINER_NAME := deva-$(shell basename $(PWD))-$(shell date +%s) # Smart image detection: auto-detect available image for version checking @@ -18,11 +20,11 @@ DETECTED_IMAGE := $(shell \ else \ echo "$(IMAGE_NAME):$(TAG)"; \ fi) -CLAUDE_CODE_VERSION := $(shell npm view @anthropic-ai/claude-code version 2>/dev/null || echo "2.0.1") -CODEX_VERSION := $(shell npm view @openai/codex version 2>/dev/null || echo "0.42.0") -GEMINI_CLI_VERSION := $(shell npm view @google/gemini-cli version 2>/dev/null || echo "latest") -ATLAS_CLI_VERSION := $(shell gh api repos/lroolle/atlas-cli/releases/latest --jq '.tag_name' 2>/dev/null || echo "v0.1.1") -COPILOT_API_VERSION := $(shell gh api repos/ericc-ch/copilot-api/branches/master --jq '.commit.sha' 2>/dev/null || echo "83cdfde17d7d3be36bd2493cc7592ff13be4928d") +CLAUDE_CODE_VERSION := $(shell npm view @anthropic-ai/claude-code version 2>/dev/null || echo "2.1.81") +CODEX_VERSION := $(shell npm view @openai/codex version 2>/dev/null || echo "0.116.0") +GEMINI_CLI_VERSION := $(shell npm view @google/gemini-cli version 2>/dev/null || echo "0.35.0") +ATLAS_CLI_VERSION := $(shell gh api repos/lroolle/atlas-cli/releases/latest --jq '.tag_name' 2>/dev/null || echo "v0.1.4") +COPILOT_API_VERSION := $(shell gh api repos/ericc-ch/copilot-api/branches/master --jq '.commit.sha' 2>/dev/null || echo "0ea08febdd7e3e055b03dd298bf57e669500b5c1") export DOCKER_BUILDKIT := 1 
@@ -79,17 +81,33 @@ rebuild: @echo "✅ Rebuild completed: $(MAIN_IMAGE)" -.PHONY: build-rust -build-rust: +.PHONY: build-core +build-core: + @echo "🔨 Building stable core image..." + docker build -f $(DOCKERFILE) --target agent-base --build-arg COPILOT_API_VERSION=$(COPILOT_API_VERSION) -t $(CORE_IMAGE) . + @echo "✅ Core build completed: $(CORE_IMAGE)" + +.PHONY: build-rust-image +build-rust-image: @echo "🔨 Building Rust Docker image..." - docker build -f $(RUST_DOCKERFILE) --build-arg BASE_IMAGE=$(MAIN_IMAGE) -t $(RUST_IMAGE) . + docker build -f $(RUST_DOCKERFILE) \ + --build-arg BASE_IMAGE=$(CORE_IMAGE) \ + --build-arg CLAUDE_CODE_VERSION=$(CLAUDE_CODE_VERSION) \ + --build-arg CODEX_VERSION=$(CODEX_VERSION) \ + --build-arg GEMINI_CLI_VERSION=$(GEMINI_CLI_VERSION) \ + --build-arg ATLAS_CLI_VERSION=$(ATLAS_CLI_VERSION) \ + -t $(RUST_IMAGE) . @echo "✅ Rust build completed: $(RUST_IMAGE)" +.PHONY: build-rust +build-rust: build-core build-rust-image + .PHONY: build-all build-all: @echo "🔨 Building all images with versions: Claude $(CLAUDE_CODE_VERSION), Codex $(CODEX_VERSION), Gemini $(GEMINI_CLI_VERSION), Atlas $(ATLAS_CLI_VERSION), Copilot-API $(COPILOT_API_VERSION)..." + @$(MAKE) build-core COPILOT_API_VERSION=$(COPILOT_API_VERSION) @$(MAKE) build-main CLAUDE_CODE_VERSION=$(CLAUDE_CODE_VERSION) CODEX_VERSION=$(CODEX_VERSION) GEMINI_CLI_VERSION=$(GEMINI_CLI_VERSION) ATLAS_CLI_VERSION=$(ATLAS_CLI_VERSION) COPILOT_API_VERSION=$(COPILOT_API_VERSION) - @$(MAKE) build-rust BASE_IMAGE=$(MAIN_IMAGE) + @$(MAKE) build-rust-image CLAUDE_CODE_VERSION=$(CLAUDE_CODE_VERSION) CODEX_VERSION=$(CODEX_VERSION) GEMINI_CLI_VERSION=$(GEMINI_CLI_VERSION) ATLAS_CLI_VERSION=$(ATLAS_CLI_VERSION) COPILOT_API_VERSION=$(COPILOT_API_VERSION) @echo "✅ All images built successfully" .PHONY: buildx @@ -160,6 +178,7 @@ clean: @echo "Removing project images..." 
-docker rmi $(MAIN_IMAGE) 2>/dev/null || true -docker rmi $(RUST_IMAGE) 2>/dev/null || true + -docker rmi $(CORE_IMAGE) 2>/dev/null || true @echo "Pruning stopped containers..." -docker container prune -f @echo "Pruning unused images..." @@ -179,6 +198,7 @@ clean-all: @echo "Removing project images..." -docker rmi $(MAIN_IMAGE) 2>/dev/null || true -docker rmi $(RUST_IMAGE) 2>/dev/null || true + -docker rmi $(CORE_IMAGE) 2>/dev/null || true @echo "Removing ALL stopped containers..." -docker container prune -af @echo "Removing ALL dangling and unused images..." @@ -297,6 +317,7 @@ help: @echo "" @echo "Available targets:" @echo " build Build all images (auto-detects latest npm versions)" + @echo " build-core Build stable core image only" @echo " build-main Build main Docker image only" @echo " build-rust Build Rust Docker image" @echo " build-all Build all images (main + rust)" @@ -320,6 +341,7 @@ help: @echo " IMAGE_NAME Main image name (default: $(IMAGE_NAME))" @echo " TAG Docker image tag (default: $(TAG))" @echo " RUST_TAG Rust image tag (default: $(RUST_TAG))" + @echo " CORE_TAG Stable core image tag (default: $(CORE_TAG))" @echo " DOCKERFILE Dockerfile to use (default: $(DOCKERFILE))" @echo " RUST_DOCKERFILE Rust Dockerfile path (default: $(RUST_DOCKERFILE))" @echo " CLAUDE_CODE_VERSION Claude CLI version (default: $(CLAUDE_CODE_VERSION))" @@ -329,6 +351,7 @@ help: @echo "" @echo "Examples:" @echo " make build # Build all images with latest versions" + @echo " make build-core # Build stable core image only" @echo " make build-main # Build main image only" @echo " make build-rust # Build Rust image only" @echo " make TAG=dev build # Build all with custom tag" diff --git a/deva.sh b/deva.sh index eda67f5..49e93a6 100755 --- a/deva.sh +++ b/deva.sh @@ -39,6 +39,37 @@ LOADED_CONFIGS=() AGENT_ARGS=() AGENT_EXPLICIT=false +normalize_docker_image_parts() { + local tail="${DEVA_DOCKER_IMAGE##*/}" + + if [[ "$DEVA_DOCKER_IMAGE" == *@* ]]; then + DEVA_DOCKER_TAG="" + 
DEVA_DOCKER_TAG_ENV_SET=true + return + fi + + if [[ "$tail" == *:* ]]; then + local embedded_tag="${tail##*:}" + DEVA_DOCKER_IMAGE="${DEVA_DOCKER_IMAGE%:*}" + if [ "$DEVA_DOCKER_TAG_ENV_SET" = false ]; then + DEVA_DOCKER_TAG="$embedded_tag" + DEVA_DOCKER_TAG_ENV_SET=true + fi + fi +} + +docker_image_ref() { + if [[ "$DEVA_DOCKER_IMAGE" == *@* ]]; then + printf '%s' "$DEVA_DOCKER_IMAGE" + elif [ -n "${DEVA_DOCKER_TAG:-}" ]; then + printf '%s:%s' "$DEVA_DOCKER_IMAGE" "$DEVA_DOCKER_TAG" + else + printf '%s' "$DEVA_DOCKER_IMAGE" + fi +} + +normalize_docker_image_parts + EPHEMERAL_MODE=false QUICK_MODE=false GLOBAL_MODE=false @@ -88,9 +119,20 @@ Deva flags: --verbose, --debug Print full docker command before execution -- Everything after this sentinel is passed to the agent unchanged +Chrome integration for `claude -- --chrome`: + Set one of these in `.deva.local` or pass with `-e`: + DEVA_CHROME_PROFILE_PATH=/path/to/Profile 6 + Mount that profile's `Extensions/` tree for detection + DEVA_CHROME_PROFILE_NAME=Profile 6 + Override target profile name when source basename differs + DEVA_CHROME_USER_DATA_DIR=/path/to/Chrome user data + Scan `Default`/`Profile *` and mount only `Extensions/` + DEVA_HOST_CHROME_BRIDGE_DIR=/path/to/claude-mcp-browser-bridge-$USER + Override the exact host bridge directory if needed + Container Behavior (NEW in v0.8.0): Default (persistent): Shared per project by default, but split when container shape changes - (extra volumes, explicit config-home, auth mode). + (image/profile, extra volumes, explicit config-home, auth mode). Preserves state (npm packages, builds, etc). Faster startup, and default-auth runs can share one warm container. @@ -98,7 +140,7 @@ Container Behavior (NEW in v0.8.0): Agent-specific naming for parallel runs. 
Container Naming (NEW): - Persistent: deva--[..shape] # shape may encode volumes/config/auth + Persistent: deva--[..shape] # shape may encode image/volumes/config/auth Ephemeral: deva---- # Agent-specific Example: @@ -148,6 +190,13 @@ print(os.path.abspath(sys.argv[1])) PY } +canonical_path() { + python3 - "$1" <<'PY' +import os, sys +print(os.path.realpath(sys.argv[1])) +PY +} + default_config_home_for_agent() { local agent="$1" local xdg_home @@ -185,33 +234,37 @@ check_agent() { } check_image() { - if docker image inspect "${DEVA_DOCKER_IMAGE}:${DEVA_DOCKER_TAG}" >/dev/null 2>&1; then + local image_ref + image_ref="$(docker_image_ref)" + + if docker image inspect "$image_ref" >/dev/null 2>&1; then return fi # Try pulling first - if docker pull "${DEVA_DOCKER_IMAGE}:${DEVA_DOCKER_TAG}" >/dev/null 2>&1; then + if docker pull "$image_ref" >/dev/null 2>&1; then return fi - # Smart fallback: check for available profile images locally + # Smart fallback: check for available profile images locally. + # Digest-pinned refs are exact; tag fallback does not make sense there. 
local available_tags="" - local original_tag="$DEVA_DOCKER_TAG" - - # Check common profile tags (prefer rust as it's a superset of base) - for tag in rust latest; do - if [ "$tag" = "$DEVA_DOCKER_TAG" ]; then - continue # Skip the one we already tried - fi - if docker image inspect "${DEVA_DOCKER_IMAGE}:${tag}" >/dev/null 2>&1; then - available_tags="${available_tags}${tag} " - fi - done + if [[ "$DEVA_DOCKER_IMAGE" != *@* ]]; then + # Check common profile tags (prefer rust as it's a superset of base) + for tag in rust latest; do + if [ "$tag" = "$DEVA_DOCKER_TAG" ]; then + continue # Skip the one we already tried + fi + if docker image inspect "${DEVA_DOCKER_IMAGE}:${tag}" >/dev/null 2>&1; then + available_tags="${available_tags}${tag} " + fi + done + fi if [ -n "$available_tags" ]; then # Found alternative images - use the first one local fallback_tag="${available_tags%% *}" # Get first tag - echo "Image ${DEVA_DOCKER_IMAGE}:${original_tag} not found" >&2 + echo "Image $image_ref not found" >&2 echo "Using available image: ${DEVA_DOCKER_IMAGE}:${fallback_tag}" >&2 DEVA_DOCKER_TAG="$fallback_tag" return @@ -225,24 +278,24 @@ check_image() { ;; esac - echo "Docker image ${DEVA_DOCKER_IMAGE}:${DEVA_DOCKER_TAG} not found locally" >&2 + echo "Docker image $image_ref not found locally" >&2 if [ -n "$df" ]; then echo "A matching Dockerfile exists at: $df" >&2 case "${PROFILE:-}" in rust) echo "Build with: make build-rust" >&2 - echo "or: docker build -f $df -t ghcr.io/thevibeworks/deva:rust \"$SCRIPT_DIR\"" >&2 + echo "Manual docker builds need explicit build args and BASE_IMAGE; see docs/custom-images.md" >&2 ;; "" | base) echo "Build with: make build" >&2 - echo "or: docker build -f Dockerfile -t ghcr.io/thevibeworks/deva:latest \"$SCRIPT_DIR\"" >&2 + echo "Manual docker builds need explicit build args; see docs/custom-images.md" >&2 ;; *) echo "Build with your Dockerfile and tag appropriately (e.g., :${PROFILE})" >&2 ;; esac else - echo "Pull with: docker pull 
${DEVA_DOCKER_IMAGE}:${DEVA_DOCKER_TAG}" >&2 + echo "Pull with: docker pull $image_ref" >&2 fi exit 1 } @@ -283,6 +336,237 @@ translate_localhost() { echo "$1" | sed 's/127\.0\.0\.1/host.docker.internal/g' | sed 's/localhost/host.docker.internal/g' } +claude_args_request_chrome() { + local arg + local wants_chrome=false + local disables_chrome=false + + for arg in "$@"; do + case "$arg" in + --chrome) + wants_chrome=true + ;; + --no-chrome) + disables_chrome=true + ;; + esac + done + + [ "$wants_chrome" = true ] && [ "$disables_chrome" = false ] +} + +get_host_tmpdir() { + local tmpdir="" + + if command -v node >/dev/null 2>&1; then + tmpdir=$(node -p 'require("os").tmpdir()' 2>/dev/null || true) + fi + + if [ -z "$tmpdir" ] && command -v python3 >/dev/null 2>&1; then + tmpdir=$(python3 - <<'PY' +import tempfile +print(tempfile.gettempdir()) +PY + ) + fi + + if [ -z "$tmpdir" ]; then + tmpdir="${TMPDIR:-/tmp}" + fi + + printf '%s' "$tmpdir" +} + +normalize_host_bind_path() { + local path="$1" + path="$(expand_tilde "$path")" + + if [[ "$path" == /* ]]; then + printf '%s' "$path" + return 0 + fi + + absolute_path "$path" +} + +configured_env_value() { + local name="$1" + local spec + + for spec in "${USER_ENVS[@]+"${USER_ENVS[@]}"}"; do + if [[ "$spec" == "$name="* ]]; then + printf '%s' "${spec#*=}" + return 0 + fi + if [ "$spec" = "$name" ] && [ -n "${!name-}" ]; then + printf '%s' "${!name}" + return 0 + fi + done + + if [ -n "${!name-}" ]; then + printf '%s' "${!name}" + return 0 + fi + + return 1 +} + +user_volume_mounts_target() { + local target="$1" + local spec remainder dest + + for spec in "${USER_VOLUMES[@]+"${USER_VOLUMES[@]}"}"; do + remainder="${spec#*:}" + dest="${remainder%%:*}" + if [ "$dest" = "$target" ]; then + return 0 + fi + done + + return 1 +} + +prepare_claude_chrome_detection_mount() { + local profile_path="" + local user_data_dir="" + local profile_name="" + local profile_target="" + local extensions_source="" + local found_profile=false + 
+ profile_path="$(configured_env_value DEVA_CHROME_PROFILE_PATH || true)" + user_data_dir="$(configured_env_value DEVA_CHROME_USER_DATA_DIR || true)" + + if [ -n "$profile_path" ]; then + profile_path="$(normalize_host_bind_path "$profile_path")" + if [ ! -d "$profile_path" ]; then + echo "error: DEVA_CHROME_PROFILE_PATH does not exist: $profile_path" >&2 + exit 1 + fi + + profile_name="$(configured_env_value DEVA_CHROME_PROFILE_NAME || true)" + if [ -z "$profile_name" ]; then + profile_name="$(basename "$profile_path")" + fi + + case "$profile_name" in + Default | "Profile "*) + ;; + *) + echo "error: Chrome profile name must be 'Default' or 'Profile N'; got: $profile_name" >&2 + echo "hint: set DEVA_CHROME_PROFILE_NAME='Profile 6' if the source path basename is different" >&2 + exit 1 + ;; + esac + + extensions_source="$profile_path/Extensions" + if [ ! -d "$extensions_source" ]; then + echo "error: Chrome profile is missing Extensions directory: $extensions_source" >&2 + exit 1 + fi + + profile_target="/home/deva/.config/google-chrome/$profile_name/Extensions" + if ! user_volume_mounts_target "$profile_target"; then + USER_VOLUMES+=("$extensions_source:$profile_target:ro") + fi + return 0 + fi + + if [ -n "$user_data_dir" ]; then + user_data_dir="$(normalize_host_bind_path "$user_data_dir")" + if [ ! -d "$user_data_dir" ]; then + echo "error: DEVA_CHROME_USER_DATA_DIR does not exist: $user_data_dir" >&2 + exit 1 + fi + + if [ -d "$user_data_dir/Default" ]; then + extensions_source="$user_data_dir/Default/Extensions" + if [ -d "$extensions_source" ]; then + profile_target="/home/deva/.config/google-chrome/Default/Extensions" + if ! 
user_volume_mounts_target "$profile_target"; then + USER_VOLUMES+=("$extensions_source:$profile_target:ro") + fi + found_profile=true + fi + fi + + local candidate + for candidate in "$user_data_dir"/Profile\ *; do + [ -d "$candidate" ] || continue + extensions_source="$candidate/Extensions" + [ -d "$extensions_source" ] || continue + + profile_name="$(basename "$candidate")" + profile_target="/home/deva/.config/google-chrome/$profile_name/Extensions" + if ! user_volume_mounts_target "$profile_target"; then + USER_VOLUMES+=("$extensions_source:$profile_target:ro") + fi + found_profile=true + done + + if [ "$found_profile" = false ]; then + echo "error: DEVA_CHROME_USER_DATA_DIR has no Default/Profile */Extensions directories: $user_data_dir" >&2 + exit 1 + fi + fi +} + +resolve_claude_chrome_bridge_dir() { + local host_user="$1" + local configured_bridge_dir="" + local host_tmpdir="" + local host_bridge_dir="" + + configured_bridge_dir="$(configured_env_value DEVA_HOST_CHROME_BRIDGE_DIR || true)" + if [ -n "$configured_bridge_dir" ]; then + host_bridge_dir="$(normalize_host_bind_path "$configured_bridge_dir")" + else + host_tmpdir="$(get_host_tmpdir)" + local tmp_bridge_dir="$host_tmpdir/claude-mcp-browser-bridge-$host_user" + + # Claude's native host currently creates the bridge under /tmp, while the + # client also probes os.tmpdir() as an extra lookup path. Keep /tmp as + # the default mount target and only prefer os.tmpdir() when it already + # exists and /tmp does not. + host_bridge_dir="/tmp/claude-mcp-browser-bridge-$host_user" + + if [ ! -d "$host_bridge_dir" ] && [ "$host_tmpdir" != "/tmp" ] && [ -d "$tmp_bridge_dir" ]; then + host_bridge_dir="$tmp_bridge_dir" + fi + fi + + mkdir -p "$host_bridge_dir" + chmod 700 "$host_bridge_dir" 2>/dev/null || true + canonical_path "$host_bridge_dir" +} + +prepare_claude_chrome_bridge() { + [ "$ACTIVE_AGENT" = "claude" ] || return 0 + + if ! 
claude_args_request_chrome "${AGENT_ARGV[@]+"${AGENT_ARGV[@]}"}"; then + return 0 + fi + + local host_user + host_user="$(configured_env_value DEVA_CHROME_HOST_USER || true)" + if [ -z "$host_user" ]; then + host_user="$(id -un)" + fi + local host_bridge_dir + host_bridge_dir="$(resolve_claude_chrome_bridge_dir "$host_user")" + + prepare_claude_chrome_detection_mount + + local bridge_mount="/deva-host-chrome-bridge" + if ! user_volume_mounts_target "$bridge_mount"; then + USER_VOLUMES+=("$host_bridge_dir:$bridge_mount") + fi + USER_ENVS+=("DEVA_CHROME_HOST_BRIDGE=1") + USER_ENVS+=("DEVA_CHROME_HOST_USER=$host_user") + USER_ENVS+=("DEVA_CHROME_HOST_BRIDGE_DIR=$bridge_mount") +} + append_unique_line() { local list="$1" local item="$2" @@ -609,7 +893,7 @@ show_config() { fi echo "" - echo "Docker image: ${DEVA_DOCKER_IMAGE}:${DEVA_DOCKER_TAG}" + echo "Docker image: $(docker_image_ref)" echo "Container prefix: $DEVA_CONTAINER_PREFIX" } @@ -634,18 +918,14 @@ prepare_base_docker_args() { config_hash_source="$CONFIG_ROOT" fi fi - if [ -n "$config_hash_source" ]; then - if command -v md5sum >/dev/null 2>&1; then - config_hash=$(printf '%s' "$config_hash_source" | md5sum | cut -c1-6) - elif command -v shasum >/dev/null 2>&1; then - config_hash=$(printf '%s' "$config_hash_source" | shasum | cut -c1-6) - else - config_hash=$(printf '%s' "$config_hash_source" | cksum | cut -d' ' -f1 | cut -c1-6) - fi - fi + [ -n "$config_hash_source" ] && config_hash=$(short_hash "$config_hash_source" 6) + + local image_hash="" + image_hash=$(short_hash "$(docker_image_ref)" 6) local suffix="" - [ -n "$volume_hash" ] && suffix="..v${volume_hash}" + [ -n "$image_hash" ] && suffix="..i${image_hash}" + [ -n "$volume_hash" ] && suffix="${suffix}..v${volume_hash}" [ -n "$config_hash" ] && suffix="${suffix}..c${config_hash}" if [ "$EPHEMERAL_MODE" = true ]; then @@ -677,10 +957,14 @@ prepare_base_docker_args() { --label "deva.workspace_hash=${ws_hash}" --label "deva.agent=${ACTIVE_AGENT}" --label 
"deva.ephemeral=${EPHEMERAL_MODE}" + --label "deva.image=$(docker_image_ref)" ) if [ -n "$volume_hash" ]; then DOCKER_ARGS+=(--label "deva.volhash=${volume_hash}") fi + if [ -n "$image_hash" ]; then + DOCKER_ARGS+=(--label "deva.image_hash=${image_hash}") + fi if [ -n "${LANG:-}" ]; then DOCKER_ARGS+=(-e "LANG=$LANG"); fi if [ -n "${LC_ALL:-}" ]; then DOCKER_ARGS+=(-e "LC_ALL=$LC_ALL"); fi @@ -1139,6 +1423,19 @@ normalize_volume_spec() { echo "$src:$remainder" } +short_hash() { + local input="$1" + local length="${2:-8}" + + if command -v md5sum >/dev/null 2>&1; then + printf '%s' "$input" | md5sum | cut -c1-"$length" + elif command -v shasum >/dev/null 2>&1; then + printf '%s' "$input" | shasum | cut -c1-"$length" + else + printf '%s' "$input" | cksum | cut -d' ' -f1 | cut -c1-"$length" + fi +} + compute_volume_hash() { if [ ${#USER_VOLUMES[@]} -eq 0 ]; then return @@ -1161,15 +1458,7 @@ compute_volume_hash() { hash_input="${hash_input}${src}:${vol#*:}|" done <<<"$sorted_vols" - if [ -n "$hash_input" ]; then - if command -v md5sum >/dev/null 2>&1; then - echo "$hash_input" | md5sum | cut -c1-8 - elif command -v shasum >/dev/null 2>&1; then - echo "$hash_input" | shasum | cut -c1-8 - else - echo "$hash_input" | cksum | cut -d' ' -f1 | cut -c1-8 - fi - fi + [ -n "$hash_input" ] && short_hash "$hash_input" 8 } workspace_hash() { @@ -1180,13 +1469,7 @@ workspace_hash() { local p p="$(pwd)" - if command -v md5sum >/dev/null 2>&1; then - _WS_HASH_CACHE=$(printf '%s' "$p" | md5sum | cut -c1-8) - elif command -v shasum >/dev/null 2>&1; then - _WS_HASH_CACHE=$(printf '%s' "$p" | shasum | cut -c1-8) - else - _WS_HASH_CACHE=$(printf '%s' "$p" | cksum | cut -d' ' -f1 | cut -c1-8) - fi + _WS_HASH_CACHE=$(short_hash "$p" 8) printf '%s' "$_WS_HASH_CACHE" } @@ -1399,6 +1682,20 @@ process_var_config() { set_config_home_value "$value" fi ;; + DEVA_DOCKER_IMAGE) + DEVA_DOCKER_IMAGE="$value" + DEVA_DOCKER_IMAGE_ENV_SET=true + normalize_docker_image_parts + export DEVA_DOCKER_IMAGE + 
USER_ENVS+=("$name=$value") + ;; + DEVA_DOCKER_TAG) + DEVA_DOCKER_TAG="$value" + DEVA_DOCKER_TAG_ENV_SET=true + normalize_docker_image_parts + export DEVA_DOCKER_TAG + USER_ENVS+=("$name=$value") + ;; DEFAULT_AGENT) DEFAULT_AGENT="$value" ;; @@ -1716,7 +2013,7 @@ if [ ${#PRE_ARGS[@]} -gt 0 ]; then ;; --version) echo "deva.sh v${VERSION}" - echo "Docker Image: ${DEVA_DOCKER_IMAGE}:${DEVA_DOCKER_TAG}" + echo "Docker Image: $(docker_image_ref)" exit 0 ;; --show-config) @@ -1766,6 +2063,7 @@ if [ "$MANAGEMENT_MODE" = "shell" ] || [ "$MANAGEMENT_MODE" = "ps" ] || [ "$MANA if [ -z "$ACTIVE_AGENT" ]; then ACTIVE_AGENT="$DEFAULT_AGENT" fi + resolve_profile show_config exit 0 fi @@ -2126,6 +2424,7 @@ autolink_legacy_into_deva_root() { } check_agent "$ACTIVE_AGENT" +prepare_claude_chrome_bridge if [ -n "$CONFIG_HOME" ] && [ "$DRY_RUN" != true ]; then if [ ! -d "$CONFIG_HOME" ]; then @@ -2232,26 +2531,14 @@ if [ -n "${AUTH_METHOD:-}" ]; then auth_config_src="$CONFIG_ROOT" fi fi - if [ -n "$auth_config_src" ]; then - if command -v md5sum >/dev/null 2>&1; then - auth_config_hash=$(printf '%s' "$auth_config_src" | md5sum | cut -c1-6) - elif command -v shasum >/dev/null 2>&1; then - auth_config_hash=$(printf '%s' "$auth_config_src" | shasum | cut -c1-6) - else - auth_config_hash=$(printf '%s' "$auth_config_src" | cksum | cut -d' ' -f1 | cut -c1-6) - fi - fi + [ -n "$auth_config_src" ] && auth_config_hash=$(short_hash "$auth_config_src" 6) + + image_hash=$(short_hash "$(docker_image_ref)" 6) # Hash credential file path for credentials-file auth creds_hash="" if [ "$AUTH_METHOD" = "credentials-file" ] && [ -n "${CUSTOM_CREDENTIALS_FILE:-}" ]; then - if command -v md5sum >/dev/null 2>&1; then - creds_hash=$(printf '%s' "$CUSTOM_CREDENTIALS_FILE" | md5sum | cut -c1-8) - elif command -v shasum >/dev/null 2>&1; then - creds_hash=$(printf '%s' "$CUSTOM_CREDENTIALS_FILE" | shasum | cut -c1-8) - else - creds_hash=$(printf '%s' "$CUSTOM_CREDENTIALS_FILE" | cksum | cut -d' ' -f1 | cut 
-c1-8) - fi + creds_hash=$(short_hash "$CUSTOM_CREDENTIALS_FILE" 8) fi new_container_name="" @@ -2265,7 +2552,8 @@ if [ -n "${AUTH_METHOD:-}" ]; then # Build suffix chain: volume + config + auth name_suffix="" - [ -n "$volume_hash" ] && name_suffix="..v${volume_hash}" + [ -n "$image_hash" ] && name_suffix="..i${image_hash}" + [ -n "$volume_hash" ] && name_suffix="${name_suffix}..v${volume_hash}" [ -n "$auth_config_hash" ] && name_suffix="${name_suffix}..c${auth_config_hash}" name_suffix="${name_suffix}..${auth_suffix}" @@ -2360,7 +2648,7 @@ if [ "$QUICK_MODE" = false ] && [ -d "$(pwd)/.claude" ]; then DOCKER_ARGS+=("-v" "$(pwd)/.claude:$(pwd)/.claude") fi -DOCKER_ARGS+=("${DEVA_DOCKER_IMAGE}:${DEVA_DOCKER_TAG}") +DOCKER_ARGS+=("$(docker_image_ref)") if [ "$ACTION" = "shell" ]; then AGENT_COMMAND=("/bin/zsh") @@ -2479,7 +2767,7 @@ if [ "$EPHEMERAL_MODE" = false ]; then exec docker exec "${DOCKER_TERMINAL_ARGS[@]}" "$CONTAINER_NAME" /usr/local/bin/docker-entrypoint.sh "${AGENT_COMMAND[@]}" else - echo "Launching ${ACTIVE_AGENT} (ephemeral mode) via ${DEVA_DOCKER_IMAGE}:${DEVA_DOCKER_TAG}" + echo "Launching ${ACTIVE_AGENT} (ephemeral mode) via $(docker_image_ref)" write_session_file if ! docker "${DOCKER_ARGS[@]}" "${AGENT_COMMAND[@]}"; then echo "error: failed to launch ephemeral container" >&2 diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh index d0d07d9..f5b6dc1 100644 --- a/docker-entrypoint.sh +++ b/docker-entrypoint.sh @@ -249,6 +249,43 @@ fix_docker_socket_permissions() { fi } +get_node_tmpdir() { + local tmpdir="${TMPDIR:-/tmp}" + if command -v node >/dev/null 2>&1; then + local detected + detected=$(node -p 'require("os").tmpdir()' 2>/dev/null || true) + if [ -n "$detected" ]; then + tmpdir="$detected" + fi + fi + printf '%s' "$tmpdir" +} + +setup_claude_chrome_bridge() { + if [ "${DEVA_CHROME_HOST_BRIDGE:-}" != "1" ]; then + return 0 + fi + + local host_bridge_dir="${DEVA_CHROME_HOST_BRIDGE_DIR:-/deva-host-chrome-bridge}" + if [ ! 
-d "$host_bridge_dir" ]; then + echo "[entrypoint] error: Claude in Chrome bridge mount missing: $host_bridge_dir" >&2 + exit 1 + fi + + local container_tmpdir + container_tmpdir="$(get_node_tmpdir)" + mkdir -p "$container_tmpdir" + + local container_socket_path="$container_tmpdir/claude-mcp-browser-bridge-$DEVA_USER" + rm -f "$container_socket_path" 2>/dev/null || true + ln -snf "$host_bridge_dir" "$container_socket_path" + chown -h "$DEVA_UID:$DEVA_GID" "$container_socket_path" 2>/dev/null || true + + if [ "$VERBOSE" = "true" ]; then + echo "[entrypoint] Claude in Chrome bridge: $container_socket_path -> $host_bridge_dir" + fi +} + build_gosu_env_cmd() { local user="$1" shift @@ -298,6 +335,7 @@ main() { fi setup_nonroot_user + setup_claude_chrome_bridge fix_rust_permissions fix_docker_socket_permissions ensure_agent_binaries diff --git a/docs/custom-images.md b/docs/custom-images.md index 7f898cc..9eb65b2 100644 --- a/docs/custom-images.md +++ b/docs/custom-images.md @@ -37,16 +37,47 @@ Important detail: ## Build A Local Image -Base image: +Supported path: ```bash -docker build -t deva-local:latest . +make build-main +make build-rust ``` -Rust profile image: +If you only changed the late agent-install layer and want the fastest rebuild: ```bash -docker build -f Dockerfile.rust -t deva-local:rust . +make build-core +make build-rust-image +``` + +`build-rust-image` uses the local `:core` image as its parent so late +changes to the agent layer do not force the Rust apt layer to rerun. + +Manual `docker build` is still possible, but it is an advanced path now. +Do not rely on the Dockerfile defaults for release images. Pass explicit +tool versions, and point the Rust build at the local core image: + +```bash +bash ./scripts/resolve-tool-versions.sh + +docker build -t deva-local:latest \ + --build-arg CLAUDE_CODE_VERSION= \ + --build-arg CODEX_VERSION= \ + --build-arg GEMINI_CLI_VERSION= \ + --build-arg ATLAS_CLI_VERSION= \ + --build-arg COPILOT_API_VERSION= \ + . 
+
+docker build -f Dockerfile --target agent-base -t deva-local:core .
+
+docker build -f Dockerfile.rust -t deva-local:rust \
+  --build-arg BASE_IMAGE=deva-local:core \
+  --build-arg CLAUDE_CODE_VERSION= \
+  --build-arg CODEX_VERSION= \
+  --build-arg GEMINI_CLI_VERSION= \
+  --build-arg ATLAS_CLI_VERSION= \
+  .
 ```
 
 Then run deva against it:
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
index 2fb067a..ce2e666 100644
--- a/docs/troubleshooting.md
+++ b/docs/troubleshooting.md
@@ -105,6 +105,68 @@ If the agent cannot reach a local proxy:
 For Copilot proxy mode, deva also adds `NO_PROXY` and `no_grpc_proxy`
 entries for the local proxy hostnames.
 
+## Claude `--chrome` Still Cannot See The Extension
+
+Symptom:
+
+- Claude reports Chrome mode enabled
+- extension status still says not detected
+
+What deva now does for `deva.sh claude -- --chrome`:
+
+- mounts either:
+  - one configured Chrome profile `Extensions/` dir read-only at `/home/deva/.config/google-chrome/Profile N/Extensions`, or
+  - every detected `Default`/`Profile *` `Extensions/` dir under a configured user-data root
+- mounts the host Chrome bridge dir at `/deva-host-chrome-bridge`
+- inside the patched container entrypoint, creates the socket Claude expects:
+  - `/claude-mcp-browser-bridge-deva`
+  - symlinked to `/deva-host-chrome-bridge`
+
+Check:
+
+```bash
+deva.sh claude --debug --dry-run -- --chrome
+deva.sh shell
+```
+
+Look for:
+
+- `.../Profile 6/Extensions:/home/deva/.config/google-chrome/Profile 6/Extensions:ro`
+- or `.../Default/Extensions:/home/deva/.config/google-chrome/Default/Extensions:ro`
+- `/deva-host-chrome-bridge`
+- inside the container: `ls -l "$(node -p 'require("os").tmpdir()')"/claude-mcp-browser-bridge-deva`
+
+If your extension lives in a non-default Chrome profile, tell deva where it is.
Put this in `.deva.local`: + +```text +DEVA_CHROME_PROFILE_PATH=/actual/path/to/Profile 6 +``` + +If the source directory is not literally named `Profile 6`, also set: + +```text +DEVA_CHROME_PROFILE_NAME=Profile 6 +``` + +If you prefer pointing deva at the browser user-data root instead: + +```text +DEVA_CHROME_USER_DATA_DIR=/actual/path/to/Chrome +``` + +If deva guessed the wrong host bridge directory, override it explicitly: + +```text +DEVA_HOST_CHROME_BRIDGE_DIR=/actual/path/to/claude-mcp-browser-bridge-$USER +``` + +Reality check: + +- deva does not install the host native messaging manifest for you +- Chrome on the host still needs a working host-side `claude --chrome-native-host` setup +- if the host socket file is absent, the extension may be installed but not yet connected +- the published image still needs to be rebuilt with the patched `docker-entrypoint.sh`; otherwise the socket symlink is never created + ## Dry-Run Looks Fine But Runtime Fails That is normal in at least three cases: diff --git a/install.sh b/install.sh index 2f65a0a..8eedd28 100644 --- a/install.sh +++ b/install.sh @@ -24,35 +24,54 @@ image_ref() { return fi + if [[ "$tail" == *:* ]]; then + if [ -n "$tag" ]; then + printf '%s:%s' "${repo%:*}" "$tag" + else + printf '%s' "$repo" + fi + return + fi + if [ -n "$tag" ]; then printf '%s:%s' "$repo" "$tag" return fi - if [[ "$tail" == *:* ]]; then - printf '%s' "$repo" + printf '%s:%s' "$repo" "$default_tag" +} + +embedded_image_tag() { + local repo="$1" + local tail="${repo##*/}" + + if [[ "$repo" == *@* ]]; then return fi - printf '%s:%s' "$repo" "$default_tag" + if [[ "$tail" == *:* ]]; then + printf '%s' "${tail##*:}" + fi } +SOURCE_DOCKER_TAG="${DEVA_DOCKER_TAG:-$(embedded_image_tag "${DEVA_DOCKER_IMAGE:-}")}" + if [ -n "${DEVA_DOCKER_IMAGE+x}" ]; then - DOCKER_IMAGE="$(image_ref "$DEVA_DOCKER_IMAGE" "${DEVA_DOCKER_TAG:-}" "latest")" + DOCKER_IMAGE="$(image_ref "$DEVA_DOCKER_IMAGE" "$SOURCE_DOCKER_TAG" "latest")" else - 
DOCKER_IMAGE="$(image_ref "ghcr.io/thevibeworks/deva" "${DEVA_DOCKER_TAG:-}" "latest")"
+    DOCKER_IMAGE="$(image_ref "ghcr.io/thevibeworks/deva" "$SOURCE_DOCKER_TAG" "latest")"
 fi
 
 if [ -n "${DEVA_DOCKER_IMAGE_FALLBACK+x}" ]; then
     if [ -n "$DEVA_DOCKER_IMAGE_FALLBACK" ]; then
-        DOCKER_IMAGE_FALLBACK="$(image_ref "$DEVA_DOCKER_IMAGE_FALLBACK" "${DEVA_DOCKER_IMAGE_FALLBACK_TAG:-${DEVA_DOCKER_TAG:-}}" "latest")"
+        DOCKER_IMAGE_FALLBACK="$(image_ref "$DEVA_DOCKER_IMAGE_FALLBACK" "${DEVA_DOCKER_IMAGE_FALLBACK_TAG:-$SOURCE_DOCKER_TAG}" "latest")"
     else
         DOCKER_IMAGE_FALLBACK=""
     fi
 else
-    DOCKER_IMAGE_FALLBACK="$(image_ref "thevibeworks/deva" "${DEVA_DOCKER_IMAGE_FALLBACK_TAG:-${DEVA_DOCKER_TAG:-}}" "latest")"
+    DOCKER_IMAGE_FALLBACK="$(image_ref "thevibeworks/deva" "${DEVA_DOCKER_IMAGE_FALLBACK_TAG:-$SOURCE_DOCKER_TAG}" "latest")"
 fi
-
+
 echo "deva installer"
 echo "=============="
 echo ""
diff --git a/scripts/install-agent-tooling.sh b/scripts/install-agent-tooling.sh
new file mode 100644
index 0000000..a1cf099
--- /dev/null
+++ b/scripts/install-agent-tooling.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+set -euo pipefail
+
+: "${DEVA_HOME:?DEVA_HOME is required}"
+: "${CLAUDE_CODE_VERSION:?CLAUDE_CODE_VERSION is required}"
+: "${CODEX_VERSION:?CODEX_VERSION is required}"
+: "${GEMINI_CLI_VERSION:?GEMINI_CLI_VERSION is required}"
+
+ATLAS_CLI_VERSION="${ATLAS_CLI_VERSION:-v0.1.4}"
+
+npm config set prefix "$DEVA_HOME/.npm-global"
+npm install -g --no-audit --no-fund \
+    "@anthropic-ai/claude-code@${CLAUDE_CODE_VERSION}" \
+    @mariozechner/claude-trace \
+    "@openai/codex@${CODEX_VERSION}" \
+    "@google/gemini-cli@${GEMINI_CLI_VERSION}"
+npm cache clean --force
+
+"$DEVA_HOME/.npm-global/bin/claude" --version
+"$DEVA_HOME/.npm-global/bin/codex" --version
+"$DEVA_HOME/.npm-global/bin/gemini" --version
+"$DEVA_HOME/.npm-global/bin/claude-trace" --help >/dev/null
+(npm list -g --depth=0 @anthropic-ai/claude-code @openai/codex 
@google/gemini-cli || true) + +curl -fsSL "https://raw.githubusercontent.com/lroolle/atlas-cli/${ATLAS_CLI_VERSION}/install.sh" \ + | bash -s -- --skill-dir "$DEVA_HOME/.skills"