#!/usr/bin/env bash
# container.sh — generate Option B container scripts for a project
#
# Defines generate_container_scripts(), which writes four executable helper
# scripts into a project directory:
#   bin/claude — wrapper that relays `claude` invocations into the container
#   start.sh   — build image if missing, start container, launch CS Core UI
#   stop.sh    — force-remove the agents container
#   update.sh  — update core / claude-code / OS packages, optional rebuild
#
# Expects info() and success() to be provided by the sourcing wizard script.

#######################################
# Generate the Option B container scripts for one project.
# Globals:   none written; calls info()/success() from the wizard.
# Arguments: $1 - absolute project directory
#            $2 - human-readable project name (unused; kept for call-site
#                 compatibility)
#            $3 - slug used to derive the container/image names
# Outputs:   bin/claude, start.sh, stop.sh, update.sh under $1, all chmod +x
#######################################
generate_container_scripts() {
  local project_dir="$1"
  # shellcheck disable=SC2034  # kept for interface compatibility
  local project_name="$2"
  local slug="$3"
  local container_name="cs-${slug}"
  # NOTE(review): image name reconstructed — the original heredoc headers were
  # corrupted in this file; confirm it matches the name the wizard uses elsewhere.
  local image_name="cs-${slug}-image"

  info "Generating container scripts (Option B)..."
  mkdir -p "$project_dir/bin"

  # ── bin/claude — wrapper that runs claude inside the agents container ──
  # Unquoted delimiter: $project_dir / $container_name expand NOW (generation
  # time); \$VARS survive as $VARS in the generated script.
  # NOTE(review): the header of this heredoc (through the first runtime check)
  # was reconstructed from the escaped variables the surviving body references.
  cat > "$project_dir/bin/claude" <<WRAPPER
#!/usr/bin/env bash
# Auto-generated by Context Studio — do not edit by hand.
set -euo pipefail

RUNTIME="\${CS_RUNTIME:-podman}"
CONTAINER_NAME="$container_name"
PROJECT_DIR="$project_dir"

if ! command -v "\$RUNTIME" >/dev/null 2>&1; then
  echo "[claude-wrapper] Error: container runtime '\$RUNTIME' not found." >&2
  exit 1
fi
if ! "\$RUNTIME" container inspect "\$CONTAINER_NAME" --format '{{.State.Running}}' 2>/dev/null | grep -q true; then
  echo "[claude-wrapper] Error: container '\$CONTAINER_NAME' is not running." >&2
  echo "[claude-wrapper] Run ./start.sh from your project directory first." >&2
  exit 1
fi

# Use PWD as workdir only if it is inside the mounted project tree.
# When CS Core's server process calls claude --version its cwd is the
# core directory (~/.context-studio/core) which is NOT mounted, causing
# podman exec --workdir to fail. Fall back to PROJECT_DIR in that case.
if [[ "\$PWD" == "\$PROJECT_DIR"* ]]; then
  WORKDIR="\$PWD"
else
  WORKDIR="\$PROJECT_DIR"
fi

# Pass through TTY if available, relay working directory into container
if [ -t 0 ]; then
  exec "\$RUNTIME" exec -it --workdir "\$WORKDIR" "\$CONTAINER_NAME" claude "\$@"
else
  exec "\$RUNTIME" exec -i --workdir "\$WORKDIR" "\$CONTAINER_NAME" claude "\$@"
fi
WRAPPER
  chmod +x "$project_dir/bin/claude"

  # ── start.sh — build image, start container, launch CS Core ──────────
  # NOTE(review): header (variable definitions + runtime check) reconstructed.
  cat > "$project_dir/start.sh" <<START
#!/usr/bin/env bash
# Auto-generated by Context Studio — do not edit by hand.
set -euo pipefail

RUNTIME="\${CS_RUNTIME:-podman}"
PROJECT_DIR="$project_dir"
CONTAINER_NAME="$container_name"
IMAGE_NAME="$image_name"
CS_CORE="\$HOME/.context-studio/core"

if ! command -v "\$RUNTIME" >/dev/null 2>&1; then
  echo "Error: container runtime '\$RUNTIME' not found." >&2; exit 1
fi
if [[ ! -d "\$CS_CORE" ]]; then
  echo "Error: context-studio-core not found at \$CS_CORE" >&2
  echo "Run the Context Studio Wizard first." >&2; exit 1
fi

# ── Build image if missing ───────────────────────────────────────────────
if ! "\$RUNTIME" image exists "\$IMAGE_NAME" 2>/dev/null; then
  echo "→ Building container image '\$IMAGE_NAME'..."
  "\$RUNTIME" build -t "\$IMAGE_NAME" "\$PROJECT_DIR/.devcontainer/" \\
    || { echo "Image build failed." >&2; exit 1; }
fi

# ── Stop stale container ─────────────────────────────────────────────────
"\$RUNTIME" rm -f "\$CONTAINER_NAME" 2>/dev/null || true

# ── Ensure ~/.anthropic exists (Claude Code stores auth/config here) ─────
mkdir -p "\$HOME/.anthropic"

# ── Start agents container ───────────────────────────────────────────────
# Mount project at the same absolute path so host and container paths match.
# CS Core sets agent working dirs to host paths; the wrapper relays PWD.
# Run as uid/gid of the current user so Claude Code doesn't run as root
# (--dangerously-skip-permissions is blocked when running as root).
echo "→ Starting agents container '\$CONTAINER_NAME'..."
"\$RUNTIME" run -d \\
  --name "\$CONTAINER_NAME" \\
  --user "\$(id -u):\$(id -g)" \\
  -v "\$PROJECT_DIR:\$PROJECT_DIR" \\
  -v "\$HOME/.anthropic:\$HOME/.anthropic:ro" \\
  -e ANTHROPIC_API_KEY="\${ANTHROPIC_API_KEY:-}" \\
  -e CS_WORKFLOW_DIR="\$PROJECT_DIR/workflow" \\
  -e PROJECT_ROOT_DIR="\$PROJECT_DIR" \\
  -e HOME="\$HOME" \\
  "\$IMAGE_NAME" \\
  sleep infinity

# ── Wait for container to be running ────────────────────────────────────
echo -n "→ Waiting for container..."
for _i in 1 2 3 4 5 6 7 8 9 10; do
  if "\$RUNTIME" container inspect "\$CONTAINER_NAME" --format '{{.State.Running}}' 2>/dev/null | grep -q true; then
    echo " ready."
    break
  fi
  echo -n "."
  sleep 0.5
done
if ! "\$RUNTIME" container inspect "\$CONTAINER_NAME" --format '{{.State.Running}}' 2>/dev/null | grep -q true; then
  echo ""
  echo "Error: container failed to start." >&2; exit 1
fi

# ── Ensure core deps are installed (Electron binary lives here) ─────────
if [[ ! -d "\$CS_CORE/app/node_modules" ]]; then
  echo "→ Installing context-studio-core dependencies (first run)..."
  (cd "\$CS_CORE" && npm install) || { echo "npm install failed." >&2; exit 1; }
fi

# ── Check display for Electron UI ───────────────────────────────────────
if [[ -z "\${DISPLAY:-}" && -z "\${WAYLAND_DISPLAY:-}" ]]; then
  echo "⚠ No display detected (DISPLAY / WAYLAND_DISPLAY not set)."
  echo "  Electron UI may not open. Set DISPLAY=:0 or run from a desktop terminal."
fi

# ── Put claude wrapper first on PATH ────────────────────────────────────
# Electron inherits this PATH and passes it to the A2A server it spawns,
# so the servers can locate the bin/claude wrapper inside the container.
export PATH="\$PROJECT_DIR/bin:\$PATH"

# ── Launch Context Studio (Electron manages A2A server startup) ──────────
# The Electron app checks for a lock file; if servers aren't running it
# spawns core/start.js --ui-mode=headless internally and waits for health.
echo "→ Launching Context Studio..."
CS_CORE_DIR="\$CS_CORE" \\
CS_WORKFLOW_DIR="\$PROJECT_DIR/workflow" \\
PROJECT_ROOT_DIR="\$PROJECT_DIR" \\
  "\$CS_CORE/app/node_modules/.bin/electron" "\$CS_CORE/app"

# ── Electron closed — stop container ─────────────────────────────────────
echo "→ UI closed. Stopping agents container..."
"\$RUNTIME" rm -f "\$CONTAINER_NAME" 2>/dev/null || true
START
  chmod +x "$project_dir/start.sh"

  # ── stop.sh — forcefully stop the container ───────────────────────────
  # NOTE(review): header reconstructed; only the trailing rm -f line survived.
  cat > "$project_dir/stop.sh" <<STOP
#!/usr/bin/env bash
# Auto-generated by Context Studio — do not edit by hand.
RUNTIME="\${CS_RUNTIME:-podman}"
CONTAINER_NAME="$container_name"
echo "→ Stopping agents container '\$CONTAINER_NAME'..."
"\$RUNTIME" rm -f "\$CONTAINER_NAME" 2>/dev/null && echo "→ Done." || echo "→ Container was not running."
STOP
  chmod +x "$project_dir/stop.sh"

  # ── update.sh — update core, claude-code, and optionally OS packages ──
  # NOTE(review): header (colors, info/success/warn helpers, variables,
  # CONTAINER_RUNNING probe) reconstructed from references in the body.
  cat > "$project_dir/update.sh" <<UPDATE
#!/usr/bin/env bash
# Auto-generated by Context Studio — do not edit by hand.
set -euo pipefail

# ANSI escapes; interpreted by echo -e below.
BOLD='\033[1m'; RESET='\033[0m'; CYAN='\033[36m'; YELLOW='\033[33m'; GREEN='\033[32m'

info()    { echo -e "\${CYAN}→\${RESET} \$*"; }
success() { echo -e "\${GREEN}✓\${RESET} \$*"; }
warn()    { echo -e "\${YELLOW}⚠ \$*\${RESET}"; }

RUNTIME="\${CS_RUNTIME:-podman}"
PROJECT_DIR="$project_dir"
CONTAINER_NAME="$container_name"
IMAGE_NAME="$image_name"
CS_CORE="\$HOME/.context-studio/core"

CONTAINER_RUNNING=false
if "\$RUNTIME" container inspect "\$CONTAINER_NAME" --format '{{.State.Running}}' 2>/dev/null | grep -q true; then
  CONTAINER_RUNNING=true
fi

# ── 1. Update Context Studio Core ───────────────────────────────────────
info "Updating Context Studio Core..."
if [[ -d "\$CS_CORE/.git" ]]; then
  git -C "\$CS_CORE" pull --ff-only && success "Core updated." || warn "Core pull failed — continuing with current version."
else
  warn "Core not found at \$CS_CORE — skipping."
fi

# ── 2. Update Claude Code in container ──────────────────────────────────
info "Updating Claude Code (claude-code)..."
if \$CONTAINER_RUNNING; then
  "\$RUNTIME" exec "\$CONTAINER_NAME" npm install -g @anthropic-ai/claude-code \\
    && success "Claude Code updated in running container." \\
    || warn "Claude Code update failed."
else
  warn "Container not running — Claude Code will be updated on next image build."
fi

# ── 3. Update OS packages in container ──────────────────────────────────
info "Updating OS packages in container..."
if \$CONTAINER_RUNNING; then
  "\$RUNTIME" exec "\$CONTAINER_NAME" bash -c "apt-get update -qq && apt-get upgrade -y" \\
    && success "OS packages updated in running container." \\
    || warn "OS package update failed."
else
  warn "Container not running — OS packages will be updated on next image build."
fi

# ── 4. Rebuild image to persist updates ─────────────────────────────────
echo ""
echo -e "\${BOLD}Rebuild container image?\${RESET}"
echo -e "  Rebuilds from scratch with latest base image + all packages."
echo -e "  \${YELLOW}⚠ Takes 5–15 minutes. Stops the running container.\${RESET}"
echo -ne "Rebuild now? \${CYAN}[y/N]\${RESET}: "
read -r _rebuild || true
_rebuild="\${_rebuild:-n}"
if [[ "\$_rebuild" =~ ^[Yy]\$ ]]; then
  info "Stopping container..."
  "\$RUNTIME" rm -f "\$CONTAINER_NAME" 2>/dev/null || true
  info "Rebuilding image '\$IMAGE_NAME' (--pull --no-cache)..."
  "\$RUNTIME" build --pull --no-cache -t "\$IMAGE_NAME" "\$PROJECT_DIR/.devcontainer/" \\
    && success "Image rebuilt. Run ./start.sh to start with the fresh image." \\
    || { echo "Image build failed." >&2; exit 1; }
else
  info "Skipped image rebuild."
  if \$CONTAINER_RUNNING; then
    warn "In-container updates are temporary — they will be lost when the container is recreated."
    warn "Run ./update.sh again and choose 'y' to rebuild, or run ./start.sh (which recreates the container from the old image)."
  fi
fi
echo ""
success "Update complete."
UPDATE
  chmod +x "$project_dir/update.sh"

  success "Generated: start.sh stop.sh update.sh bin/claude"
}