# Changelog:
# - start.sh: auto-register project in ~/.config/context-studio/projects/
#   before launching Electron — without this acquireProjectLock() silently
#   skips writing the lock file, waitForServers() never finds the registry
#   port, and all agent ports stay null (localhost:null errors).
# - start.sh: mount all known Claude Code credential locations into the
#   container (~/.claude/.credentials.json, ~/.claude.json, CLAUDE_CONFIG_DIR
#   variants), not just ~/.anthropic, which was empty on this system.
# - bin/claude: create /tmp/cs-ready-<agentId> on the host after a 3 s delay
#   so CS Core's CLI ready-marker poll resolves instead of timing out at 10 s.
# - workflow.sh: add hasTrustDialogAccepted:true to all agent settings.json
#   so claude goes straight to priming without the folder trust dialog.
# - prereqs.sh: add ensure_api_key() — checks all credential locations,
#   prompts with masked input if none found, offers to save to shell profile.
# - wizard.sh: trap SIGINT for graceful abort — gum confirm popup, reverts
#   the created project dir and cloned core dir, leaves installed packages
#   untouched.
# - core.sh: set _WIZARD_CORE_CLONED=true before clone for cleanup tracking.
# - electron-config.js: increase serverStartupTimeout 30s→90s (config file
#   in core/config/, not source — safe to edit).
#
# Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
#!/usr/bin/env bash
# container.sh — generate Option B container scripts for a project
generate_container_scripts() {
|
||
local project_dir="$1"
|
||
local project_name="$2"
|
||
local slug="$3"
|
||
local container_name="cs-${slug}"
|
||
|
||
info "Generating container scripts (Option B)..."
|
||
|
||
mkdir -p "$project_dir/bin"
|
||
|
||
# ── bin/claude — wrapper that runs claude inside the agents container ──
|
||
cat > "$project_dir/bin/claude" <<WRAPPER
|
||
#!/usr/bin/env bash
|
||
# Claude Code wrapper — runs claude inside the agents container.
|
||
# CS Core on the host calls this instead of the real claude binary.
|
||
CONTAINER_NAME="${container_name}"
|
||
PROJECT_DIR="${project_dir}"
|
||
RUNTIME="\$(command -v podman || command -v docker || true)"
|
||
|
||
if [[ -z "\$RUNTIME" ]]; then
|
||
echo "[claude-wrapper] Error: podman or docker not found" >&2
|
||
exit 1
|
||
fi
|
||
|
||
if ! "\$RUNTIME" container inspect "\$CONTAINER_NAME" --format '{{.State.Running}}' 2>/dev/null | grep -q true; then
|
||
echo "[claude-wrapper] Error: container '\$CONTAINER_NAME' is not running." >&2
|
||
echo "[claude-wrapper] Run ./start.sh from your project directory first." >&2
|
||
exit 1
|
||
fi
|
||
|
||
# Use PWD as workdir only if it is inside the mounted project tree.
|
||
# When CS Core's server process calls claude --version its cwd is the
|
||
# core directory (~/.context-studio/core) which is NOT mounted, causing
|
||
# podman exec --workdir to fail. Fall back to PROJECT_DIR in that case.
|
||
if [[ "\$PWD" == "\$PROJECT_DIR"* ]]; then
|
||
WORKDIR="\$PWD"
|
||
else
|
||
WORKDIR="\$PROJECT_DIR"
|
||
fi
|
||
|
||
# ── CS Core ready marker ────────────────────────────────────────────────
|
||
# CS Core polls /tmp/cs-ready-<agentId> on the host to know when the CLI
|
||
# banner is visible and /prime can be injected. Claude runs inside the
|
||
# container so it cannot create this file on the host itself.
|
||
# We infer the agent ID from the PTY working directory (CS Core sets it to
|
||
# workflow/agents/<agentId>) and create the marker after a short delay.
|
||
_is_interactive=true
|
||
for _arg in "\$@"; do
|
||
case "\$_arg" in --version|--help|-h|-v) _is_interactive=false; break ;; esac
|
||
done
|
||
if [[ "\$_is_interactive" == "true" && "\$PWD" == "\$PROJECT_DIR/workflow/agents/"* ]]; then
|
||
_AGENT_ID="\$(basename "\$PWD")"
|
||
(sleep 3 && touch "/tmp/cs-ready-\$_AGENT_ID") &
|
||
fi
|
||
|
||
# Pass through TTY if available, relay working directory into container
|
||
if [ -t 0 ]; then
|
||
exec "\$RUNTIME" exec -it --workdir "\$WORKDIR" "\$CONTAINER_NAME" claude "\$@"
|
||
else
|
||
exec "\$RUNTIME" exec -i --workdir "\$WORKDIR" "\$CONTAINER_NAME" claude "\$@"
|
||
fi
|
||
WRAPPER
|
||
chmod +x "$project_dir/bin/claude"
|
||
|
||
# ── start.sh — build image, start container, launch CS Core ──────────
|
||
cat > "$project_dir/start.sh" <<START
|
||
#!/usr/bin/env bash
|
||
set -uo pipefail
|
||
SCRIPT_DIR="\$(cd "\$(dirname "\${BASH_SOURCE[0]}")" && pwd)"
|
||
PROJECT_DIR="\$SCRIPT_DIR"
|
||
CONTAINER_NAME="${container_name}"
|
||
IMAGE_NAME="${slug}"
|
||
RUNTIME="\$(command -v podman || command -v docker || true)"
|
||
CS_CORE="\${CS_CORE_DIR:-\$HOME/.context-studio/core}"
|
||
|
||
if [[ -z "\$RUNTIME" ]]; then
|
||
echo "Error: podman or docker not found." >&2; exit 1
|
||
fi
|
||
if [[ ! -d "\$CS_CORE" ]]; then
|
||
echo "Error: context-studio-core not found at \$CS_CORE" >&2
|
||
echo "Run the Context Studio Wizard first." >&2; exit 1
|
||
fi
|
||
|
||
# ── Build image if missing ───────────────────────────────────────────────
|
||
if ! "\$RUNTIME" image exists "\$IMAGE_NAME" 2>/dev/null; then
|
||
echo "→ Building container image '\$IMAGE_NAME'..."
|
||
"\$RUNTIME" build -t "\$IMAGE_NAME" "\$PROJECT_DIR/.devcontainer/" \
|
||
|| { echo "Image build failed." >&2; exit 1; }
|
||
fi
|
||
|
||
# ── Stop stale container ─────────────────────────────────────────────────
|
||
"\$RUNTIME" rm -f "\$CONTAINER_NAME" 2>/dev/null || true
|
||
|
||
# ── Ensure ~/.anthropic exists (Claude Code stores auth/config here) ─────
|
||
mkdir -p "\$HOME/.anthropic"
|
||
|
||
# ── Build credential mounts ───────────────────────────────────────────────
|
||
# Claude Code may store credentials in various locations depending on version
|
||
# and whether CLAUDE_CONFIG_DIR is set. Mount whichever files exist.
|
||
_CREDS_ARGS=()
|
||
_CREDS_ARGS+=("-v" "\$HOME/.anthropic:\$HOME/.anthropic:ro")
|
||
_claude_dir="\${CLAUDE_CONFIG_DIR:-\$HOME/.claude}"
|
||
if [[ -f "\$_claude_dir/.credentials.json" ]]; then
|
||
_CREDS_ARGS+=("-v" "\$_claude_dir/.credentials.json:\$_claude_dir/.credentials.json:ro")
|
||
fi
|
||
if [[ -f "\$HOME/.claude.json" ]]; then
|
||
_CREDS_ARGS+=("-v" "\$HOME/.claude.json:\$HOME/.claude.json:ro")
|
||
fi
|
||
if [[ -n "\${CLAUDE_CONFIG_DIR:-}" && -f "\$CLAUDE_CONFIG_DIR/.claude.json" ]]; then
|
||
_CREDS_ARGS+=("-v" "\$CLAUDE_CONFIG_DIR/.claude.json:\$CLAUDE_CONFIG_DIR/.claude.json:ro")
|
||
fi
|
||
|
||
# ── Start agents container ───────────────────────────────────────────────
|
||
# Mount project at the same absolute path so host and container paths match.
|
||
# CS Core sets agent working dirs to host paths; the wrapper relays PWD.
|
||
# Run as uid/gid of the current user so Claude Code doesn't run as root
|
||
# (--dangerously-skip-permissions is blocked when running as root).
|
||
echo "→ Starting agents container '\$CONTAINER_NAME'..."
|
||
"\$RUNTIME" run -d \\
|
||
--name "\$CONTAINER_NAME" \\
|
||
--user "\$(id -u):\$(id -g)" \\
|
||
-v "\$PROJECT_DIR:\$PROJECT_DIR" \\
|
||
"\${_CREDS_ARGS[@]}" \\
|
||
-e ANTHROPIC_API_KEY="\${ANTHROPIC_API_KEY:-}" \\
|
||
-e CS_WORKFLOW_DIR="\$PROJECT_DIR/workflow" \\
|
||
-e PROJECT_ROOT_DIR="\$PROJECT_DIR" \\
|
||
-e HOME="\$HOME" \\
|
||
"\$IMAGE_NAME" \\
|
||
sleep infinity
|
||
|
||
# ── Wait for container to be running ────────────────────────────────────
|
||
echo -n "→ Waiting for container..."
|
||
for _i in 1 2 3 4 5 6 7 8 9 10; do
|
||
if "\$RUNTIME" container inspect "\$CONTAINER_NAME" --format '{{.State.Running}}' 2>/dev/null | grep -q true; then
|
||
echo " ready."
|
||
break
|
||
fi
|
||
echo -n "."
|
||
sleep 0.5
|
||
done
|
||
if ! "\$RUNTIME" container inspect "\$CONTAINER_NAME" --format '{{.State.Running}}' 2>/dev/null | grep -q true; then
|
||
echo ""
|
||
echo "Error: container failed to start." >&2; exit 1
|
||
fi
|
||
|
||
# ── Ensure core deps are installed (Electron binary lives here) ─────────
|
||
if [[ ! -d "\$CS_CORE/app/node_modules" ]]; then
|
||
echo "→ Installing context-studio-core dependencies (first run)..."
|
||
(cd "\$CS_CORE" && npm install) || { echo "npm install failed." >&2; exit 1; }
|
||
fi
|
||
|
||
# ── Register project with Context Studio (required for lock file to be written) ──
|
||
# CS Core's acquireProjectLock() skips writing the lock file if the project
|
||
# isn't registered in ~/.config/context-studio/projects/<uuid>.json.
|
||
# Without the lock file, waitForServers() can never find the registry port
|
||
# and always times out — causing localhost:null errors in the UI.
|
||
_CS_PROJECTS_DIR="\$HOME/.config/context-studio/projects"
|
||
mkdir -p "\$_CS_PROJECTS_DIR"
|
||
_WORKFLOW_DIR="\$PROJECT_DIR/workflow"
|
||
_already_registered=false
|
||
for _f in "\$_CS_PROJECTS_DIR"/*.json; do
|
||
if [[ -f "\$_f" ]] && python3 -c "
|
||
import json,sys
|
||
d=json.load(open(sys.argv[1]))
|
||
sys.exit(0 if d.get('workflowDir') == sys.argv[2] else 1)
|
||
" "\$_f" "\$_WORKFLOW_DIR" 2>/dev/null; then
|
||
_already_registered=true
|
||
break
|
||
fi
|
||
done
|
||
if [[ "\$_already_registered" == "false" ]]; then
|
||
_UUID=\$(python3 -c "import uuid; print(uuid.uuid4())")
|
||
_NOW=\$(python3 -c "from datetime import datetime,timezone; print(datetime.now(timezone.utc).isoformat())")
|
||
python3 -c "
|
||
import json, sys
|
||
data = {
|
||
'id': sys.argv[1],
|
||
'name': sys.argv[2],
|
||
'workflowDir': sys.argv[3],
|
||
'user': 'default',
|
||
'created': sys.argv[4],
|
||
'lastOpened': sys.argv[4]
|
||
}
|
||
with open(sys.argv[5], 'w') as f:
|
||
json.dump(data, f, indent=2)
|
||
f.write('\n')
|
||
" "\$_UUID" "\$(basename "\$PROJECT_DIR")" "\$_WORKFLOW_DIR" "\$_NOW" "\$_CS_PROJECTS_DIR/\$_UUID.json"
|
||
echo "→ Registered project with Context Studio"
|
||
fi
|
||
|
||
# ── Check display for Electron UI ───────────────────────────────────────
|
||
if [[ -z "\${DISPLAY:-}" && -z "\${WAYLAND_DISPLAY:-}" ]]; then
|
||
echo "⚠ No display detected (DISPLAY / WAYLAND_DISPLAY not set)."
|
||
echo " Electron UI may not open. Set DISPLAY=:0 or run from a desktop terminal."
|
||
fi
|
||
|
||
# ── Put claude wrapper first on PATH ────────────────────────────────────
|
||
# Electron inherits this PATH and passes it to the A2A server it spawns,
|
||
# so the servers can locate the bin/claude wrapper inside the container.
|
||
export PATH="\$PROJECT_DIR/bin:\$PATH"
|
||
|
||
# ── Launch Context Studio (Electron manages A2A server startup) ──────────
|
||
# The Electron app checks for a lock file; if servers aren't running it
|
||
# spawns core/start.js --ui-mode=headless internally and waits for health.
|
||
echo "→ Launching Context Studio..."
|
||
CS_CORE_DIR="\$CS_CORE" \\
|
||
CS_WORKFLOW_DIR="\$PROJECT_DIR/workflow" \\
|
||
PROJECT_ROOT_DIR="\$PROJECT_DIR" \\
|
||
"\$CS_CORE/app/node_modules/.bin/electron" "\$CS_CORE/app"
|
||
|
||
# ── Electron closed — stop container ─────────────────────────────────────
|
||
echo "→ UI closed. Stopping agents container..."
|
||
"\$RUNTIME" rm -f "\$CONTAINER_NAME" 2>/dev/null || true
|
||
START
|
||
chmod +x "$project_dir/start.sh"
|
||
|
||
# ── stop.sh — forcefully stop the container ───────────────────────────
|
||
cat > "$project_dir/stop.sh" <<STOP
|
||
#!/usr/bin/env bash
|
||
CONTAINER_NAME="${container_name}"
|
||
RUNTIME="\$(command -v podman || command -v docker || true)"
|
||
echo "→ Stopping \$CONTAINER_NAME..."
|
||
"\$RUNTIME" rm -f "\$CONTAINER_NAME" 2>/dev/null && echo "→ Done." || echo "→ Container was not running."
|
||
STOP
|
||
chmod +x "$project_dir/stop.sh"
|
||
|
||
# ── update.sh — update core, claude-code, and optionally OS packages ──
|
||
cat > "$project_dir/update.sh" <<UPDATE
|
||
#!/usr/bin/env bash
|
||
set -uo pipefail
|
||
SCRIPT_DIR="\$(cd "\$(dirname "\${BASH_SOURCE[0]}")" && pwd)"
|
||
PROJECT_DIR="\$SCRIPT_DIR"
|
||
CONTAINER_NAME="${container_name}"
|
||
IMAGE_NAME="${slug}"
|
||
RUNTIME="\$(command -v podman || command -v docker || true)"
|
||
CS_CORE="\${CS_CORE_DIR:-\$HOME/.context-studio/core}"
|
||
|
||
GREEN='\033[0;32m'; CYAN='\033[0;36m'; YELLOW='\033[1;33m'; BOLD='\033[1m'; RESET='\033[0m'
|
||
info() { echo -e "\${CYAN}\${BOLD}[update]\${RESET} \$*"; }
|
||
success() { echo -e "\${GREEN}\${BOLD}[ok]\${RESET} \$*"; }
|
||
warn() { echo -e "\${YELLOW}\${BOLD}[warn]\${RESET} \$*"; }
|
||
|
||
CONTAINER_RUNNING=false
|
||
if "\$RUNTIME" container inspect "\$CONTAINER_NAME" --format '{{.State.Running}}' 2>/dev/null | grep -q true; then
|
||
CONTAINER_RUNNING=true
|
||
fi
|
||
|
||
# ── 1. Update Context Studio Core ───────────────────────────────────────
|
||
info "Updating Context Studio Core..."
|
||
if [[ -d "\$CS_CORE/.git" ]]; then
|
||
git -C "\$CS_CORE" pull --ff-only && success "Core updated." || warn "Core pull failed — continuing with current version."
|
||
else
|
||
warn "Core not found at \$CS_CORE — skipping."
|
||
fi
|
||
|
||
# ── 2. Update Claude Code in container ──────────────────────────────────
|
||
info "Updating Claude Code (claude-code)..."
|
||
if \$CONTAINER_RUNNING; then
|
||
"\$RUNTIME" exec "\$CONTAINER_NAME" npm install -g @anthropic-ai/claude-code \
|
||
&& success "Claude Code updated in running container." \
|
||
|| warn "Claude Code update failed."
|
||
else
|
||
warn "Container not running — Claude Code will be updated on next image build."
|
||
fi
|
||
|
||
# ── 3. Update OS packages in container ──────────────────────────────────
|
||
info "Updating OS packages in container..."
|
||
if \$CONTAINER_RUNNING; then
|
||
"\$RUNTIME" exec "\$CONTAINER_NAME" bash -c "apt-get update -qq && apt-get upgrade -y" \
|
||
&& success "OS packages updated in running container." \
|
||
|| warn "OS package update failed."
|
||
else
|
||
warn "Container not running — OS packages will be updated on next image build."
|
||
fi
|
||
|
||
# ── 4. Rebuild image to persist updates ─────────────────────────────────
|
||
echo ""
|
||
echo -e "\${BOLD}Rebuild container image?\${RESET}"
|
||
echo -e " Rebuilds from scratch with latest base image + all packages."
|
||
echo -e " ${YELLOW}⚠ Takes 5–15 minutes. Stops the running container.\${RESET}"
|
||
echo -ne "Rebuild now? \${CYAN}[y/N]\${RESET}: "
|
||
read -r _rebuild || true
|
||
_rebuild="\${_rebuild:-n}"
|
||
|
||
if [[ "\$_rebuild" =~ ^[Yy]\$ ]]; then
|
||
info "Stopping container..."
|
||
"\$RUNTIME" rm -f "\$CONTAINER_NAME" 2>/dev/null || true
|
||
|
||
info "Rebuilding image '\$IMAGE_NAME' (--pull --no-cache)..."
|
||
"\$RUNTIME" build --pull --no-cache -t "\$IMAGE_NAME" "\$PROJECT_DIR/.devcontainer/" \
|
||
&& success "Image rebuilt. Run ./start.sh to start with the fresh image." \
|
||
|| { echo "Image build failed." >&2; exit 1; }
|
||
else
|
||
info "Skipped image rebuild."
|
||
if \$CONTAINER_RUNNING; then
|
||
warn "In-container updates are temporary — they will be lost when the container is recreated."
|
||
warn "Run ./update.sh again and choose 'y' to rebuild, or run ./start.sh (which recreates the container from the old image)."
|
||
fi
|
||
fi
|
||
|
||
echo ""
|
||
success "Update complete."
|
||
UPDATE
|
||
chmod +x "$project_dir/update.sh"
|
||
|
||
success "Generated: start.sh stop.sh update.sh bin/claude"
|
||
}
|