diff --git a/.github/docker/Dockerfile.ci b/.github/docker/Dockerfile.ci index 1048bb47cd..43e505e58b 100644 --- a/.github/docker/Dockerfile.ci +++ b/.github/docker/Dockerfile.ci @@ -4,8 +4,25 @@ FROM ubuntu:24.04 ENV DEBIAN_FRONTEND=noninteractive -# System deps -RUN apt-get update && apt-get install -y --no-install-recommends \ +# Switch apt sources to Hetzner's public mirror. +# Ubicloud runners (Hetzner FSN1-DC21) hit reliable connection timeouts to +# archive.ubuntu.com:80 — observed 90+ second outages on multiple builds. +# Hetzner's mirror is publicly accessible from any cloud and route-local for +# Ubicloud, so this fixes both reliability and latency. Ubuntu 24.04 uses +# the deb822 sources format at /etc/apt/sources.list.d/ubuntu.sources. +# +# Using HTTP (not HTTPS) intentionally: the base ubuntu:24.04 image ships +# without ca-certificates, so HTTPS apt fails with "No system certificates +# available." Apt's security model verifies via GPG-signed Release files, +# not TLS, so HTTP here is no weaker than the upstream defaults. 
+RUN sed -i \ + -e 's|http://archive.ubuntu.com/ubuntu|http://mirror.hetzner.com/ubuntu/packages|g' \ + -e 's|http://security.ubuntu.com/ubuntu|http://mirror.hetzner.com/ubuntu/packages|g' \ + /etc/apt/sources.list.d/ubuntu.sources + +# System deps (retry apt-get update — even Hetzner can blip occasionally) +RUN for i in 1 2 3; do apt-get update && break || sleep 5; done \ + && apt-get install -y --no-install-recommends \ git curl unzip ca-certificates jq bc gpg \ && rm -rf /var/lib/apt/lists/* @@ -14,7 +31,8 @@ RUN curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg \ | gpg --dearmor -o /usr/share/keyrings/githubcli-archive-keyring.gpg \ && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" \ | tee /etc/apt/sources.list.d/github-cli.list > /dev/null \ - && apt-get update && apt-get install -y --no-install-recommends gh \ + && for i in 1 2 3; do apt-get update && break || sleep 5; done \ + && apt-get install -y --no-install-recommends gh \ && rm -rf /var/lib/apt/lists/* # Node.js 22 LTS (needed for claude CLI) diff --git a/CHANGELOG.md b/CHANGELOG.md index e2f9a4ed79..8ebcb3d606 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## [0.18.3.0] - 2026-04-17 + +### Added +- **Windows cookie import.** `/setup-browser-cookies` now works on Windows. Point it at Chrome, Edge, Brave, or Chromium, pick a profile, and gstack will pull your real browser cookies into the headless session. Handles AES-256-GCM (Chrome 80+), DPAPI key unwrap via PowerShell, and falls back to a headless CDP session for v20 App-Bound Encryption on Chrome 127+. Windows users can now do authenticated QA testing with `/qa` and `/design-review` for the first time. +- **One-command OpenCode install.** `./setup --host opencode` now wires up gstack skills for OpenCode the same way it does for Claude Code and Codex. No more manual workaround. 
+ +### Fixed +- **No more permission prompts on every skill invocation.** Every `/browse`, `/qa`, `/qa-only`, `/design-review`, `/office-hours`, `/canary`, `/pair-agent`, `/benchmark`, `/land-and-deploy`, `/design-shotgun`, `/design-consultation`, `/design-html`, `/plan-design-review`, and `/open-gstack-browser` invocation used to trigger Claude Code's sandbox asking about "tilde in assignment value." Replaced bare `~/` with `"$HOME/..."` in the browse and design resolvers plus a handful of templates that still used the old pattern. Every skill runs silently now. +- **Multi-step QA actually works.** The `$B` browse server was dying between Bash tool invocations — Claude Code's sandbox kills the parent shell when a command finishes, and the server took that as a cue to shut down. Now the server persists across calls, keeping your cookies, page state, and navigation intact. Run `$B goto`, then `$B fill`, then `$B click` in three separate Bash calls and it just works. A 30-minute idle timeout still handles eventual cleanup. `Ctrl+C` and `/stop` still do an immediate shutdown. +- **Cookie picker stops stranding the UI.** If the launching CLI exited mid-import, the picker page would flash `Failed to fetch` because the server had shut down under it. The browse server now stays alive while any picker code or session is live. +- **OpenClaw skills load cleanly in Codex.** The 4 hand-authored ClawHub skills (ceo-review, investigate, office-hours, retro) had frontmatter with unquoted colons and non-standard `version`/`metadata` fields that stricter parsers rejected. Now they load without errors on Codex CLI and render correctly on GitHub. + +### For contributors +- Community wave lands 6 PRs: #993 (byliu-labs), #994 (joelgreen), #996 (voidborne-d), #864 (cathrynlavery), #982 (breakneo), #892 (msr-hickory). +- SIGTERM handling is now mode-aware. In normal mode the server ignores SIGTERM so Claude Code's sandbox doesn't tear it down mid-session. 
In headed mode (`/open-gstack-browser`) and tunnel mode (`/pair-agent`) SIGTERM still triggers a clean shutdown — those modes skip idle cleanup, so without the mode gate orphan daemons would accumulate forever. Note that v0.18.1.0 also disables the parent-PID watchdog when `BROWSE_HEADED=1`, so headed mode is doubly protected. Inline comments document the resolution order. +- Windows v20 App-Bound Encryption CDP fallback now logs the Chrome version on entry and has an inline comment documenting the debug-port security posture (127.0.0.1-only, random port in [9222, 9321] for collision avoidance, always killed in finally). +- New regression test `test/openclaw-native-skills.test.ts` pins OpenClaw skill frontmatter to `name` + `description` only — catches version/metadata drift at PR time. + ## [0.18.2.0] - 2026-04-17 ### Fixed diff --git a/VERSION b/VERSION index 51534b8fd4..c9b0a51441 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.18.2.0 +0.18.3.0 diff --git a/browse/src/cookie-import-browser.ts b/browse/src/cookie-import-browser.ts index 7dc75e07bb..271d3659ba 100644 --- a/browse/src/cookie-import-browser.ts +++ b/browse/src/cookie-import-browser.ts @@ -1,7 +1,7 @@ /** * Chromium browser cookie import — read and decrypt cookies from real browsers * - * Supports macOS and Linux Chromium-based browsers. + * Supports macOS, Linux, and Windows Chromium-based browsers. * Pure logic module — no Playwright dependency, no HTTP concerns. 
* * Decryption pipeline: @@ -40,6 +40,7 @@ import * as crypto from 'crypto'; import * as fs from 'fs'; import * as path from 'path'; import * as os from 'os'; +import { TEMP_DIR } from './platform'; // ─── Types ────────────────────────────────────────────────────── @@ -50,6 +51,7 @@ export interface BrowserInfo { aliases: string[]; linuxDataDir?: string; linuxApplication?: string; + windowsDataDir?: string; } export interface ProfileEntry { @@ -91,7 +93,7 @@ export class CookieImportError extends Error { } } -type BrowserPlatform = 'darwin' | 'linux'; +type BrowserPlatform = 'darwin' | 'linux' | 'win32'; interface BrowserMatch { browser: BrowserInfo; @@ -104,11 +106,11 @@ interface BrowserMatch { const BROWSER_REGISTRY: BrowserInfo[] = [ { name: 'Comet', dataDir: 'Comet/', keychainService: 'Comet Safe Storage', aliases: ['comet', 'perplexity'] }, - { name: 'Chrome', dataDir: 'Google/Chrome/', keychainService: 'Chrome Safe Storage', aliases: ['chrome', 'google-chrome', 'google-chrome-stable'], linuxDataDir: 'google-chrome/', linuxApplication: 'chrome' }, - { name: 'Chromium', dataDir: 'chromium/', keychainService: 'Chromium Safe Storage', aliases: ['chromium'], linuxDataDir: 'chromium/', linuxApplication: 'chromium' }, + { name: 'Chrome', dataDir: 'Google/Chrome/', keychainService: 'Chrome Safe Storage', aliases: ['chrome', 'google-chrome', 'google-chrome-stable'], linuxDataDir: 'google-chrome/', linuxApplication: 'chrome', windowsDataDir: 'Google/Chrome/User Data/' }, + { name: 'Chromium', dataDir: 'chromium/', keychainService: 'Chromium Safe Storage', aliases: ['chromium'], linuxDataDir: 'chromium/', linuxApplication: 'chromium', windowsDataDir: 'Chromium/User Data/' }, { name: 'Arc', dataDir: 'Arc/User Data/', keychainService: 'Arc Safe Storage', aliases: ['arc'] }, - { name: 'Brave', dataDir: 'BraveSoftware/Brave-Browser/', keychainService: 'Brave Safe Storage', aliases: ['brave'], linuxDataDir: 'BraveSoftware/Brave-Browser/', linuxApplication: 'brave' }, - { 
name: 'Edge', dataDir: 'Microsoft Edge/', keychainService: 'Microsoft Edge Safe Storage', aliases: ['edge'], linuxDataDir: 'microsoft-edge/', linuxApplication: 'microsoft-edge' }, + { name: 'Brave', dataDir: 'BraveSoftware/Brave-Browser/', keychainService: 'Brave Safe Storage', aliases: ['brave'], linuxDataDir: 'BraveSoftware/Brave-Browser/', linuxApplication: 'brave', windowsDataDir: 'BraveSoftware/Brave-Browser/User Data/' }, + { name: 'Edge', dataDir: 'Microsoft Edge/', keychainService: 'Microsoft Edge Safe Storage', aliases: ['edge'], linuxDataDir: 'microsoft-edge/', linuxApplication: 'microsoft-edge', windowsDataDir: 'Microsoft/Edge/User Data/' }, ]; // ─── Key Cache ────────────────────────────────────────────────── @@ -133,10 +135,12 @@ export function findInstalledBrowsers(): BrowserInfo[] { const browserDir = path.join(getBaseDir(platform), dataDir); try { const entries = fs.readdirSync(browserDir, { withFileTypes: true }); - if (entries.some(e => - e.isDirectory() && e.name.startsWith('Profile ') && - fs.existsSync(path.join(browserDir, e.name, 'Cookies')) - )) return true; + if (entries.some(e => { + if (!e.isDirectory() || !e.name.startsWith('Profile ')) return false; + const profileDir = path.join(browserDir, e.name); + return fs.existsSync(path.join(profileDir, 'Cookies')) + || (platform === 'win32' && fs.existsSync(path.join(profileDir, 'Network', 'Cookies'))); + })) return true; } catch {} } return false; @@ -174,8 +178,11 @@ export function listProfiles(browserName: string): ProfileEntry[] { for (const entry of entries) { if (!entry.isDirectory()) continue; if (entry.name !== 'Default' && !entry.name.startsWith('Profile ')) continue; - const cookiePath = path.join(browserDir, entry.name, 'Cookies'); - if (!fs.existsSync(cookiePath)) continue; + // Chrome 80+ on Windows stores cookies under Network/Cookies + const cookieCandidates = platform === 'win32' + ? 
[path.join(browserDir, entry.name, 'Network', 'Cookies'), path.join(browserDir, entry.name, 'Cookies')] + : [path.join(browserDir, entry.name, 'Cookies')]; + if (!cookieCandidates.some(p => fs.existsSync(p))) continue; // Avoid duplicates if the same profile appears on multiple platforms if (profiles.some(p => p.name === entry.name)) continue; @@ -268,7 +275,7 @@ export async function importCookies( for (const row of rows) { try { - const value = decryptCookieValue(row, derivedKeys); + const value = decryptCookieValue(row, derivedKeys, match.platform); const cookie = toPlaywrightCookie(row, value); cookies.push(cookie); domainCounts[row.host_key] = (domainCounts[row.host_key] || 0) + 1; @@ -310,7 +317,8 @@ function validateProfile(profile: string): void { } function getHostPlatform(): BrowserPlatform | null { - if (process.platform === 'darwin' || process.platform === 'linux') return process.platform; + const p = process.platform; + if (p === 'darwin' || p === 'linux' || p === 'win32') return p as BrowserPlatform; return null; } @@ -318,20 +326,22 @@ function getSearchPlatforms(): BrowserPlatform[] { const current = getHostPlatform(); const order: BrowserPlatform[] = []; if (current) order.push(current); - for (const platform of ['darwin', 'linux'] as BrowserPlatform[]) { + for (const platform of ['darwin', 'linux', 'win32'] as BrowserPlatform[]) { if (!order.includes(platform)) order.push(platform); } return order; } function getDataDirForPlatform(browser: BrowserInfo, platform: BrowserPlatform): string | null { - return platform === 'darwin' ? browser.dataDir : browser.linuxDataDir || null; + if (platform === 'darwin') return browser.dataDir; + if (platform === 'linux') return browser.linuxDataDir || null; + return browser.windowsDataDir || null; } function getBaseDir(platform: BrowserPlatform): string { - return platform === 'darwin' - ? 
path.join(os.homedir(), 'Library', 'Application Support') - : path.join(os.homedir(), '.config'); + if (platform === 'darwin') return path.join(os.homedir(), 'Library', 'Application Support'); + if (platform === 'win32') return path.join(os.homedir(), 'AppData', 'Local'); + return path.join(os.homedir(), '.config'); } function findBrowserMatch(browser: BrowserInfo, profile: string): BrowserMatch | null { @@ -339,12 +349,18 @@ function findBrowserMatch(browser: BrowserInfo, profile: string): BrowserMatch | for (const platform of getSearchPlatforms()) { const dataDir = getDataDirForPlatform(browser, platform); if (!dataDir) continue; - const dbPath = path.join(getBaseDir(platform), dataDir, profile, 'Cookies'); - try { - if (fs.existsSync(dbPath)) { - return { browser, platform, dbPath }; - } - } catch {} + const baseProfile = path.join(getBaseDir(platform), dataDir, profile); + // Chrome 80+ on Windows stores cookies under Network/Cookies; fall back to Cookies + const candidates = platform === 'win32' + ? [path.join(baseProfile, 'Network', 'Cookies'), path.join(baseProfile, 'Cookies')] + : [path.join(baseProfile, 'Cookies')]; + for (const dbPath of candidates) { + try { + if (fs.existsSync(dbPath)) { + return { browser, platform, dbPath }; + } + } catch {} + } } return null; } @@ -369,6 +385,13 @@ function getBrowserMatch(browser: BrowserInfo, profile: string): BrowserMatch { // ─── Internal: SQLite Access ──────────────────────────────────── function openDb(dbPath: string, browserName: string): Database { + // On Windows, Chrome holds exclusive WAL locks even when we open readonly. + // The readonly open may "succeed" but return empty results because the WAL + // (where all actual data lives) can't be replayed. Always use the copy + // approach on Windows so we can open read-write and process the WAL. 
+ if (process.platform === 'win32') { + return openDbFromCopy(dbPath, browserName); + } try { return new Database(dbPath, { readonly: true }); } catch (err: any) { @@ -439,6 +462,11 @@ async function getDerivedKeys(match: BrowserMatch): Promise<Map<string, Buffer>> ]); } + if (match.platform === 'win32') { + const key = await getWindowsAesKey(match.browser); + return new Map([['v10', key]]); + } + const keys = new Map<string, Buffer>(); keys.set('v10', getCachedDerivedKey('linux:v10', 'peanuts', 1)); @@ -452,6 +480,84 @@ async function getDerivedKeys(match: BrowserMatch): Promise<Map<string, Buffer>> return keys; } +async function getWindowsAesKey(browser: BrowserInfo): Promise<Buffer> { + const cacheKey = `win32:${browser.keychainService}`; + const cached = keyCache.get(cacheKey); + if (cached) return cached; + + const platform = 'win32' as const; + const dataDir = getDataDirForPlatform(browser, platform); + if (!dataDir) throw new CookieImportError(`No Windows data dir for ${browser.name}`, 'not_installed'); + + const localStatePath = path.join(getBaseDir(platform), dataDir, 'Local State'); + let localState: any; + try { + localState = JSON.parse(fs.readFileSync(localStatePath, 'utf-8')); + } catch (err) { + const reason = err instanceof Error ?
`: ${err.message}` : ''; + throw new CookieImportError( + `Cannot read Local State for ${browser.name} at ${localStatePath}${reason}`, + 'keychain_error', + ); + } + + const encryptedKeyB64: string = localState?.os_crypt?.encrypted_key; + if (!encryptedKeyB64) { + throw new CookieImportError( + `No encrypted key in Local State for ${browser.name}`, + 'keychain_not_found', + ); + } + + // The stored value is base64(b"DPAPI" + dpapi_encrypted_bytes) — strip the 5-byte prefix + const encryptedKey = Buffer.from(encryptedKeyB64, 'base64').slice(5); + const key = await dpapiDecrypt(encryptedKey); + keyCache.set(cacheKey, key); + return key; +} + +async function dpapiDecrypt(encryptedBytes: Buffer): Promise<Buffer> { + const script = [ + 'Add-Type -AssemblyName System.Security', + '$stdin = [Console]::In.ReadToEnd().Trim()', + '$bytes = [System.Convert]::FromBase64String($stdin)', + '$dec = [System.Security.Cryptography.ProtectedData]::Unprotect($bytes, $null, [System.Security.Cryptography.DataProtectionScope]::CurrentUser)', + 'Write-Output ([System.Convert]::ToBase64String($dec))', + ].join('; '); + + const proc = Bun.spawn(['powershell', '-NoProfile', '-Command', script], { + stdin: 'pipe', + stdout: 'pipe', + stderr: 'pipe', + }); + + proc.stdin.write(encryptedBytes.toString('base64')); + proc.stdin.end(); + + const timeout = new Promise<never>((_, reject) => + setTimeout(() => { + proc.kill(); + reject(new CookieImportError('DPAPI decryption timed out', 'keychain_timeout', 'retry')); + }, 10_000), + ); + + try { + const exitCode = await Promise.race([proc.exited, timeout]); + const stdout = await new Response(proc.stdout).text(); + if (exitCode !== 0) { + const stderr = await new Response(proc.stderr).text(); + throw new CookieImportError(`DPAPI decryption failed: ${stderr.trim()}`, 'keychain_error'); + } + return Buffer.from(stdout.trim(), 'base64'); + } catch (err) { + if (err instanceof CookieImportError) throw err; + throw new CookieImportError( + `DPAPI decryption failed: 
${(err as Error).message}`, + 'keychain_error', + ); + } +} + async function getMacKeychainPassword(service: string): Promise<string> { // Use async Bun.spawn with timeout to avoid blocking the event loop. // macOS may show an Allow/Deny dialog that blocks until the user responds. @@ -566,7 +672,7 @@ interface RawCookie { samesite: number; } -function decryptCookieValue(row: RawCookie, keys: Map<string, Buffer>): string { +function decryptCookieValue(row: RawCookie, keys: Map<string, Buffer>, platform: BrowserPlatform): string { // Prefer unencrypted value if present if (row.value && row.value.length > 0) return row.value; @@ -574,9 +680,28 @@ function decryptCookieValue(row: RawCookie, keys: Map<string, Buffer>): string { if (ev.length === 0) return ''; const prefix = ev.slice(0, 3).toString('utf-8'); + + // Chrome 127+ on Windows uses App-Bound Encryption (v20) — cannot be decrypted + // outside the Chrome process. Caller should fall back to CDP extraction. + if (prefix === 'v20') throw new CookieImportError( + 'Cookie uses App-Bound Encryption (v20). 
Use CDP extraction instead.', + 'v20_encryption', + ); + const key = keys.get(prefix); if (!key) throw new Error(`No decryption key available for ${prefix} cookies`); + if (platform === 'win32' && prefix === 'v10') { + // Windows: AES-256-GCM — structure: v10(3) + nonce(12) + ciphertext + tag(16) + const nonce = ev.slice(3, 15); + const tag = ev.slice(ev.length - 16); + const ciphertext = ev.slice(15, ev.length - 16); + const decipher = crypto.createDecipheriv('aes-256-gcm', key, nonce) as crypto.DecipherGCM; + decipher.setAuthTag(tag); + return Buffer.concat([decipher.update(ciphertext), decipher.final()]).toString('utf-8'); + } + + // macOS / Linux: AES-128-CBC — structure: v10/v11(3) + ciphertext const ciphertext = ev.slice(3); const iv = Buffer.alloc(16, 0x20); // 16 space characters const decipher = crypto.createDecipheriv('aes-128-cbc', key, iv); @@ -624,3 +749,284 @@ function mapSameSite(value: number): 'Strict' | 'Lax' | 'None' { default: return 'Lax'; } } + + +// ─── CDP-based Cookie Extraction (Windows v20 fallback) ──────── +// When App-Bound Encryption (v20) is detected, we launch Chrome headless +// with remote debugging and extract cookies via the DevTools Protocol. +// This only works when Chrome is NOT already running (profile lock). + +const CHROME_PATHS_WIN = [ + path.join(process.env.PROGRAMFILES || 'C:\\Program Files', 'Google', 'Chrome', 'Application', 'chrome.exe'), + path.join(process.env['PROGRAMFILES(X86)'] || 'C:\\Program Files (x86)', 'Google', 'Chrome', 'Application', 'chrome.exe'), +]; + +const EDGE_PATHS_WIN = [ + path.join(process.env['PROGRAMFILES(X86)'] || 'C:\\Program Files (x86)', 'Microsoft', 'Edge', 'Application', 'msedge.exe'), + path.join(process.env.PROGRAMFILES || 'C:\\Program Files', 'Microsoft', 'Edge', 'Application', 'msedge.exe'), +]; + +function findBrowserExe(browserName: string): string | null { + const candidates = browserName.toLowerCase().includes('edge') ? 
EDGE_PATHS_WIN : CHROME_PATHS_WIN; + for (const p of candidates) { + if (fs.existsSync(p)) return p; + } + return null; +} + +function isBrowserRunning(browserName: string): Promise<boolean> { + const exe = browserName.toLowerCase().includes('edge') ? 'msedge.exe' : 'chrome.exe'; + return new Promise<boolean>((resolve) => { + const proc = Bun.spawn(['tasklist', '/FI', `IMAGENAME eq ${exe}`, '/NH'], { + stdout: 'pipe', stderr: 'pipe', + }); + proc.exited.then(async () => { + const out = await new Response(proc.stdout).text(); + resolve(out.toLowerCase().includes(exe)); + }).catch(() => resolve(false)); + }); +} + +/** + * Extract cookies via Chrome DevTools Protocol. Launches Chrome headless with + * remote debugging on the user's real profile directory. Requires Chrome to be + * closed first (profile lock). + * + * v20 App-Bound Encryption binds decryption keys to the original user-data-dir + * path, so a temp copy of the profile won't work — Chrome silently discards + * cookies it can't decrypt. We must use the real profile. + */ +export async function importCookiesViaCdp( + browserName: string, + domains: string[], + profile = 'Default', +): Promise<{ cookies: PlaywrightCookie[]; count: number; failed: number; domainCounts: Record<string, number> }> { + if (domains.length === 0) return { cookies: [], count: 0, failed: 0, domainCounts: {} }; + if (process.platform !== 'win32') { + throw new CookieImportError('CDP extraction is only needed on Windows', 'not_supported'); + } + + const browser = resolveBrowser(browserName); + const exePath = findBrowserExe(browser.name); + if (!exePath) { + throw new CookieImportError( + `Cannot find ${browser.name} executable. Install it or use /connect-chrome.`, + 'not_installed', + ); + } + + if (await isBrowserRunning(browser.name)) { + throw new CookieImportError( + `${browser.name} is running. 
Close it first so we can launch headless with your profile, or use /connect-chrome to control your real browser directly.`, + 'browser_running', + 'retry', + ); + } + + // Must use the real user data dir — v20 ABE keys are path-bound + const dataDir = getDataDirForPlatform(browser, 'win32'); + if (!dataDir) throw new CookieImportError(`No Windows data dir for ${browser.name}`, 'not_installed'); + const userDataDir = path.join(getBaseDir('win32'), dataDir); + + // Launch Chrome headless with remote debugging on the real profile. + // + // Security posture of the debug port: + // - Chrome binds --remote-debugging-port to 127.0.0.1 by default. We rely + // on that — the port is NOT exposed to the network. Any local process + // running as the same user could connect and read cookies, but if an + // attacker already has local-user access they can read the cookie DB + // directly. Threat model: no worse than baseline. + // - Port is randomized in [9222, 9321] to avoid collisions with other + // Chrome-based tools the user may have open. Not cryptographic. + // - Chrome is always killed in the finally block below (even on crash). + // + // Debugging note: if this path starts failing after a Chrome update, + // check the Chrome version logged below — Chrome's ABE key format (v20) + // or /json/list shape can change between major versions. + const debugPort = 9222 + Math.floor(Math.random() * 100); + const chromeProc = Bun.spawn([ + exePath, + `--remote-debugging-port=${debugPort}`, + `--user-data-dir=${userDataDir}`, + `--profile-directory=${profile}`, + '--headless=new', + '--no-first-run', + '--disable-background-networking', + '--disable-default-apps', + '--disable-extensions', + '--disable-sync', + '--no-default-browser-check', + ], { stdout: 'pipe', stderr: 'pipe' }); + + // Wait for Chrome to start, then find a page target's WebSocket URL. + // Network.getAllCookies is only available on page targets, not browser. 
+ let wsUrl: string | null = null; + const startTime = Date.now(); + let loggedVersion = false; + while (Date.now() - startTime < 15_000) { + try { + // One-time version log for future diagnostics when Chrome changes v20 format. + if (!loggedVersion) { + try { + const versionResp = await fetch(`http://127.0.0.1:${debugPort}/json/version`); + if (versionResp.ok) { + const v = await versionResp.json() as { Browser?: string }; + console.log(`[cookie-import] CDP fallback: ${browser.name} ${v.Browser || 'unknown version'}`); + loggedVersion = true; + } + } catch {} + } + const resp = await fetch(`http://127.0.0.1:${debugPort}/json/list`); + if (resp.ok) { + const targets = await resp.json() as Array<{ type: string; webSocketDebuggerUrl?: string }>; + const page = targets.find(t => t.type === 'page'); + if (page?.webSocketDebuggerUrl) { + wsUrl = page.webSocketDebuggerUrl; + break; + } + } + } catch { + // Not ready yet + } + await new Promise(r => setTimeout(r, 300)); + } + + if (!wsUrl) { + chromeProc.kill(); + throw new CookieImportError( + `${browser.name} headless did not start within 15s`, + 'cdp_timeout', + 'retry', + ); + } + + try { + // Connect via CDP WebSocket + const cookies = await extractCookiesViaCdp(wsUrl, domains); + + const domainCounts: Record<string, number> = {}; + for (const c of cookies) { + domainCounts[c.domain] = (domainCounts[c.domain] || 0) + 1; + } + + return { cookies, count: cookies.length, failed: 0, domainCounts }; + } finally { + chromeProc.kill(); + } +} + +async function extractCookiesViaCdp(wsUrl: string, domains: string[]): Promise<PlaywrightCookie[]> { + return new Promise<PlaywrightCookie[]>((resolve, reject) => { + const ws = new WebSocket(wsUrl); + let msgId = 1; + + const timeout = setTimeout(() => { + ws.close(); + reject(new CookieImportError('CDP cookie extraction timed out', 'cdp_timeout')); + }, 10_000); + + ws.onopen = () => { + // Enable Network domain first, then request all cookies + ws.send(JSON.stringify({ id: msgId++, method: 'Network.enable' })); + }; + + ws.onmessage = 
(event) => { + const data = JSON.parse(String(event.data)); + + // After Network.enable succeeds, request all cookies + if (data.id === 1 && !data.error) { + ws.send(JSON.stringify({ id: msgId, method: 'Network.getAllCookies' })); + return; + } + + if (data.id === msgId && data.result?.cookies) { + clearTimeout(timeout); + ws.close(); + + // Normalize domain matching: domains like ".example.com" match "example.com" and vice versa + const domainSet = new Set(); + for (const d of domains) { + domainSet.add(d); + domainSet.add(d.startsWith('.') ? d.slice(1) : '.' + d); + } + + const matched: PlaywrightCookie[] = []; + for (const c of data.result.cookies as CdpCookie[]) { + if (!domainSet.has(c.domain)) continue; + matched.push({ + name: c.name, + value: c.value, + domain: c.domain, + path: c.path || '/', + expires: c.expires === -1 ? -1 : c.expires, + secure: c.secure, + httpOnly: c.httpOnly, + sameSite: cdpSameSite(c.sameSite), + }); + } + resolve(matched); + } else if (data.id === msgId && data.error) { + clearTimeout(timeout); + ws.close(); + reject(new CookieImportError( + `CDP error: ${data.error.message}`, + 'cdp_error', + )); + } + }; + + ws.onerror = (err) => { + clearTimeout(timeout); + reject(new CookieImportError( + `CDP WebSocket error: ${(err as any).message || 'unknown'}`, + 'cdp_error', + )); + }; + }); +} + +interface CdpCookie { + name: string; + value: string; + domain: string; + path: string; + expires: number; + size: number; + httpOnly: boolean; + secure: boolean; + session: boolean; + sameSite: string; +} + +function cdpSameSite(value: string): 'Strict' | 'Lax' | 'None' { + switch (value) { + case 'Strict': return 'Strict'; + case 'Lax': return 'Lax'; + case 'None': return 'None'; + default: return 'Lax'; + } +} + +/** + * Check if a browser's cookie DB contains v20 (App-Bound) encrypted cookies. + * Quick check — reads a small sample, no decryption attempted. 
+ */ +export function hasV20Cookies(browserName: string, profile = 'Default'): boolean { + if (process.platform !== 'win32') return false; + try { + const browser = resolveBrowser(browserName); + const match = getBrowserMatch(browser, profile); + const db = openDb(match.dbPath, browser.name); + try { + const rows = db.query('SELECT encrypted_value FROM cookies LIMIT 10').all() as Array<{ encrypted_value: Buffer | Uint8Array }>; + return rows.some(row => { + const ev = Buffer.from(row.encrypted_value); + return ev.length >= 3 && ev.slice(0, 3).toString('utf-8') === 'v20'; + }); + } finally { + db.close(); + } + } catch { + return false; + } +} diff --git a/browse/src/cookie-picker-routes.ts b/browse/src/cookie-picker-routes.ts index a78741cc54..07ab5a2c26 100644 --- a/browse/src/cookie-picker-routes.ts +++ b/browse/src/cookie-picker-routes.ts @@ -19,7 +19,7 @@ import * as crypto from 'crypto'; import type { BrowserManager } from './browser-manager'; -import { findInstalledBrowsers, listProfiles, listDomains, importCookies, CookieImportError, type PlaywrightCookie } from './cookie-import-browser'; +import { findInstalledBrowsers, listProfiles, listDomains, importCookies, importCookiesViaCdp, hasV20Cookies, CookieImportError, type PlaywrightCookie } from './cookie-import-browser'; import { getCookiePickerHTML } from './cookie-picker-ui'; // ─── Auth State ───────────────────────────────────────────────── @@ -40,6 +40,23 @@ export function generatePickerCode(): string { return code; } +/** Return true while the picker still has a live code or session. */ +export function hasActivePicker(): boolean { + const now = Date.now(); + + for (const [code, expiry] of pendingCodes) { + if (expiry > now) return true; + pendingCodes.delete(code); + } + + for (const [session, expiry] of validSessions) { + if (expiry > now) return true; + validSessions.delete(session); + } + + return false; +} + /** Extract session ID from the gstack_picker cookie. 
*/ function getSessionFromCookie(req: Request): string | null { const cookie = req.headers.get('cookie'); @@ -217,7 +234,25 @@ export async function handleCookiePickerRoute( } // Decrypt cookies from the browser DB - const result = await importCookies(browser, domains, profile || 'Default'); + const selectedProfile = profile || 'Default'; + let result = await importCookies(browser, domains, selectedProfile); + + // If all cookies failed and v20 encryption is detected, try CDP extraction + if (result.cookies.length === 0 && result.failed > 0 && hasV20Cookies(browser, selectedProfile)) { + console.log(`[cookie-picker] v20 App-Bound Encryption detected, trying CDP extraction...`); + try { + result = await importCookiesViaCdp(browser, domains, selectedProfile); + } catch (cdpErr: any) { + console.log(`[cookie-picker] CDP fallback failed: ${cdpErr.message}`); + return jsonResponse({ + imported: 0, + failed: result.failed, + domainCounts: {}, + message: `Cookies use App-Bound Encryption (v20). 
Close ${browser}, retry, or use /connect-chrome to browse with your real browser directly.`, + code: 'v20_encryption', + }, { port }); + } + } if (result.cookies.length === 0) { return jsonResponse({ diff --git a/browse/src/server.ts b/browse/src/server.ts index d25fc8fa6b..573a73d5d9 100644 --- a/browse/src/server.ts +++ b/browse/src/server.ts @@ -17,7 +17,7 @@ import { BrowserManager } from './browser-manager'; import { handleReadCommand } from './read-commands'; import { handleWriteCommand } from './write-commands'; import { handleMetaCommand } from './meta-commands'; -import { handleCookiePickerRoute } from './cookie-picker-routes'; +import { handleCookiePickerRoute, hasActivePicker } from './cookie-picker-routes'; import { sanitizeExtensionUrl } from './sidebar-utils'; import { COMMAND_DESCRIPTIONS, PAGE_CONTENT_COMMANDS, wrapUntrustedContent } from './commands'; import { @@ -765,14 +765,37 @@ const idleCheckInterval = setInterval(() => { // also checks BROWSE_HEADED in case a future launcher forgets. // Cleanup happens via browser disconnect event or $B disconnect. const BROWSE_PARENT_PID = parseInt(process.env.BROWSE_PARENT_PID || '0', 10); +// Outer gate: if the spawner explicitly marks this as headed (env var set at +// launch time), skip registering the watchdog entirely. Cheaper than entering +// the closure every 15s. The CLI's connect path sets BROWSE_HEADED=1 + PID=0, +// so this branch is the normal path for /open-gstack-browser. const IS_HEADED_WATCHDOG = process.env.BROWSE_HEADED === '1'; if (BROWSE_PARENT_PID > 0 && !IS_HEADED_WATCHDOG) { + let parentGone = false; setInterval(() => { try { process.kill(BROWSE_PARENT_PID, 0); // signal 0 = existence check only, no signal sent } catch { - console.log(`[browse] Parent process ${BROWSE_PARENT_PID} exited, shutting down`); - shutdown(); + // Parent exited. Resolution order: + // 1. Active cookie picker (one-time code or session live)? 
Stay alive + // regardless of mode — tearing down the server mid-import leaves the + // picker UI with a stale "Failed to fetch" error. + // 2. Headed / tunnel mode? Shutdown. The idle timeout doesn't apply in + // these modes (see idleCheckInterval above — both early-return), so + // ignoring parent death here would leak orphan daemons after + // /pair-agent or /open-gstack-browser sessions. + // 3. Normal (headless) mode? Stay alive. Claude Code's Bash tool kills + // the parent shell between invocations. The idle timeout (30 min) + // handles eventual cleanup. + if (hasActivePicker()) return; + const headed = browserManager.getConnectionMode() === 'headed'; + if (headed || tunnelActive) { + console.log(`[browse] Parent process ${BROWSE_PARENT_PID} exited in ${headed ? 'headed' : 'tunnel'} mode, shutting down`); + shutdown(); + } else if (!parentGone) { + parentGone = true; + console.log(`[browse] Parent process ${BROWSE_PARENT_PID} exited (server stays alive, idle timeout will clean up)`); + } } }, 15_000); } else if (IS_HEADED_WATCHDOG) { @@ -1241,11 +1264,36 @@ async function shutdown(exitCode: number = 0) { } // Handle signals +// // Node passes the signal name (e.g. 'SIGTERM') as the first arg to listeners. -// Wrap so shutdown() receives no args — otherwise the string gets passed as -// exitCode and process.exit() coerces it to NaN, exiting with code 1 instead of 0. -process.on('SIGTERM', () => shutdown()); +// Wrap calls to shutdown() so it receives no args — otherwise the string gets +// passed as exitCode and process.exit() coerces it to NaN, exiting with code 1 +// instead of 0. (Caught in v0.18.1.0 #1025.) +// +// SIGINT (Ctrl+C): user intentionally stopping → shutdown. process.on('SIGINT', () => shutdown()); +// SIGTERM behavior depends on mode: +// - Normal (headless) mode: Claude Code's Bash sandbox fires SIGTERM when the +// parent shell exits between tool invocations. Ignoring it keeps the server +// alive across $B calls. 
Idle timeout (30 min) handles eventual cleanup. +// - Headed / tunnel mode: idle timeout doesn't apply in these modes. Respect +// SIGTERM so external tooling (systemd, supervisord, CI) can shut cleanly +// without waiting forever. Ctrl+C and /stop still work either way. +// - Active cookie picker: never tear down mid-import regardless of mode — +// would strand the picker UI with "Failed to fetch." +process.on('SIGTERM', () => { + if (hasActivePicker()) { + console.log('[browse] Received SIGTERM but cookie picker is active, ignoring to avoid stranding the picker UI'); + return; + } + const headed = browserManager.getConnectionMode() === 'headed'; + if (headed || tunnelActive) { + console.log(`[browse] Received SIGTERM in ${headed ? 'headed' : 'tunnel'} mode, shutting down`); + shutdown(); + } else { + console.log('[browse] Received SIGTERM (ignoring — use /stop or Ctrl+C for intentional shutdown)'); + } +}); // Windows: taskkill /F bypasses SIGTERM, but 'exit' fires for some shutdown paths. // Defense-in-depth — primary cleanup is the CLI's stale-state detection via health check. 
if (process.platform === 'win32') { diff --git a/browse/src/write-commands.ts b/browse/src/write-commands.ts index 779a858e0a..8dbb16f7e9 100644 --- a/browse/src/write-commands.ts +++ b/browse/src/write-commands.ts @@ -7,7 +7,7 @@ import type { TabSession } from './tab-session'; import type { BrowserManager } from './browser-manager'; -import { findInstalledBrowsers, importCookies, listSupportedBrowserNames } from './cookie-import-browser'; +import { findInstalledBrowsers, importCookies, importCookiesViaCdp, hasV20Cookies, listSupportedBrowserNames } from './cookie-import-browser'; import { generatePickerCode } from './cookie-picker-routes'; import { validateNavigationUrl } from './url-validation'; import { validateOutputPath } from './path-security'; @@ -504,7 +504,11 @@ export async function handleWriteCommand( throw new Error(`--domain "${domain}" does not match current page domain "${pageHostname}". Navigate to the target site first.`); } const browser = browserArg || 'comet'; - const result = await importCookies(browser, [domain], profile); + let result = await importCookies(browser, [domain], profile); + // If all cookies failed and v20 is detected, try CDP extraction + if (result.cookies.length === 0 && result.failed > 0 && hasV20Cookies(browser, profile)) { + result = await importCookiesViaCdp(browser, [domain], profile); + } if (result.cookies.length > 0) { await page.context().addCookies(result.cookies); bm.trackCookieImportDomains([domain]); diff --git a/browse/test/cookie-picker-routes.test.ts b/browse/test/cookie-picker-routes.test.ts index 506156085e..c1934cd86c 100644 --- a/browse/test/cookie-picker-routes.test.ts +++ b/browse/test/cookie-picker-routes.test.ts @@ -7,7 +7,7 @@ */ import { describe, test, expect } from 'bun:test'; -import { handleCookiePickerRoute, generatePickerCode } from '../src/cookie-picker-routes'; +import { handleCookiePickerRoute, generatePickerCode, hasActivePicker } from '../src/cookie-picker-routes'; // ─── Mock 
BrowserManager ────────────────────────────────────── @@ -284,6 +284,57 @@ describe('cookie-picker-routes', () => { }); }); + describe('active picker tracking', () => { + test('one-time codes keep the picker active until consumed', async () => { + const realNow = Date.now; + Date.now = () => realNow() + 3_700_000; + try { + expect(hasActivePicker()).toBe(false); // clears any stale state from prior tests + } finally { + Date.now = realNow; + } + + const { bm } = mockBrowserManager(); + const code = generatePickerCode(); + expect(hasActivePicker()).toBe(true); + + const res = await handleCookiePickerRoute( + makeUrl(`/cookie-picker?code=${code}`), + new Request('http://127.0.0.1:9470', { method: 'GET' }), + bm, + 'test-token', + ); + + expect(res.status).toBe(302); + expect(hasActivePicker()).toBe(true); // session is now active + }); + + test('picker becomes inactive after an invalid session probe clears expired state', async () => { + const { bm } = mockBrowserManager(); + const session = await getSessionCookie(bm, 'test-token'); + expect(hasActivePicker()).toBe(true); + + const realNow = Date.now; + Date.now = () => realNow() + 3_700_000; + try { + const res = await handleCookiePickerRoute( + makeUrl('/cookie-picker'), + new Request('http://127.0.0.1:9470', { + method: 'GET', + headers: { 'Cookie': `gstack_picker=${session}` }, + }), + bm, + 'test-token', + ); + + expect(res.status).toBe(403); + expect(hasActivePicker()).toBe(false); + } finally { + Date.now = realNow; + } + }); + }); + describe('session cookie auth', () => { test('valid session cookie grants HTML access', async () => { const { bm } = mockBrowserManager(); diff --git a/browse/test/watchdog.test.ts b/browse/test/watchdog.test.ts index 1a6fd9af1d..42faa262a1 100644 --- a/browse/test/watchdog.test.ts +++ b/browse/test/watchdog.test.ts @@ -5,16 +5,28 @@ import * as fs from 'fs'; import * as os from 'os'; // End-to-end regression tests for the parent-process watchdog in server.ts. 
-// Proves three invariants that the v0.18.1.0 fix depends on: +// The watchdog has layered behavior since v0.18.1.0 (#1025) and v0.18.2.0 +// (community wave #994 + our mode-gating follow-up): // -// 1. BROWSE_PARENT_PID=0 disables the watchdog (opt-in used by CI and pair-agent). -// 2. BROWSE_HEADED=1 disables the watchdog (server-side defense-in-depth). -// 3. Default headless mode still kills the server when its parent dies -// (the original orphan-prevention must keep working). +// 1. BROWSE_PARENT_PID=0 disables the watchdog entirely (opt-in for CI + pair-agent). +// 2. BROWSE_HEADED=1 disables the watchdog entirely (server-side defense for headed +// mode, where the user controls window lifecycle). +// 3. Default headless mode + parent dies: server STAYS ALIVE. The original +// "kill on parent death" was inverted by #994 because Claude Code's Bash +// sandbox kills the parent shell between every tool invocation, and #994 +// makes browse persist across $B calls. Idle timeout (30 min) handles +// eventual cleanup. // -// Each test spawns the real server.ts, not a mock. Tests 1 and 2 verify the -// code path via stdout log line (fast). Test 3 waits for the watchdog's 15s -// poll cycle to actually fire (slow — ~25s). +// Tunnel mode coverage (parent dies → shutdown because idle timeout doesn't +// apply) is not covered by an automated test here — tunnelActive is a runtime +// variable set by /pair-agent's tunnel-create flow, not an env var, so faking +// it would require invasive test-only hooks. The mode check is documented +// inline at the watchdog and SIGTERM handlers, and would regress visibly for +// /pair-agent users (server lingers after disconnect). +// +// Each test spawns the real server.ts. Tests 1 and 2 verify behavior via +// stdout log line (fast). Test 3 waits for the watchdog poll cycle to confirm +// the server REMAINS alive after parent death (slow — ~20s observation window). 
const ROOT = path.resolve(import.meta.dir, '..'); const SERVER_SCRIPT = path.join(ROOT, 'src', 'server.ts'); @@ -117,7 +129,7 @@ describe('parent-process watchdog (v0.18.1.0)', () => { expect(out).not.toContain('Parent process 999999 exited'); }, 15_000); - test('default headless mode: watchdog fires when parent dies', async () => { + test('default headless mode: server STAYS ALIVE when parent dies (#994)', async () => { tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'watchdog-default-')); // Spawn a real, short-lived "parent" that the watchdog will poll. @@ -133,15 +145,13 @@ describe('parent-process watchdog (v0.18.1.0)', () => { expect(isProcessAlive(serverPid)).toBe(true); // Kill the parent. The watchdog polls every 15s, so first tick after - // parent death lands within ~15s, plus shutdown() cleanup time. + // parent death lands within ~15s. Pre-#994 the server would shutdown + // here. Post-#994 the server logs the parent exit and stays alive. parentProc.kill('SIGKILL'); - // Poll for up to 25s for the server to exit. - const deadline = Date.now() + 25_000; - while (Date.now() < deadline) { - if (!isProcessAlive(serverPid)) break; - await Bun.sleep(500); - } - expect(isProcessAlive(serverPid)).toBe(false); + // Wait long enough for at least one watchdog tick (15s) plus margin. + // Server should still be alive — that's the whole point of #994. + await Bun.sleep(20_000); + expect(isProcessAlive(serverPid)).toBe(true); }, 45_000); }); diff --git a/design-consultation/SKILL.md b/design-consultation/SKILL.md index 36d89123b1..baa0f00b0a 100644 --- a/design-consultation/SKILL.md +++ b/design-consultation/SKILL.md @@ -662,7 +662,7 @@ If browse is not available, that's fine — visual research is optional. 
The ski _ROOT=$(git rev-parse --show-toplevel 2>/dev/null) D="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/design/dist/design" ] && D="$_ROOT/.claude/skills/gstack/design/dist/design" -[ -z "$D" ] && D=~/.claude/skills/gstack/design/dist/design +[ -z "$D" ] && D="$HOME/.claude/skills/gstack/design/dist/design" if [ -x "$D" ]; then echo "DESIGN_READY: $D" else @@ -670,7 +670,7 @@ else fi B="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/browse/dist/browse" ] && B="$_ROOT/.claude/skills/gstack/browse/dist/browse" -[ -z "$B" ] && B=~/.claude/skills/gstack/browse/dist/browse +[ -z "$B" ] && B="$HOME/.claude/skills/gstack/browse/dist/browse" if [ -x "$B" ]; then echo "BROWSE_READY: $B" else @@ -985,7 +985,7 @@ Generate AI-rendered mockups showing the proposed design system applied to reali ```bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -_DESIGN_DIR=~/.gstack/projects/$SLUG/designs/design-system-$(date +%Y%m%d) +_DESIGN_DIR="$HOME/.gstack/projects/$SLUG/designs/design-system-$(date +%Y%m%d)" mkdir -p "$_DESIGN_DIR" echo "DESIGN_DIR: $_DESIGN_DIR" ``` diff --git a/design-consultation/SKILL.md.tmpl b/design-consultation/SKILL.md.tmpl index d80c7fb264..fe26c1fe1a 100644 --- a/design-consultation/SKILL.md.tmpl +++ b/design-consultation/SKILL.md.tmpl @@ -263,7 +263,7 @@ Generate AI-rendered mockups showing the proposed design system applied to reali ```bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -_DESIGN_DIR=~/.gstack/projects/$SLUG/designs/design-system-$(date +%Y%m%d) +_DESIGN_DIR="$HOME/.gstack/projects/$SLUG/designs/design-system-$(date +%Y%m%d)" mkdir -p "$_DESIGN_DIR" echo "DESIGN_DIR: $_DESIGN_DIR" ``` diff --git a/design-html/SKILL.md b/design-html/SKILL.md index ea73c8524b..d36c1d1c93 100644 --- a/design-html/SKILL.md +++ b/design-html/SKILL.md @@ -571,7 +571,7 @@ around obstacles. 
_ROOT=$(git rev-parse --show-toplevel 2>/dev/null) D="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/design/dist/design" ] && D="$_ROOT/.claude/skills/gstack/design/dist/design" -[ -z "$D" ] && D=~/.claude/skills/gstack/design/dist/design +[ -z "$D" ] && D="$HOME/.claude/skills/gstack/design/dist/design" if [ -x "$D" ]; then echo "DESIGN_READY: $D" else @@ -579,7 +579,7 @@ else fi B="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/browse/dist/browse" ] && B="$_ROOT/.claude/skills/gstack/browse/dist/browse" -[ -z "$B" ] && B=~/.claude/skills/gstack/browse/dist/browse +[ -z "$B" ] && B="$HOME/.claude/skills/gstack/browse/dist/browse" if [ -x "$B" ]; then echo "BROWSE_READY: $B" else diff --git a/design-review/SKILL.md b/design-review/SKILL.md index cc1f0d1635..e4fe88e7ba 100644 --- a/design-review/SKILL.md +++ b/design-review/SKILL.md @@ -825,7 +825,7 @@ Only commit if there are changes. Stage all bootstrap files (config, test direct _ROOT=$(git rev-parse --show-toplevel 2>/dev/null) D="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/design/dist/design" ] && D="$_ROOT/.claude/skills/gstack/design/dist/design" -[ -z "$D" ] && D=~/.claude/skills/gstack/design/dist/design +[ -z "$D" ] && D="$HOME/.claude/skills/gstack/design/dist/design" if [ -x "$D" ]; then echo "DESIGN_READY: $D" else @@ -833,7 +833,7 @@ else fi B="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/browse/dist/browse" ] && B="$_ROOT/.claude/skills/gstack/browse/dist/browse" -[ -z "$B" ] && B=~/.claude/skills/gstack/browse/dist/browse +[ -z "$B" ] && B="$HOME/.claude/skills/gstack/browse/dist/browse" if [ -x "$B" ]; then echo "BROWSE_READY: $B" else @@ -870,7 +870,7 @@ If `DESIGN_NOT_AVAILABLE`: skip mockup generation — the fix loop works without ```bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -REPORT_DIR=~/.gstack/projects/$SLUG/designs/design-audit-$(date +%Y%m%d) +REPORT_DIR="$HOME/.gstack/projects/$SLUG/designs/design-audit-$(date +%Y%m%d)" mkdir 
-p "$REPORT_DIR/screenshots" echo "REPORT_DIR: $REPORT_DIR" ``` diff --git a/design-review/SKILL.md.tmpl b/design-review/SKILL.md.tmpl index fab9bb39e6..bdcda48e29 100644 --- a/design-review/SKILL.md.tmpl +++ b/design-review/SKILL.md.tmpl @@ -96,7 +96,7 @@ If `DESIGN_NOT_AVAILABLE`: skip mockup generation — the fix loop works without ```bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -REPORT_DIR=~/.gstack/projects/$SLUG/designs/design-audit-$(date +%Y%m%d) +REPORT_DIR="$HOME/.gstack/projects/$SLUG/designs/design-audit-$(date +%Y%m%d)" mkdir -p "$REPORT_DIR/screenshots" echo "REPORT_DIR: $REPORT_DIR" ``` diff --git a/design-shotgun/SKILL.md b/design-shotgun/SKILL.md index 861ee06d14..c61b15f8d6 100644 --- a/design-shotgun/SKILL.md +++ b/design-shotgun/SKILL.md @@ -565,7 +565,7 @@ visual brainstorming, not a review process. _ROOT=$(git rev-parse --show-toplevel 2>/dev/null) D="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/design/dist/design" ] && D="$_ROOT/.claude/skills/gstack/design/dist/design" -[ -z "$D" ] && D=~/.claude/skills/gstack/design/dist/design +[ -z "$D" ] && D="$HOME/.claude/skills/gstack/design/dist/design" if [ -x "$D" ]; then echo "DESIGN_READY: $D" else @@ -573,7 +573,7 @@ else fi B="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/browse/dist/browse" ] && B="$_ROOT/.claude/skills/gstack/browse/dist/browse" -[ -z "$B" ] && B=~/.claude/skills/gstack/browse/dist/browse +[ -z "$B" ] && B="$HOME/.claude/skills/gstack/browse/dist/browse" if [ -x "$B" ]; then echo "BROWSE_READY: $B" else @@ -797,7 +797,7 @@ Set up the output directory: ```bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -_DESIGN_DIR=~/.gstack/projects/$SLUG/designs/-$(date +%Y%m%d) +_DESIGN_DIR="$HOME/.gstack/projects/$SLUG/designs/-$(date +%Y%m%d)" mkdir -p "$_DESIGN_DIR" echo "DESIGN_DIR: $_DESIGN_DIR" ``` diff --git a/design-shotgun/SKILL.md.tmpl b/design-shotgun/SKILL.md.tmpl index 4842409d2e..ab22c312fc 100644 --- 
a/design-shotgun/SKILL.md.tmpl +++ b/design-shotgun/SKILL.md.tmpl @@ -144,7 +144,7 @@ Set up the output directory: ```bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -_DESIGN_DIR=~/.gstack/projects/$SLUG/designs/-$(date +%Y%m%d) +_DESIGN_DIR="$HOME/.gstack/projects/$SLUG/designs/-$(date +%Y%m%d)" mkdir -p "$_DESIGN_DIR" echo "DESIGN_DIR: $_DESIGN_DIR" ``` diff --git a/hosts/opencode.ts b/hosts/opencode.ts index dc4a5bfc20..3ad0901ec1 100644 --- a/hosts/opencode.ts +++ b/hosts/opencode.ts @@ -31,9 +31,9 @@ const opencode: HostConfig = { suppressedResolvers: ['GBRAIN_CONTEXT_LOAD', 'GBRAIN_SAVE_RESULTS'], runtimeRoot: { - globalSymlinks: ['bin', 'browse/dist', 'browse/bin', 'gstack-upgrade', 'ETHOS.md'], + globalSymlinks: ['bin', 'browse/dist', 'browse/bin', 'design/dist', 'gstack-upgrade', 'ETHOS.md', 'review/specialists', 'qa/templates', 'qa/references', 'plan-devex-review/dx-hall-of-fame.md'], globalFiles: { - 'review': ['checklist.md', 'TODOS-format.md'], + 'review': ['checklist.md', 'design-checklist.md', 'greptile-triage.md', 'TODOS-format.md'], }, }, diff --git a/office-hours/SKILL.md b/office-hours/SKILL.md index 0c31095fc8..699e4a58b5 100644 --- a/office-hours/SKILL.md +++ b/office-hours/SKILL.md @@ -1124,7 +1124,7 @@ Present via AskUserQuestion. Do NOT proceed without user approval of the approac _ROOT=$(git rev-parse --show-toplevel 2>/dev/null) D="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/design/dist/design" ] && D="$_ROOT/.claude/skills/gstack/design/dist/design" -[ -z "$D" ] && D=~/.claude/skills/gstack/design/dist/design +[ -z "$D" ] && D="$HOME/.claude/skills/gstack/design/dist/design" [ -x "$D" ] && echo "DESIGN_READY" || echo "DESIGN_NOT_AVAILABLE" ``` @@ -1139,7 +1139,7 @@ Generating visual mockups of the proposed design... 
(say "skip" if you don't nee ```bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -_DESIGN_DIR=~/.gstack/projects/$SLUG/designs/mockup-$(date +%Y%m%d) +_DESIGN_DIR="$HOME/.gstack/projects/$SLUG/designs/mockup-$(date +%Y%m%d)" mkdir -p "$_DESIGN_DIR" echo "DESIGN_DIR: $_DESIGN_DIR" ``` diff --git a/openclaw/skills/gstack-openclaw-ceo-review/SKILL.md b/openclaw/skills/gstack-openclaw-ceo-review/SKILL.md index a11f15814a..c0b191cfb5 100644 --- a/openclaw/skills/gstack-openclaw-ceo-review/SKILL.md +++ b/openclaw/skills/gstack-openclaw-ceo-review/SKILL.md @@ -1,8 +1,6 @@ --- name: gstack-openclaw-ceo-review -description: CEO/founder-mode plan review. Rethink the problem, find the 10-star product, challenge premises, expand scope when it creates a better product. Four modes: SCOPE EXPANSION (dream big), SELECTIVE EXPANSION (hold scope + cherry-pick), HOLD SCOPE (maximum rigor), SCOPE REDUCTION (strip to essentials). Use when asked to review a plan, challenge this, CEO review, poke holes, think bigger, or expand scope. -version: 1.0.0 -metadata: { "openclaw": { "emoji": "👑" } } +description: Use when asked to review a plan, challenge a proposal, run a CEO review, poke holes in an approach, think bigger about scope, or decide whether to expand or reduce the plan. --- # CEO Plan Review @@ -129,7 +127,6 @@ Once selected, commit fully. Do not silently drift. **Anti-skip rule:** Never condense, abbreviate, or skip any review section regardless of plan type. If a section genuinely has zero findings, say "No issues found" and move on, but you must evaluate it. Ask the user about each issue ONE AT A TIME. Do NOT batch. -**Reminder: Do NOT make any code changes. Review only.** ### Section 1: Architecture Review Evaluate system design, component boundaries, data flow (all four paths), state machines, coupling, scaling, security architecture, production failure scenarios, rollback posture. Draw dependency graphs. 
diff --git a/openclaw/skills/gstack-openclaw-investigate/SKILL.md b/openclaw/skills/gstack-openclaw-investigate/SKILL.md index e83d9cda66..829476f9b3 100644 --- a/openclaw/skills/gstack-openclaw-investigate/SKILL.md +++ b/openclaw/skills/gstack-openclaw-investigate/SKILL.md @@ -1,8 +1,6 @@ --- name: gstack-openclaw-investigate -description: Systematic debugging with root cause investigation. Four phases: investigate, analyze, hypothesize, implement. Iron Law: no fixes without root cause. Use when asked to debug, fix a bug, investigate an error, or root cause analysis. Proactively use when user reports errors, stack traces, unexpected behavior, or says something stopped working. -version: 1.0.0 -metadata: { "openclaw": { "emoji": "🔍" } } +description: Use when asked to debug, fix a bug, investigate an error, or do root cause analysis, and when users report errors, stack traces, unexpected behavior, or say something stopped working. --- # Systematic Debugging diff --git a/openclaw/skills/gstack-openclaw-office-hours/SKILL.md b/openclaw/skills/gstack-openclaw-office-hours/SKILL.md index 942f0d6d5a..9d52b3134e 100644 --- a/openclaw/skills/gstack-openclaw-office-hours/SKILL.md +++ b/openclaw/skills/gstack-openclaw-office-hours/SKILL.md @@ -1,8 +1,6 @@ --- name: gstack-openclaw-office-hours -description: Product interrogation with six forcing questions. Two modes: startup diagnostic (demand reality, status quo, desperate specificity, narrowest wedge, observation, future-fit) and builder brainstorm. Use when asked to brainstorm, "is this worth building", "I have an idea", "office hours", or "help me think through this". Proactively use when user describes a new product idea or wants to think through design decisions before any code is written. 
-version: 1.0.0 -metadata: { "openclaw": { "emoji": "🎯" } } +description: Use when asked to brainstorm, evaluate whether an idea is worth building, run office hours, or think through a new product idea or design direction before any code is written. --- # YC Office Hours @@ -281,8 +279,7 @@ Count the signals for the closing message. ## Phase 5: Design Doc -Write the design document and save it to memory. After writing, tell the user: -**"Design doc saved. Other skills (/plan-ceo-review, /plan-eng-review) will find it automatically."** +Write the design document and save it to memory. ### Startup mode design doc template: diff --git a/openclaw/skills/gstack-openclaw-retro/SKILL.md b/openclaw/skills/gstack-openclaw-retro/SKILL.md index 247a94d697..eefc981810 100644 --- a/openclaw/skills/gstack-openclaw-retro/SKILL.md +++ b/openclaw/skills/gstack-openclaw-retro/SKILL.md @@ -1,8 +1,6 @@ --- name: gstack-openclaw-retro -description: Weekly engineering retrospective. Analyzes commit history, work patterns, and code quality metrics with persistent history and trend tracking. Team-aware with per-person contributions, praise, and growth areas. Use when asked for weekly retro, what shipped this week, or engineering retrospective. -version: 1.0.0 -metadata: { "openclaw": { "emoji": "📊" } } +description: "Weekly engineering retrospective. Analyzes commit history, work patterns, and code quality metrics with persistent history and trend tracking. Team-aware with per-person contributions, praise, and growth areas. Use when asked for weekly retro, what shipped this week, or engineering retrospective." --- # Weekly Engineering Retrospective @@ -25,11 +23,6 @@ Parse the argument to determine the time window. Default to 7 days. All times sh --- -### Non-git context (optional) - -Check memory for non-git context: meeting notes, calendar events, decisions, and other -context that doesn't appear in git history. If found, incorporate into the retro narrative. 
- ### Step 1: Gather Raw Data First, fetch origin and identify the current user: diff --git a/package.json b/package.json index 6bd3facbc3..5222ec4c11 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "gstack", - "version": "0.18.2.0", + "version": "0.18.3.0", "description": "Garry's Stack — Claude Code skills + fast headless browser. One repo, one install, entire AI engineering workflow.", "license": "MIT", "type": "module", diff --git a/plan-design-review/SKILL.md b/plan-design-review/SKILL.md index 9a3ce36e37..e8bde0eccc 100644 --- a/plan-design-review/SKILL.md +++ b/plan-design-review/SKILL.md @@ -808,7 +808,7 @@ Report findings before proceeding to Step 0. _ROOT=$(git rev-parse --show-toplevel 2>/dev/null) D="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/design/dist/design" ] && D="$_ROOT/.claude/skills/gstack/design/dist/design" -[ -z "$D" ] && D=~/.claude/skills/gstack/design/dist/design +[ -z "$D" ] && D="$HOME/.claude/skills/gstack/design/dist/design" if [ -x "$D" ]; then echo "DESIGN_READY: $D" else @@ -816,7 +816,7 @@ else fi B="" [ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/browse/dist/browse" ] && B="$_ROOT/.claude/skills/gstack/browse/dist/browse" -[ -z "$B" ] && B=~/.claude/skills/gstack/browse/dist/browse +[ -z "$B" ] && B="$HOME/.claude/skills/gstack/browse/dist/browse" if [ -x "$B" ]; then echo "BROWSE_READY: $B" else @@ -896,7 +896,7 @@ First, set up the output directory. 
Name it after the screen/feature being desig ```bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -_DESIGN_DIR=~/.gstack/projects/$SLUG/designs/-$(date +%Y%m%d) +_DESIGN_DIR="$HOME/.gstack/projects/$SLUG/designs/-$(date +%Y%m%d)" mkdir -p "$_DESIGN_DIR" echo "DESIGN_DIR: $_DESIGN_DIR" ``` diff --git a/plan-design-review/SKILL.md.tmpl b/plan-design-review/SKILL.md.tmpl index b9c42d82db..a4b40d2cb1 100644 --- a/plan-design-review/SKILL.md.tmpl +++ b/plan-design-review/SKILL.md.tmpl @@ -188,7 +188,7 @@ First, set up the output directory. Name it after the screen/feature being desig ```bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -_DESIGN_DIR=~/.gstack/projects/$SLUG/designs/-$(date +%Y%m%d) +_DESIGN_DIR="$HOME/.gstack/projects/$SLUG/designs/-$(date +%Y%m%d)" mkdir -p "$_DESIGN_DIR" echo "DESIGN_DIR: $_DESIGN_DIR" ``` diff --git a/scripts/resolvers/design.ts b/scripts/resolvers/design.ts index 926e348449..191a1b1088 100644 --- a/scripts/resolvers/design.ts +++ b/scripts/resolvers/design.ts @@ -792,7 +792,7 @@ export function generateDesignSetup(ctx: TemplateContext): string { _ROOT=$(git rev-parse --show-toplevel 2>/dev/null) D="" [ -n "$_ROOT" ] && [ -x "$_ROOT/${ctx.paths.localSkillRoot}/design/dist/design" ] && D="$_ROOT/${ctx.paths.localSkillRoot}/design/dist/design" -[ -z "$D" ] && D=${ctx.paths.designDir}/design +[ -z "$D" ] && D="$HOME${ctx.paths.designDir.replace(/^~/, '')}/design" if [ -x "$D" ]; then echo "DESIGN_READY: $D" else @@ -800,7 +800,7 @@ else fi B="" [ -n "$_ROOT" ] && [ -x "$_ROOT/${ctx.paths.localSkillRoot}/browse/dist/browse" ] && B="$_ROOT/${ctx.paths.localSkillRoot}/browse/dist/browse" -[ -z "$B" ] && B=${ctx.paths.browseDir}/browse +[ -z "$B" ] && B="$HOME${ctx.paths.browseDir.replace(/^~/, '')}/browse" if [ -x "$B" ]; then echo "BROWSE_READY: $B" else @@ -837,7 +837,7 @@ export function generateDesignMockup(ctx: TemplateContext): string { _ROOT=$(git rev-parse --show-toplevel 2>/dev/null) D="" [ -n 
"$_ROOT" ] && [ -x "$_ROOT/${ctx.paths.localSkillRoot}/design/dist/design" ] && D="$_ROOT/${ctx.paths.localSkillRoot}/design/dist/design" -[ -z "$D" ] && D=${ctx.paths.designDir}/design +[ -z "$D" ] && D="$HOME${ctx.paths.designDir.replace(/^~/, '')}/design" [ -x "$D" ] && echo "DESIGN_READY" || echo "DESIGN_NOT_AVAILABLE" \`\`\` @@ -852,7 +852,7 @@ Generating visual mockups of the proposed design... (say "skip" if you don't nee \`\`\`bash eval "$(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null)" -_DESIGN_DIR=~/.gstack/projects/$SLUG/designs/mockup-$(date +%Y%m%d) +_DESIGN_DIR="$HOME/.gstack/projects/$SLUG/designs/mockup-$(date +%Y%m%d)" mkdir -p "$_DESIGN_DIR" echo "DESIGN_DIR: $_DESIGN_DIR" \`\`\` diff --git a/setup b/setup index 06a5b2a96c..993d60cd12 100755 --- a/setup +++ b/setup @@ -22,6 +22,8 @@ CODEX_SKILLS="$HOME/.codex/skills" CODEX_GSTACK="$CODEX_SKILLS/gstack" FACTORY_SKILLS="$HOME/.factory/skills" FACTORY_GSTACK="$FACTORY_SKILLS/gstack" +OPENCODE_SKILLS="$HOME/.config/opencode/skills" +OPENCODE_GSTACK="$OPENCODE_SKILLS/gstack" COPILOT_SKILLS="$HOME/.copilot/skills" COPILOT_GSTACK="$COPILOT_SKILLS/gstack" @@ -43,7 +45,7 @@ TEAM_MODE=0 NO_TEAM_MODE=0 while [ $# -gt 0 ]; do case "$1" in - --host) [ -z "$2" ] && echo "Missing value for --host (expected claude, codex, kiro, copilot, or auto)" >&2 && exit 1; HOST="$2"; shift 2 ;; + --host) [ -z "$2" ] && echo "Missing value for --host (expected claude, codex, kiro, factory, opencode, openclaw, hermes, gbrain, copilot, or auto)" >&2 && exit 1; HOST="$2"; shift 2 ;; --host=*) HOST="${1#--host=}"; shift ;; --local) LOCAL_INSTALL=1; shift ;; --prefix) SKILL_PREFIX=1; SKILL_PREFIX_FLAG=1; shift ;; @@ -56,7 +58,7 @@ while [ $# -gt 0 ]; do done case "$HOST" in - claude|codex|kiro|factory|copilot|auto) ;; + claude|codex|kiro|factory|opencode|copilot|auto) ;; openclaw) echo "" echo "OpenClaw integration uses a different model — OpenClaw spawns Claude Code" @@ -91,7 +93,7 @@ case "$HOST" in echo "GBrain setup and 
brain skills ship from the GBrain repo." echo "" exit 0 ;; - *) echo "Unknown --host value: $HOST (expected claude, codex, kiro, factory, openclaw, hermes, gbrain, copilot, or auto)" >&2; exit 1 ;; + *) echo "Unknown --host value: $HOST (expected claude, codex, kiro, factory, opencode, openclaw, hermes, gbrain, copilot, or auto)" >&2; exit 1 ;; esac # ─── Resolve skill prefix preference ───────────────────────── @@ -154,15 +156,17 @@ INSTALL_CLAUDE=0 INSTALL_CODEX=0 INSTALL_KIRO=0 INSTALL_FACTORY=0 +INSTALL_OPENCODE=0 INSTALL_COPILOT=0 if [ "$HOST" = "auto" ]; then command -v claude >/dev/null 2>&1 && INSTALL_CLAUDE=1 command -v codex >/dev/null 2>&1 && INSTALL_CODEX=1 command -v kiro-cli >/dev/null 2>&1 && INSTALL_KIRO=1 command -v droid >/dev/null 2>&1 && INSTALL_FACTORY=1 command -v copilot >/dev/null 2>&1 && INSTALL_COPILOT=1 + command -v opencode >/dev/null 2>&1 && INSTALL_OPENCODE=1 # If none found, default to claude - if [ "$INSTALL_CLAUDE" -eq 0 ] && [ "$INSTALL_CODEX" -eq 0 ] && [ "$INSTALL_KIRO" -eq 0 ] && [ "$INSTALL_FACTORY" -eq 0 ] && [ "$INSTALL_COPILOT" -eq 0 ]; then + if [ "$INSTALL_CLAUDE" -eq 0 ] && [ "$INSTALL_CODEX" -eq 0 ] && [ "$INSTALL_KIRO" -eq 0 ] && [ "$INSTALL_FACTORY" -eq 0 ] && [ "$INSTALL_OPENCODE" -eq 0 ] && [ "$INSTALL_COPILOT" -eq 0 ]; then INSTALL_CLAUDE=1 fi elif [ "$HOST" = "claude" ]; then @@ -173,6 +176,8 @@ elif [ "$HOST" = "kiro" ]; then INSTALL_KIRO=1 elif [ "$HOST" = "factory" ]; then INSTALL_FACTORY=1 +elif [ "$HOST" = "opencode" ]; then + INSTALL_OPENCODE=1 elif [ "$HOST" = "copilot" ]; then INSTALL_COPILOT=1 fi @@ -277,6 +282,16 @@ if [ "$INSTALL_FACTORY" -eq 1 ] && [ "$NEEDS_BUILD" -eq 0 ]; then ) fi +# 1d. Generate .opencode/ OpenCode skill docs +if [ "$INSTALL_OPENCODE" -eq 1 ] && [ "$NEEDS_BUILD" -eq 0 ]; then + log "Generating .opencode/ skill docs..." + ( + cd "$SOURCE_GSTACK_DIR" + bun install --frozen-lockfile 2>/dev/null || bun install + bun run gen:skill-docs --host opencode + ) +fi + # 1d. 
Generate .copilot/ Copilot skill docs if [ "$INSTALL_COPILOT" -eq 1 ] && [ "$NEEDS_BUILD" -eq 0 ]; then log "Generating .copilot/ skill docs..." @@ -612,6 +624,59 @@ create_factory_runtime_root() { fi } +create_opencode_runtime_root() { + local gstack_dir="$1" + local opencode_gstack="$2" + local opencode_dir="$gstack_dir/.opencode/skills" + + if [ -L "$opencode_gstack" ]; then + rm -f "$opencode_gstack" + elif [ -d "$opencode_gstack" ] && [ "$opencode_gstack" != "$gstack_dir" ]; then + rm -rf "$opencode_gstack" + fi + + mkdir -p "$opencode_gstack" "$opencode_gstack/browse" "$opencode_gstack/design" "$opencode_gstack/gstack-upgrade" "$opencode_gstack/review" "$opencode_gstack/qa" "$opencode_gstack/plan-devex-review" + + if [ -f "$opencode_dir/gstack/SKILL.md" ]; then + ln -snf "$opencode_dir/gstack/SKILL.md" "$opencode_gstack/SKILL.md" + fi + if [ -d "$gstack_dir/bin" ]; then + ln -snf "$gstack_dir/bin" "$opencode_gstack/bin" + fi + if [ -d "$gstack_dir/browse/dist" ]; then + ln -snf "$gstack_dir/browse/dist" "$opencode_gstack/browse/dist" + fi + if [ -d "$gstack_dir/browse/bin" ]; then + ln -snf "$gstack_dir/browse/bin" "$opencode_gstack/browse/bin" + fi + if [ -d "$gstack_dir/design/dist" ]; then + ln -snf "$gstack_dir/design/dist" "$opencode_gstack/design/dist" + fi + if [ -f "$opencode_dir/gstack-upgrade/SKILL.md" ]; then + ln -snf "$opencode_dir/gstack-upgrade/SKILL.md" "$opencode_gstack/gstack-upgrade/SKILL.md" + fi + for f in checklist.md design-checklist.md greptile-triage.md TODOS-format.md; do + if [ -f "$gstack_dir/review/$f" ]; then + ln -snf "$gstack_dir/review/$f" "$opencode_gstack/review/$f" + fi + done + if [ -d "$gstack_dir/review/specialists" ]; then + ln -snf "$gstack_dir/review/specialists" "$opencode_gstack/review/specialists" + fi + if [ -d "$gstack_dir/qa/templates" ]; then + ln -snf "$gstack_dir/qa/templates" "$opencode_gstack/qa/templates" + fi + if [ -d "$gstack_dir/qa/references" ]; then + ln -snf "$gstack_dir/qa/references" 
"$opencode_gstack/qa/references" + fi + if [ -f "$gstack_dir/plan-devex-review/dx-hall-of-fame.md" ]; then + ln -snf "$gstack_dir/plan-devex-review/dx-hall-of-fame.md" "$opencode_gstack/plan-devex-review/dx-hall-of-fame.md" + fi + if [ -f "$gstack_dir/ETHOS.md" ]; then + ln -snf "$gstack_dir/ETHOS.md" "$opencode_gstack/ETHOS.md" + fi +} + link_factory_skill_dirs() { local gstack_dir="$1" local skills_dir="$2" @@ -644,6 +709,38 @@ link_factory_skill_dirs() { fi } +link_opencode_skill_dirs() { + local gstack_dir="$1" + local skills_dir="$2" + local opencode_dir="$gstack_dir/.opencode/skills" + local linked=() + + if [ ! -d "$opencode_dir" ]; then + echo " Generating .opencode/ skill docs..." + ( cd "$gstack_dir" && bun run gen:skill-docs --host opencode ) + fi + + if [ ! -d "$opencode_dir" ]; then + echo " warning: .opencode/skills/ generation failed — run 'bun run gen:skill-docs --host opencode' manually" >&2 + return 1 + fi + + for skill_dir in "$opencode_dir"/gstack*/; do + if [ -f "$skill_dir/SKILL.md" ]; then + skill_name="$(basename "$skill_dir")" + [ "$skill_name" = "gstack" ] && continue + target="$skills_dir/$skill_name" + if [ -L "$target" ] || [ ! -e "$target" ]; then + ln -snf "$skill_dir" "$target" + linked+=("$skill_name") + fi + fi + done + if [ ${#linked[@]} -gt 0 ]; then + echo " linked skills: ${linked[*]}" + fi +} + # 4. Install for Claude (default) SKILLS_BASENAME="$(basename "$INSTALL_SKILLS_DIR")" SKILLS_PARENT_BASENAME="$(basename "$(dirname "$INSTALL_SKILLS_DIR")")" @@ -806,6 +903,15 @@ if [ "$INSTALL_FACTORY" -eq 1 ]; then echo " factory skills: $FACTORY_SKILLS" fi +# 6c. Install for OpenCode +if [ "$INSTALL_OPENCODE" -eq 1 ]; then + mkdir -p "$OPENCODE_SKILLS" + create_opencode_runtime_root "$SOURCE_GSTACK_DIR" "$OPENCODE_GSTACK" + link_opencode_skill_dirs "$SOURCE_GSTACK_DIR" "$OPENCODE_SKILLS" + echo "gstack ready (opencode)." + echo " browse: $BROWSE_BIN" + echo " opencode skills: $OPENCODE_SKILLS" +fi # 6c. 
Install for Copilot if [ "$INSTALL_COPILOT" -eq 1 ]; then COPILOT_DIR="$SOURCE_GSTACK_DIR/.copilot/skills" diff --git a/test/gen-skill-docs.test.ts b/test/gen-skill-docs.test.ts index 6e0d7b3cfb..0199e00d33 100644 --- a/test/gen-skill-docs.test.ts +++ b/test/gen-skill-docs.test.ts @@ -2115,15 +2115,16 @@ describe('setup script validation', () => { expect(fnBody).toContain('rm -f "$target"'); }); - test('setup supports --host auto|claude|codex|kiro|copilot', () => { + test('setup supports --host auto|claude|codex|kiro|opencode|copilot', () => { expect(setupContent).toContain('--host'); - expect(setupContent).toContain('claude|codex|kiro|factory|copilot|auto'); + expect(setupContent).toContain('claude|codex|kiro|factory|opencode|copilot|auto'); }); - test('auto mode detects claude, codex, and kiro binaries', () => { + test('auto mode detects claude, codex, kiro, and opencode binaries', () => { expect(setupContent).toContain('command -v claude'); expect(setupContent).toContain('command -v codex'); expect(setupContent).toContain('command -v kiro-cli'); + expect(setupContent).toContain('command -v opencode'); }); // T1: Sidecar skip guard — prevents .agents/skills/gstack from being linked as a skill @@ -2143,7 +2144,6 @@ describe('setup script validation', () => { expect(content).toContain('$GSTACK_BIN/'); }); - // T3: Kiro host support in setup script test('setup supports --host kiro with install section and sed rewrites', () => { expect(setupContent).toContain('INSTALL_KIRO='); expect(setupContent).toContain('kiro-cli'); @@ -2151,6 +2151,21 @@ describe('setup script validation', () => { expect(setupContent).toContain('~/.kiro/skills/gstack'); }); + test('setup supports --host opencode with install section and OpenCode skill path vars', () => { + expect(setupContent).toContain('INSTALL_OPENCODE='); + expect(setupContent).toContain('OPENCODE_SKILLS="$HOME/.config/opencode/skills"'); + expect(setupContent).toContain('OPENCODE_GSTACK="$OPENCODE_SKILLS/gstack"'); + }); + + 
test('setup installs OpenCode skills into a nested gstack runtime root', () => { + expect(setupContent).toContain('create_opencode_runtime_root'); + expect(setupContent).toContain('.opencode/skills'); + expect(setupContent).toContain('review/specialists'); + expect(setupContent).toContain('qa/templates'); + expect(setupContent).toContain('qa/references'); + expect(setupContent).toContain('dx-hall-of-fame.md'); + }); + test('create_agents_sidecar links runtime assets', () => { // Sidecar must link bin, browse, review, qa const fnStart = setupContent.indexOf('create_agents_sidecar()'); diff --git a/test/host-config.test.ts b/test/host-config.test.ts index 1a26c21058..ab0d2cc88d 100644 --- a/test/host-config.test.ts +++ b/test/host-config.test.ts @@ -354,6 +354,21 @@ describe('host-config-export.ts CLI', () => { expect(lines).toContain('review/checklist.md'); }); + test('opencode symlinks returns nested runtime assets', () => { + const { stdout, exitCode } = run('symlinks', 'opencode'); + expect(exitCode).toBe(0); + const lines = stdout.split('\n'); + expect(lines).toContain('bin'); + expect(lines).toContain('browse/dist'); + expect(lines).toContain('browse/bin'); + expect(lines).toContain('review/design-checklist.md'); + expect(lines).toContain('review/greptile-triage.md'); + expect(lines).toContain('review/specialists'); + expect(lines).toContain('qa/templates'); + expect(lines).toContain('qa/references'); + expect(lines).toContain('plan-devex-review/dx-hall-of-fame.md'); + }); + test('symlinks with missing host exits 1', () => { const { exitCode } = run('symlinks'); expect(exitCode).toBe(1); diff --git a/test/openclaw-native-skills.test.ts b/test/openclaw-native-skills.test.ts new file mode 100644 index 0000000000..009b5e22c5 --- /dev/null +++ b/test/openclaw-native-skills.test.ts @@ -0,0 +1,35 @@ +import { describe, test, expect } from 'bun:test'; +import * as fs from 'fs'; +import * as path from 'path'; + +const ROOT = path.resolve(import.meta.dir, '..'); + 
+const OPENCLAW_NATIVE_SKILLS = [ + 'openclaw/skills/gstack-openclaw-investigate/SKILL.md', + 'openclaw/skills/gstack-openclaw-office-hours/SKILL.md', + 'openclaw/skills/gstack-openclaw-ceo-review/SKILL.md', + 'openclaw/skills/gstack-openclaw-retro/SKILL.md', +]; + +function extractFrontmatter(content: string): string { + expect(content.startsWith('---\n')).toBe(true); + const fmEnd = content.indexOf('\n---', 4); + expect(fmEnd).toBeGreaterThan(0); + return content.slice(4, fmEnd); +} + +describe('OpenClaw native skills', () => { + test('frontmatter parses as YAML and keeps only name + description', () => { + for (const skill of OPENCLAW_NATIVE_SKILLS) { + const content = fs.readFileSync(path.join(ROOT, skill), 'utf-8'); + const frontmatter = extractFrontmatter(content); + const parsed = Bun.YAML.parse(frontmatter) as Record<string, unknown>; + + expect(Object.keys(parsed).sort()).toEqual(['description', 'name']); + expect(typeof parsed.name).toBe('string'); + expect(typeof parsed.description).toBe('string'); + expect((parsed.name as string).length).toBeGreaterThan(0); + expect((parsed.description as string).length).toBeGreaterThan(0); + } + }); +});