fix(coolify): strip is_build_time from env writes; add reveal + GCS

Coolify v4's POST/PATCH /applications/{uuid}/envs only accepts key,
value, is_preview, is_literal, is_multiline, is_shown_once. Sending
is_build_time triggers a 422 "This field is not allowed." — it's now
a derived read-only flag (is_buildtime) computed from Dockerfile ARG
usage. This broke any agent that tried to upsert env vars.

Three-layer fix so this can't regress:
  - lib/coolify.ts: COOLIFY_ENV_WRITE_FIELDS whitelist enforced at the
    network boundary, regardless of caller shape
  - app/api/workspaces/[slug]/apps/[uuid]/envs: stops forwarding the
    field; returns a deprecation warning when callers send it; GET
    reads both is_buildtime and is_build_time for version parity
  - app/api/mcp/route.ts: same treatment in the MCP dispatcher;
    AI_CAPABILITIES.md doc corrected

Also bundles (not related to the above):
  - Workspace API keys are now revealable from settings. New
    key_encrypted column stores AES-256-GCM(VIBN_SECRETS_KEY, token).
    POST /api/workspaces/[slug]/keys/[keyId]/reveal returns plaintext
    for session principals only; API-key principals cannot reveal
    siblings. Legacy keys stay valid for auth but can't reveal.
  - P5.3 Object storage: lib/gcp/storage.ts + lib/workspace-gcs.ts
    idempotently provision a per-workspace GCS bucket, service
    account, IAM binding and HMAC key. New POST /api/workspaces/
    [slug]/storage/buckets endpoint. Migration script + smoke test
    included. Proven end-to-end against prod master-ai-484822.

Made-with: Cursor
This commit is contained in:
2026-04-23 11:46:50 -07:00
parent 651ddf1e11
commit 3192e0f7b9
14 changed files with 1794 additions and 37 deletions

View File

@@ -0,0 +1,25 @@
-- =============================================================================
-- Make workspace API keys revealable.
--
-- Adds `key_encrypted` — base64 of secret-box(VIBN_SECRETS_KEY, plaintext token).
-- Existing rows keep `key_encrypted = NULL` and are therefore NOT revealable;
-- only the hash was stored at mint time and the plaintext is unrecoverable by
-- design. Those keys still work for auth (hash lookup is unchanged); they just
-- can't surface the plaintext again — the UI will flag them as legacy.
--
-- New keys minted after this migration will populate `key_encrypted` and can
-- be revealed on demand by session-authenticated users (never by API-key
-- principals — prevents lateral movement).
--
-- Safe to re-run.
-- =============================================================================
ALTER TABLE vibn_workspace_api_keys
ADD COLUMN IF NOT EXISTS key_encrypted TEXT;
-- NOTE: the three adjacent string literals below rely on PostgreSQL's
-- standard-SQL rule that string constants separated by whitespace containing
-- at least one newline are concatenated into a single literal. Do not join
-- them onto one line without merging the quotes.
COMMENT ON COLUMN vibn_workspace_api_keys.key_encrypted IS
'base64( AES-256-GCM encrypt(VIBN_SECRETS_KEY, plaintext vibn_sk_...) ). '
'NULL for legacy rows minted before this column existed — those keys '
'remain valid for auth but cannot be revealed.';
-- Status echo so psql / migration-runner logs show the script completed.
SELECT 'API-key revealability migration complete' AS status;

View File

@@ -0,0 +1,39 @@
-- ---------------------------------------------------------------------------
-- VIBN P5.3 — object-storage bookkeeping columns for vibn_workspaces.
--
-- These columns are written by ensureWorkspaceGcsProvisioned():
--   gcp_service_account_email   — the workspace's dedicated GCP service
--                                 account, e.g.
--                                 vibn-ws-mark@master-ai-484822.iam.gserviceaccount.com
--   gcp_service_account_key_enc — base64( secret-box(SA JSON keyfile) ).
--                                 Used only for runtime auth from app code
--                                 (env injection); the control plane keeps
--                                 authenticating via
--                                 GOOGLE_SERVICE_ACCOUNT_KEY_B64.
--   gcs_default_bucket_name     — globally-unique bucket created on first
--                                 provision, e.g. vibn-ws-mark-a3f9c1.
--   gcs_hmac_access_id          — S3-compatible HMAC access-key id. Stored
--                                 in plain text; not a secret on its own.
--   gcs_hmac_secret_enc         — base64( secret-box(HMAC secret) ).
--                                 Decrypted only when
--                                 STORAGE_SECRET_ACCESS_KEY must be injected
--                                 into a Coolify app.
--   gcp_provision_status        — tracked independently of provision_status
--                                 so a partial GCP failure never flips the
--                                 workspace as a whole. One of:
--                                 'pending' | 'partial' | 'ready' | 'error'.
--   gcp_provision_error         — last error message from the GCP provisioner.
--
-- Idempotent; safe to re-run.
-- ---------------------------------------------------------------------------
ALTER TABLE vibn_workspaces
  ADD COLUMN IF NOT EXISTS gcp_service_account_email TEXT,
  ADD COLUMN IF NOT EXISTS gcp_service_account_key_enc TEXT,
  ADD COLUMN IF NOT EXISTS gcs_default_bucket_name TEXT,
  ADD COLUMN IF NOT EXISTS gcs_hmac_access_id TEXT,
  ADD COLUMN IF NOT EXISTS gcs_hmac_secret_enc TEXT,
  ADD COLUMN IF NOT EXISTS gcp_provision_status TEXT NOT NULL DEFAULT 'pending',
  ADD COLUMN IF NOT EXISTS gcp_provision_error TEXT;

-- Lets the control plane cheaply find workspaces by provisioning state.
CREATE INDEX IF NOT EXISTS vibn_workspaces_gcp_status_idx
  ON vibn_workspaces (gcp_provision_status);

-- Status echo so psql / migration-runner logs show the script completed.
SELECT 'P5.3 workspace-GCS migration complete' AS status;

View File

@@ -0,0 +1,86 @@
/**
* One-shot: run ensureWorkspaceGcsProvisioned() for a specific workspace
* slug against PROD GCP + PROD Postgres. Idempotent — safe to re-run.
*
* Unlike scripts/smoke-storage-e2e.ts this does NOT clean up; the whole
* point is to persist the workspace's provisioned state into the DB.
*
* Usage:
* cd vibn-frontend
* npx -y dotenv-cli -e ../.google.env -e .env.local -- \
* npx tsx scripts/provision-workspace-gcs.ts <slug>
*
* Required env:
* GOOGLE_SERVICE_ACCOUNT_KEY_B64 (from ../.google.env)
* DATABASE_URL (from .env.local, points at prod vibn-postgres)
* VIBN_SECRETS_KEY (from .env.local, ≥16 chars)
*/
import { queryOne } from '../lib/db-postgres';
import { ensureWorkspaceGcsProvisioned } from '../lib/workspace-gcs';
import type { VibnWorkspace } from '../lib/workspaces';
/**
 * Entry point: look up the workspace row by slug, run the idempotent GCS
 * provisioner on it, then re-read the row to confirm the result was
 * persisted. Exits 2 on bad usage, 1 on a missing row, 0 on success.
 */
async function main(): Promise<void> {
  const slug = process.argv[2];
  if (!slug) {
    console.error('Usage: tsx scripts/provision-workspace-gcs.ts <workspace-slug>');
    process.exit(2);
  }

  const rule = '━'.repeat(72);
  console.log(rule);
  console.log(` Provision GCS for workspace: ${slug}`);
  console.log(rule);

  // Load the workspace row we are about to provision.
  const workspace = await queryOne<VibnWorkspace>(
    `SELECT * FROM vibn_workspaces WHERE slug = $1`,
    [slug],
  );
  if (!workspace) {
    console.error(`No vibn_workspaces row found for slug=${slug}`);
    process.exit(1);
  }

  console.log(` id : ${workspace.id}`);
  console.log(` name : ${workspace.name}`);
  console.log(` owner_user_id : ${workspace.owner_user_id}`);
  // @ts-expect-error — new columns not yet in VibnWorkspace type
  console.log(` gcp_status : ${workspace.gcp_provision_status ?? 'pending'}`);
  console.log('');

  console.log('Running ensureWorkspaceGcsProvisioned()…');
  const result = await ensureWorkspaceGcsProvisioned(workspace);

  console.log('');
  console.log(rule);
  console.log(' RESULT');
  console.log(rule);
  console.log(` status : ${result.status}`);
  console.log(` SA : ${result.serviceAccountEmail}`);
  console.log(` bucket : ${result.bucket.name}`);
  console.log(` location : ${result.bucket.location}`);
  console.log(` created : ${result.bucket.timeCreated ?? '(pre-existing)'}`);
  console.log(` HMAC accessId : ${result.hmac.accessId}`);
  console.log('');

  // Re-read the row so the operator sees what was actually persisted.
  // Encrypted columns are masked to '<enc N b64>' so secrets never reach
  // the terminal.
  const persisted = await queryOne<Record<string, unknown>>(
    `SELECT gcp_service_account_email,
CASE WHEN gcp_service_account_key_enc IS NOT NULL THEN '<enc '||length(gcp_service_account_key_enc)||' b64>' ELSE 'null' END AS sa_key,
gcs_default_bucket_name,
gcs_hmac_access_id,
CASE WHEN gcs_hmac_secret_enc IS NOT NULL THEN '<enc '||length(gcs_hmac_secret_enc)||' b64>' ELSE 'null' END AS hmac_secret,
gcp_provision_status,
gcp_provision_error
FROM vibn_workspaces WHERE id = $1`,
    [workspace.id],
  );
  console.log('DB row after:');
  console.log(JSON.stringify(persisted, null, 2));
  process.exit(0);
}
// Top-level runner: surface any rejection that escapes main() and fail the
// process with a non-zero exit so operators / CI notice.
main().catch(err => {
console.error('[provision-workspace-gcs] FAILED:', err);
process.exit(1);
});

View File

@@ -0,0 +1,415 @@
/**
* P5.3 — End-to-end smoke for per-workspace GCS provisioning.
*
* What this exercises (against PROD GCP — master-ai-484822):
* 1. ensureWorkspaceServiceAccount → creates a throwaway SA
* (vibn-ws-smoke-{ts}@…). Idempotent.
* 2. createServiceAccountKey → mints + base64-encodes a JSON key.
* 3. createBucket → creates vibn-ws-smoke-{ts}-{6char}
* in northamerica-northeast1 with uniform bucket-level access ON
* and public access prevention enforced.
* 4. addBucketIamBinding → grants the throwaway SA
* roles/storage.objectAdmin on the bucket only.
* 5. createHmacKey → mints S3-compatible HMAC creds
* tied to the throwaway SA.
* 6. (verify) HMAC PUT/GET → uploads a 12-byte object via the
* GCS XML API using AWS SigV4 with the HMAC creds, reads it back,
* deletes it. Proves the credentials actually work.
*
* Cleanup (best-effort, runs even on failure):
* - Deletes the test object.
* - Deactivates + deletes the HMAC key.
* - Deletes all keys on the SA (so the SA itself can be removed).
* - Deletes the bucket.
* - Deletes the SA.
*
* NO Postgres writes. NO Coolify writes. NO project-level IAM changes.
* Everything created has a "smoke-" prefix and a "purpose=smoke" label
* so leftovers are obvious in the GCP console.
*
* Required env (load from /Users/markhenderson/master-ai/.google.env):
* GOOGLE_SERVICE_ACCOUNT_KEY_B64 base64 of vibn-workspace-provisioner SA JSON
* GCP_PROJECT_ID defaults to master-ai-484822
*
* Usage:
* cd vibn-frontend
* npx -y dotenv-cli -e ../.google.env -- npx tsx scripts/smoke-storage-e2e.ts
*/
import { createHash, createHmac } from 'crypto';
import { GCP_PROJECT_ID } from '../lib/gcp-auth';
import {
ensureWorkspaceServiceAccount,
createServiceAccountKey,
workspaceServiceAccountEmail,
workspaceServiceAccountId,
} from '../lib/gcp/iam';
import {
createBucket,
deleteBucket,
addBucketIamBinding,
getBucketIamPolicy,
createHmacKey,
deleteHmacKey,
workspaceDefaultBucketName,
VIBN_GCS_LOCATION,
} from '../lib/gcp/storage';
// Per-run identifiers: the base36 timestamp keeps concurrent/stale runs from
// colliding, and the "smoke-" prefix makes any leftovers obvious in the GCP
// console (see the header comment about cleanup being best-effort).
const ts = Date.now().toString(36);
const SLUG = `smoke-${ts}`;
const SA_EMAIL = workspaceServiceAccountEmail(SLUG);
const SA_ID = workspaceServiceAccountId(SLUG);
const BUCKET = workspaceDefaultBucketName(SLUG);
// Fixture object used by the HMAC PUT/GET/DELETE round-trip in step 6.
const TEST_OBJECT_KEY = 'smoke/hello.txt';
const TEST_OBJECT_BODY = 'vibn smoke ✓';
/** Print a header describing exactly what this run will touch in GCP. */
function banner(): void {
  const rule = '━'.repeat(72);
  const lines = [
    rule,
    ' VIBN P5.3 GCS provisioning smoke (PROD GCP — master-ai-484822)',
    rule,
    ` project : ${GCP_PROJECT_ID}`,
    ` slug : ${SLUG}`,
    ` SA : ${SA_EMAIL}`,
    ` bucket : ${BUCKET}`,
    ` location : ${VIBN_GCS_LOCATION}`,
    '',
  ];
  for (const line of lines) {
    console.log(line);
  }
}
/**
 * Tracks which GCP resources this run actually created, so cleanup() only
 * attempts to delete things that exist.
 */
interface State {
saCreated: boolean; // SA ensured — triggers SA-keys + SA deletion in cleanup
saKeyName?: string; // full resource name of the minted SA JSON key
bucketCreated: boolean; // bucket exists — triggers bucket deletion in cleanup
hmacAccessId?: string; // HMAC key id — triggers HMAC deletion in cleanup
uploadedObject: boolean; // true while the test object is live in the bucket
}
/**
 * Runs the six smoke steps against PROD GCP in order. Failures are logged
 * and recorded via process.exitCode — never process.exit() — so the
 * `finally` block can still attempt cleanup of whatever was created.
 */
async function main(): Promise<void> {
banner();
// Records what got created so cleanup() only touches real resources.
const state: State = { saCreated: false, bucketCreated: false, uploadedObject: false };
try {
// ── 1. Service account ────────────────────────────────────────────
console.log('[1/6] Ensure service account…');
const sa = await ensureWorkspaceServiceAccount({ slug: SLUG, workspaceName: SLUG });
state.saCreated = true;
console.log(`${sa.email}`);
// ── 2. Service-account key ────────────────────────────────────────
console.log('[2/6] Mint service-account JSON key…');
const key = await createServiceAccountKey(sa.email);
state.saKeyName = key.name;
console.log(` ✓ key.name=${key.name.split('/').slice(-1)[0]} (privateKeyData ${key.privateKeyData.length} chars b64)`);
// ── 3. Bucket ────────────────────────────────────────────────────
console.log('[3/6] Create bucket (uniform BLA on, public-access prevention enforced)…');
const bucket = await createBucket({
name: BUCKET,
location: VIBN_GCS_LOCATION,
enforcePublicAccessPrevention: true,
workspaceSlug: SLUG,
});
state.bucketCreated = true;
console.log(`${bucket.name} in ${bucket.location}`);
// ── 4. Bucket IAM binding ────────────────────────────────────────
console.log('[4/6] Add roles/storage.objectAdmin binding for the workspace SA…');
await addBucketIamBinding({
bucketName: bucket.name,
role: 'roles/storage.objectAdmin',
member: `serviceAccount:${sa.email}`,
});
// Re-read the policy to verify the binding actually landed rather than
// trusting the write call.
const policy = await getBucketIamPolicy(bucket.name);
const binding = policy.bindings?.find(
b => b.role === 'roles/storage.objectAdmin' && b.members.includes(`serviceAccount:${sa.email}`),
);
if (!binding) {
throw new Error('IAM binding did not stick — workspace SA not in objectAdmin members');
}
console.log(` ✓ binding present (${binding.members.length} member(s) on ${binding.role})`);
// ── 5. HMAC key ──────────────────────────────────────────────────
console.log('[5/6] Mint HMAC key for the workspace SA…');
const hmac = await createHmacKey(sa.email);
state.hmacAccessId = hmac.accessId;
console.log(` ✓ accessId=${hmac.accessId} state=${hmac.state}`);
// HMAC keys take a few seconds to become usable on the GCS XML API.
// Without this delay we usually get "InvalidAccessKeyId" on the
// very first request.
console.log(' … waiting 6s for HMAC propagation');
await sleep(6000);
// ── 6. Verify HMAC creds work via S3-compatible XML API ─────────
console.log('[6/6] PUT / GET / DELETE a tiny object via the XML API using HMAC creds…');
await s3PutObject({
accessKeyId: hmac.accessId,
secretAccessKey: hmac.secret,
bucket: bucket.name,
key: TEST_OBJECT_KEY,
body: Buffer.from(TEST_OBJECT_BODY, 'utf-8'),
contentType: 'text/plain; charset=utf-8',
});
state.uploadedObject = true;
console.log(` ✓ PUT ${TEST_OBJECT_KEY}`);
const got = await s3GetObject({
accessKeyId: hmac.accessId,
secretAccessKey: hmac.secret,
bucket: bucket.name,
key: TEST_OBJECT_KEY,
});
if (got.toString('utf-8') !== TEST_OBJECT_BODY) {
throw new Error(`GET body mismatch: ${JSON.stringify(got.toString('utf-8'))}`);
}
console.log(` ✓ GET round-trip body matches`);
await s3DeleteObject({
accessKeyId: hmac.accessId,
secretAccessKey: hmac.secret,
bucket: bucket.name,
key: TEST_OBJECT_KEY,
});
// Object is gone — cleanup() no longer has to account for it.
state.uploadedObject = false;
console.log(` ✓ DELETE`);
console.log('');
console.log('━'.repeat(72));
console.log(' SUMMARY');
console.log('━'.repeat(72));
console.log(' SA create+key : ✓');
console.log(' Bucket create : ✓');
console.log(' Bucket IAM binding : ✓');
console.log(' HMAC key + S3 round-trip : ✓');
console.log('');
console.log(' All 4 building blocks of P5.3 vertical slice proven against PROD GCP.');
} catch (err) {
console.error('');
console.error('[smoke-storage-e2e] FAILED:', err);
// exitCode (not exit()) so the finally/cleanup path below still runs.
process.exitCode = 1;
} finally {
console.log('');
console.log('Cleanup…');
// Cleanup problems are logged but never override the smoke result.
await cleanup(state).catch(err => {
console.error('[cleanup] non-fatal error:', err);
});
}
}
/**
 * Best-effort teardown of everything the smoke run created, in dependency
 * order (HMAC key → bucket → SA keys + SA). Each step logs and keeps going
 * on failure so one stuck resource doesn't strand the others.
 */
async function cleanup(state: State): Promise<void> {
  // Test object: on the happy path it was already deleted in step 6. If the
  // run crashed while the object was still live, the only credential that
  // can delete it (the workspace SA's HMAC secret) existed solely in the
  // failed run's memory, so we cannot remove it here — the bucket delete
  // below will then fail and warn, making the leftover obvious. If this ever
  // matters, implement a force-delete that lists + deletes objects via
  // objects.delete with control-plane credentials.
  //
  // (The original code carried this note inside an empty
  // `if (state.uploadedObject && …) {}` block — a no-op conditional; it is
  // now a plain comment with identical behavior.)

  // HMAC key.
  if (state.hmacAccessId) {
    try {
      await deleteHmacKey(state.hmacAccessId);
      console.log(` ✓ HMAC ${state.hmacAccessId} deleted`);
    } catch (err) {
      console.warn(` ⚠ HMAC delete failed:`, err);
    }
  }

  // Bucket. Must be empty; if a test object survived, this will fail (409)
  // and warn rather than throw.
  if (state.bucketCreated) {
    try {
      await deleteBucket(BUCKET);
      console.log(` ✓ bucket ${BUCKET} deleted`);
    } catch (err) {
      console.warn(` ⚠ bucket delete failed (objects may remain):`, err);
    }
  }

  // SA keys + SA itself (keys are removed first so the SA can be deleted —
  // see deleteAllSaKeysAndSa).
  if (state.saCreated) {
    try {
      await deleteAllSaKeysAndSa(SA_EMAIL);
      console.log(` ✓ SA ${SA_EMAIL} + keys deleted`);
    } catch (err) {
      console.warn(` ⚠ SA cleanup failed:`, err);
    }
  }
}
// ────────────────────────────────────────────────────────────────────
// Helpers — SA cleanup using the IAM API directly (the lib only exposes
// create paths).
// ────────────────────────────────────────────────────────────────────
import { getGcpAccessToken } from '../lib/gcp-auth';
/**
 * Deletes every user-managed key on the given service account, then the SA
 * itself, calling the IAM REST API directly (lib/gcp/iam only exposes
 * create paths).
 *
 * Individual key-delete failures are logged and skipped. A failed SA delete
 * (other than 404 "already gone") throws so the caller's warn path fires.
 *
 * @param email full email of the service account to remove.
 */
async function deleteAllSaKeysAndSa(email: string): Promise<void> {
  const token = await getGcpAccessToken();
  const base = `https://iam.googleapis.com/v1/projects/${GCP_PROJECT_ID}/serviceAccounts/${encodeURIComponent(email)}`;

  // Delete user-managed keys (system-managed keys can't be deleted).
  const listRes = await fetch(`${base}/keys?keyTypes=USER_MANAGED`, {
    headers: { Authorization: `Bearer ${token}` },
  });
  if (listRes.ok) {
    const listJson = (await listRes.json()) as { keys?: { name: string }[] };
    for (const k of listJson.keys ?? []) {
      const id = k.name.split('/').pop();
      if (!id) continue;
      const delRes = await fetch(`${base}/keys/${id}`, {
        method: 'DELETE',
        headers: { Authorization: `Bearer ${token}` },
      });
      if (!delRes.ok && delRes.status !== 404) {
        console.warn(` ⚠ key ${id} delete → ${delRes.status}`);
      }
    }
  } else {
    // Previously a silent skip: a failed listing fell straight through to
    // the SA delete with no diagnostic. Warn so undeleted keys are visible,
    // since they may block the SA delete below.
    console.warn(` ⚠ key list → ${listRes.status}; skipping key deletion`);
  }

  // Delete the SA; 404 is tolerated so cleanup stays idempotent.
  const delRes = await fetch(base, {
    method: 'DELETE',
    headers: { Authorization: `Bearer ${token}` },
  });
  if (!delRes.ok && delRes.status !== 404) {
    throw new Error(`SA delete → ${delRes.status} ${await delRes.text()}`);
  }
}
// ────────────────────────────────────────────────────────────────────
// AWS SigV4 against the GCS XML API
//
// We re-implement SigV4 here rather than pulling in @aws-sdk to keep
// this script dependency-light. GCS treats the bucket as a virtual host
// (https://{bucket}.storage.googleapis.com/{key}) and uses region
// "auto" with service "s3".
// ────────────────────────────────────────────────────────────────────
/** HMAC credential pair used for AWS-SigV4 signing against the GCS XML API. */
interface S3Creds {
accessKeyId: string;
secretAccessKey: string;
}
/**
 * PUT a small object to `gs://{bucket}/{key}` via the GCS XML API using
 * SigV4-signed HMAC credentials. Throws on any non-2xx response.
 */
async function s3PutObject(opts: S3Creds & {
  bucket: string;
  key: string;
  body: Buffer;
  contentType?: string;
}): Promise<void> {
  const url = `https://${opts.bucket}.storage.googleapis.com/${encodeURIComponent(opts.key)}`;
  const res = await sigv4Fetch({
    method: 'PUT',
    url,
    body: opts.body,
    contentType: opts.contentType,
    accessKeyId: opts.accessKeyId,
    secretAccessKey: opts.secretAccessKey,
  });
  // Fix: the original interpolation ran key and status together
  // ("…hello.txt403"); use the ` → ` separator like the other messages here.
  if (!res.ok) throw new Error(`PUT ${opts.key} → ${res.status} ${await res.text()}`);
}
/**
 * GET an object's bytes from `gs://{bucket}/{key}` via the GCS XML API using
 * SigV4-signed HMAC credentials. Throws on any non-2xx response.
 */
async function s3GetObject(opts: S3Creds & { bucket: string; key: string }): Promise<Buffer> {
  const url = `https://${opts.bucket}.storage.googleapis.com/${encodeURIComponent(opts.key)}`;
  const res = await sigv4Fetch({
    method: 'GET',
    url,
    accessKeyId: opts.accessKeyId,
    secretAccessKey: opts.secretAccessKey,
  });
  // Fix: add the ` → ` separator between key and status (was run together).
  if (!res.ok) throw new Error(`GET ${opts.key} → ${res.status} ${await res.text()}`);
  return Buffer.from(await res.arrayBuffer());
}
/**
 * DELETE an object via the GCS XML API. 404 is tolerated so the call is
 * idempotent; any other non-2xx status throws.
 */
async function s3DeleteObject(opts: S3Creds & { bucket: string; key: string }): Promise<void> {
  const url = `https://${opts.bucket}.storage.googleapis.com/${encodeURIComponent(opts.key)}`;
  const res = await sigv4Fetch({
    method: 'DELETE',
    url,
    accessKeyId: opts.accessKeyId,
    secretAccessKey: opts.secretAccessKey,
  });
  if (!res.ok && res.status !== 404) {
    // Fix: add the ` → ` separator between key and status (was run together).
    throw new Error(`DELETE ${opts.key} → ${res.status} ${await res.text()}`);
  }
}
/** Inputs for one SigV4-signed request against the GCS XML API. */
interface SigV4FetchOpts extends S3Creds {
method: 'GET' | 'PUT' | 'DELETE';
url: string;
body?: Buffer; // omitted for GET/DELETE — payload hash falls back to sha256('')
contentType?: string;
}
/**
 * Minimal AWS Signature Version 4 signer + fetch for the GCS XML API in its
 * S3-interoperability mode (region "auto", service "s3"), using the
 * virtual-hosted URL style https://{bucket}.storage.googleapis.com/{key}.
 *
 * NOTE(review): the canonical query string is taken as-is (not re-encoded or
 * sorted). That is fine here because every caller in this file sends no
 * query string — revisit before signing URLs with parameters.
 */
async function sigv4Fetch(opts: SigV4FetchOpts): Promise<Response> {
const { method, url, body, contentType, accessKeyId, secretAccessKey } = opts;
const u = new URL(url);
const host = u.host;
const path = u.pathname || '/';
const query = u.search.slice(1);
// SigV4 timestamps: strip ':', '-' and millis from ISO-8601 to get
// YYYYMMDD'T'HHMMSS'Z', plus its date-only prefix for the credential scope.
const now = new Date();
const amzDate = now.toISOString().replace(/[:-]|\.\d{3}/g, '');
const dateStamp = amzDate.slice(0, 8);
const region = 'auto';
const service = 's3';
// Hex SHA-256 of the request payload (sha256('') when there is no body).
const payloadHash = body
? createHash('sha256').update(body).digest('hex')
: createHash('sha256').update('').digest('hex');
const headers: Record<string, string> = {
host,
'x-amz-date': amzDate,
'x-amz-content-sha256': payloadHash,
};
if (contentType) headers['content-type'] = contentType;
// NOTE(review): content-length participates in the signature; this assumes
// the runtime's fetch sends exactly this value for the Uint8Array body —
// confirm against the HTTP client if signatures ever start failing.
if (body) headers['content-length'] = String(body.length);
// Signed-header list and canonical headers: lowercase names, trimmed
// values, sorted by name, each header terminated by '\n'.
const signedHeaders = Object.keys(headers).map(k => k.toLowerCase()).sort().join(';');
const canonicalHeaders =
Object.keys(headers)
.map(k => [k.toLowerCase(), String(headers[k]).trim()] as const)
.sort(([a], [b]) => a.localeCompare(b))
.map(([k, v]) => `${k}:${v}\n`)
.join('');
const canonicalRequest = [
method,
path,
query,
canonicalHeaders,
signedHeaders,
payloadHash,
].join('\n');
// String-to-sign binds the canonical-request hash to the date/region/service
// credential scope.
const credentialScope = `${dateStamp}/${region}/${service}/aws4_request`;
const stringToSign = [
'AWS4-HMAC-SHA256',
amzDate,
credentialScope,
createHash('sha256').update(canonicalRequest).digest('hex'),
].join('\n');
// Standard SigV4 key-derivation chain: secret → date → region → service →
// signing key; the final HMAC over stringToSign is the request signature.
const kDate = createHmac('sha256', `AWS4${secretAccessKey}`).update(dateStamp).digest();
const kRegion = createHmac('sha256', kDate).update(region).digest();
const kService = createHmac('sha256', kRegion).update(service).digest();
const kSigning = createHmac('sha256', kService).update('aws4_request').digest();
const signature = createHmac('sha256', kSigning).update(stringToSign).digest('hex');
const authorization =
`AWS4-HMAC-SHA256 Credential=${accessKeyId}/${credentialScope}, ` +
`SignedHeaders=${signedHeaders}, Signature=${signature}`;
return fetch(url, {
method,
headers: { ...headers, Authorization: authorization },
body: body ? new Uint8Array(body) : undefined,
});
}
/** Resolve after roughly `ms` milliseconds. */
function sleep(ms: number): Promise<void> {
  return new Promise<void>(resolve => {
    setTimeout(resolve, ms);
  });
}
main();