fix(coolify): strip is_build_time from env writes; add reveal + GCS

Coolify v4's POST/PATCH /applications/{uuid}/envs only accepts key,
value, is_preview, is_literal, is_multiline, is_shown_once. Sending
is_build_time triggers a 422 "This field is not allowed." — it's now
a derived read-only flag (is_buildtime) computed from Dockerfile ARG
usage. Breaks agents trying to upsert env vars.

Three-layer fix so this can't regress:
  - lib/coolify.ts: COOLIFY_ENV_WRITE_FIELDS whitelist enforced at the
    network boundary, regardless of caller shape
  - app/api/workspaces/[slug]/apps/[uuid]/envs: stops forwarding the
    field; returns a deprecation warning when callers send it; GET
    reads both is_buildtime and is_build_time for version parity
  - app/api/mcp/route.ts: same treatment in the MCP dispatcher;
    AI_CAPABILITIES.md doc corrected

Also bundles (not related to the above):
  - Workspace API keys are now revealable from settings. New
    key_encrypted column stores AES-256-GCM(VIBN_SECRETS_KEY, token).
    POST /api/workspaces/[slug]/keys/[keyId]/reveal returns plaintext
    for session principals only; API-key principals cannot reveal
    siblings. Legacy keys stay valid for auth but can't be revealed.
  - P5.3 Object storage: lib/gcp/storage.ts + lib/workspace-gcs.ts
    idempotently provision a per-workspace GCS bucket, service
    account, IAM binding and HMAC key. New POST /api/workspaces/
    [slug]/storage/buckets endpoint. Migration script + smoke test
    included. Proven end-to-end against prod master-ai-484822.

Made-with: Cursor
This commit is contained in:
2026-04-23 11:46:50 -07:00
parent 651ddf1e11
commit 3192e0f7b9
14 changed files with 1794 additions and 37 deletions

View File

@@ -14,6 +14,7 @@
import { createHash, randomBytes } from 'crypto';
import { NextResponse } from 'next/server';
import { authSession } from '@/lib/auth/session-server';
import { encryptSecret, decryptSecret } from '@/lib/auth/secret-box';
import { query, queryOne } from '@/lib/db-postgres';
import {
type VibnWorkspace,
@@ -167,17 +168,24 @@ export async function mintWorkspaceApiKey(opts: {
const token = `${KEY_PREFIX}${random}`;
const hash = hashKey(token);
const prefix = token.slice(0, 12); // e.g. "vibn_sk_AbCd"
// AES-256-GCM encrypt the plaintext so session-authenticated users can
// reveal the key later (see revealWorkspaceApiKey). Encryption uses
// VIBN_SECRETS_KEY — same envelope as Gitea bot PATs and GCS HMAC
// secrets. If that env var isn't set we'd rather fail loudly here
// than silently mint unrevealable keys.
const encrypted = encryptSecret(token);
const inserted = await query<{ id: string; created_at: Date }>(
`INSERT INTO vibn_workspace_api_keys
(workspace_id, name, key_prefix, key_hash, scopes, created_by)
VALUES ($1, $2, $3, $4, $5::jsonb, $6)
(workspace_id, name, key_prefix, key_hash, key_encrypted, scopes, created_by)
VALUES ($1, $2, $3, $4, $5, $6::jsonb, $7)
RETURNING id, created_at`,
[
opts.workspaceId,
opts.name,
prefix,
hash,
encrypted,
JSON.stringify(opts.scopes ?? ['workspace:*']),
opts.createdBy,
]
@@ -193,6 +201,46 @@ export async function mintWorkspaceApiKey(opts: {
};
}
/**
 * Look up an active key in this workspace and decrypt its stored
 * plaintext. Yields `null` whenever the plaintext cannot (or must not)
 * be produced:
 *   - no such key, or it belongs to a different workspace
 *   - the key has been revoked
 *   - the row predates the revealability migration (key_encrypted NULL)
 *   - decryptSecret throws (e.g. VIBN_SECRETS_KEY rotated without
 *     re-provisioning)
 *
 * Deliberately performs NO authorization itself — the caller MUST have
 * already verified a session principal for this workspace. Never expose
 * this behind an API-key principal, or one compromised key could
 * exfiltrate every sibling key.
 */
export async function revealWorkspaceApiKey(
  workspaceId: string,
  keyId: string,
): Promise<{ id: string; name: string; prefix: string; token: string } | null> {
  const record = await queryOne<{
    id: string;
    name: string;
    key_prefix: string;
    key_encrypted: string | null;
    revoked_at: Date | null;
  }>(
    `SELECT id, name, key_prefix, key_encrypted, revoked_at
     FROM vibn_workspace_api_keys
     WHERE id = $1 AND workspace_id = $2
     LIMIT 1`,
    [keyId, workspaceId],
  );
  // Missing, revoked, or legacy (no ciphertext stored) → not revealable.
  if (!record || record.revoked_at || !record.key_encrypted) return null;
  try {
    return {
      id: record.id,
      name: record.name,
      prefix: record.key_prefix,
      token: decryptSecret(record.key_encrypted),
    };
  } catch (err) {
    console.error('[reveal] decrypt failed for key', keyId, err);
    return null;
  }
}
export async function listWorkspaceApiKeys(workspaceId: string): Promise<Array<{
id: string;
name: string;
@@ -202,6 +250,7 @@ export async function listWorkspaceApiKeys(workspaceId: string): Promise<Array<{
last_used_at: Date | null;
revoked_at: Date | null;
created_at: Date;
revealable: boolean;
}>> {
const rows = await query<{
id: string;
@@ -212,8 +261,10 @@ export async function listWorkspaceApiKeys(workspaceId: string): Promise<Array<{
last_used_at: Date | null;
revoked_at: Date | null;
created_at: Date;
revealable: boolean;
}>(
`SELECT id, name, key_prefix, scopes, created_by, last_used_at, revoked_at, created_at
`SELECT id, name, key_prefix, scopes, created_by, last_used_at, revoked_at, created_at,
(key_encrypted IS NOT NULL) AS revealable
FROM vibn_workspace_api_keys
WHERE workspace_id = $1
ORDER BY created_at DESC`,
@@ -228,6 +279,7 @@ export async function listWorkspaceApiKeys(workspaceId: string): Promise<Array<{
last_used_at: r.last_used_at,
revoked_at: r.revoked_at,
created_at: r.created_at,
revealable: r.revealable,
}));
}

View File

@@ -61,15 +61,68 @@ export interface CoolifyApplication {
environment?: { id?: number; project_uuid?: string; project?: { uuid?: string } };
}
/**
* Coolify env var, as returned by GET /applications/{uuid}/envs.
*
* NOTE on build-time vars: Coolify removed `is_build_time` from the
* **write** schema some time ago. The flag is now a derived read-only
* attribute (`is_buildtime`, one word) computed from whether the var
* is referenced as a Dockerfile ARG. `is_build_time` (underscored) is
* kept here only to tolerate very old read responses — never send it
* on POST/PATCH. See `COOLIFY_ENV_WRITE_FIELDS` below.
*/
export interface CoolifyEnvVar {
  /** Coolify's own identifier for the var; present on reads. */
  uuid?: string;
  key: string;
  value: string;
  /** Scopes the var to preview deployments; accepted on writes. */
  is_preview?: boolean;
  /** @deprecated read-only, derived server-side. Do not send on write. */
  is_build_time?: boolean;
  /** Newer one-word spelling of the same derived read-only flag. */
  is_buildtime?: boolean;
  // NOTE(review): presumably the runtime-availability counterpart of
  // is_buildtime, read-only — confirm against Coolify's API reference.
  is_runtime?: boolean;
  is_literal?: boolean;
  is_multiline?: boolean;
  is_shown_once?: boolean;
  // NOTE(review): appears on read responses only; not part of the write
  // schema — verify before ever forwarding it.
  is_shared?: boolean;
}
/**
 * The only fields Coolify v4 accepts on POST/PATCH /applications/{uuid}/envs.
 * Anything else (notably `is_build_time`) triggers a 422
 * "This field is not allowed." Build-time vs runtime stopped being a
 * writable flag — Coolify derives it during the build.
 *
 * Source of truth:
 * https://coolify.io/docs/api-reference/api/operations/update-env-by-application-uuid
 * https://coolify.io/docs/api-reference/api/operations/create-env-by-application-uuid
 */
const COOLIFY_ENV_WRITE_FIELDS = [
  'key',
  'value',
  'is_preview',
  'is_literal',
  'is_multiline',
  'is_shown_once',
] as const;

type CoolifyEnvWritePayload = {
  key: string;
  value: string;
  is_preview?: boolean;
  is_literal?: boolean;
  is_multiline?: boolean;
  is_shown_once?: boolean;
};

/**
 * Projects an env var down to the exact write schema: whitelisted keys
 * only, and only when the caller actually set them (undefined entries
 * are dropped rather than serialized as null).
 */
function toCoolifyEnvWritePayload(env: CoolifyEnvVar): CoolifyEnvWritePayload {
  const source = env as unknown as Record<string, unknown>;
  const entries = COOLIFY_ENV_WRITE_FIELDS
    .filter((field) => source[field] !== undefined)
    .map((field) => [field, source[field]] as const);
  return Object.fromEntries(entries) as CoolifyEnvWritePayload;
}
export interface CoolifyPrivateKey {
@@ -539,17 +592,22 @@ export async function upsertApplicationEnv(
uuid: string,
env: CoolifyEnvVar & { is_preview?: boolean }
): Promise<CoolifyEnvVar> {
// Strip any read-only/derived fields (`is_build_time`, `is_buildtime`,
// `is_runtime`, `is_shared`, `uuid`) before sending — Coolify returns
// 422 "This field is not allowed." for anything outside the write
// schema. See COOLIFY_ENV_WRITE_FIELDS.
const payload = toCoolifyEnvWritePayload(env);
try {
return await coolifyFetch(`/applications/${uuid}/envs`, {
method: 'PATCH',
body: JSON.stringify(env),
body: JSON.stringify(payload),
});
} catch (err) {
const msg = err instanceof Error ? err.message : String(err);
if (msg.includes('404') || msg.includes('405')) {
return coolifyFetch(`/applications/${uuid}/envs`, {
method: 'POST',
body: JSON.stringify(env),
body: JSON.stringify(payload),
});
}
throw err;

145
lib/gcp/iam.ts Normal file
View File

@@ -0,0 +1,145 @@
/**
* Google Cloud IAM driver — service-account creation + key minting.
*
* Auth uses the shared `vibn-workspace-provisioner` SA via getGcpAccessToken().
* That SA needs `roles/iam.serviceAccountAdmin` and `roles/iam.serviceAccountKeyAdmin`
* at the project level, plus `roles/iam.serviceAccountUser` so it can act as the
* SAs it creates.
*
* All calls go through https://iam.googleapis.com/v1.
*/
import { getGcpAccessToken, GCP_PROJECT_ID } from '@/lib/gcp-auth';
const IAM_API = 'https://iam.googleapis.com/v1';
/**
 * Fetch wrapper that authenticates as the shared provisioner SA and
 * JSON-serializes the optional body.
 */
async function authedFetch(
  method: 'GET' | 'POST' | 'DELETE' | 'PATCH',
  url: string,
  body?: unknown,
): Promise<Response> {
  const token = await getGcpAccessToken();
  const headers: Record<string, string> = {
    Authorization: `Bearer ${token}`,
    Accept: 'application/json',
  };
  // Check `!== undefined` (not truthiness) so a falsy-but-present body
  // (0, '', false) still gets a Content-Type header — this also keeps
  // the helper consistent with its twin in lib/gcp/storage.ts.
  if (body !== undefined) headers['Content-Type'] = 'application/json';
  return fetch(url, {
    method,
    headers,
    body: body === undefined ? undefined : JSON.stringify(body),
  });
}
/**
 * Reads the response body; on non-2xx throws with status + a truncated
 * body excerpt, otherwise parses JSON ({} for an empty body).
 */
async function parseOrThrow<T>(res: Response, context: string): Promise<T> {
  const raw = await res.text();
  if (!res.ok) {
    throw new Error(`[gcp-iam ${context} ${res.status}] ${raw.slice(0, 500)}`);
  }
  if (!raw) return {} as T;
  return JSON.parse(raw) as T;
}
// ────────────────────────────────────────────────────────────────────
// Service-account naming
// ────────────────────────────────────────────────────────────────────
/**
 * GCP service-account IDs are 6-30 chars, [a-z][a-z0-9-]{4,28}[a-z0-9].
 * Some workspace slugs are too long or have edge characters, so normalize.
 */
export function workspaceServiceAccountId(slug: string): string {
  const safe = slug.toLowerCase().replace(/[^a-z0-9-]/g, '-').replace(/-+/g, '-');
  // Reserve "vibn-ws-" prefix (8 chars) → up to 22 left for the slug.
  // Strip hyphens again AFTER the 22-char cut: the slice can land on a
  // "-" and GCP rejects account IDs whose last char isn't [a-z0-9].
  const trimmed =
    safe.replace(/^-+|-+$/g, '').slice(0, 22).replace(/-+$/g, '') || 'workspace';
  // Pad very short slugs so the overall ID clears GCP's minimum length.
  const padded = trimmed.length < 4 ? `${trimmed}-ws` : trimmed;
  return `vibn-ws-${padded}`;
}

/** Email of the workspace's dedicated SA inside `projectId`. */
export function workspaceServiceAccountEmail(slug: string, projectId = GCP_PROJECT_ID): string {
  return `${workspaceServiceAccountId(slug)}@${projectId}.iam.gserviceaccount.com`;
}
// ────────────────────────────────────────────────────────────────────
// Service-account CRUD
// ────────────────────────────────────────────────────────────────────
/** Subset of the IAM ServiceAccount resource that this module reads. */
export interface GcpServiceAccount {
  /** Resource name, e.g. projects/<project>/serviceAccounts/<email>. */
  name: string;
  email: string;
  // NOTE(review): opaque ID assigned by GCP — not used by this code yet.
  uniqueId: string;
  displayName?: string;
  description?: string;
}
/** Fetch a service account by email; null when it doesn't exist. */
export async function getServiceAccount(email: string): Promise<GcpServiceAccount | null> {
  const res = await authedFetch(
    'GET',
    `${IAM_API}/projects/${GCP_PROJECT_ID}/serviceAccounts/${encodeURIComponent(email)}`,
  );
  // 404 means "no such SA" — callers branch on null, not on an error.
  if (res.status === 404) return null;
  return parseOrThrow<GcpServiceAccount>(res, 'getServiceAccount');
}
/**
 * Creates a service account in the shared project. A 409 (concurrent
 * creation) degrades to a lookup of the existing account.
 */
export async function createServiceAccount(opts: {
  accountId: string;
  displayName: string;
  description?: string;
}): Promise<GcpServiceAccount> {
  const res = await authedFetch('POST', `${IAM_API}/projects/${GCP_PROJECT_ID}/serviceAccounts`, {
    accountId: opts.accountId,
    serviceAccount: {
      displayName: opts.displayName,
      description: opts.description,
    },
  });
  if (res.status !== 409) {
    return parseOrThrow<GcpServiceAccount>(res, 'createServiceAccount');
  }
  // Race-safe path: someone else just created it — return theirs.
  const email = `${opts.accountId}@${GCP_PROJECT_ID}.iam.gserviceaccount.com`;
  const existing = await getServiceAccount(email);
  if (existing) return existing;
  // Conflict reported but the SA isn't readable — surface the raw error.
  return parseOrThrow<GcpServiceAccount>(res, 'createServiceAccount');
}
/**
 * Idempotently ensures the workspace's SA exists and returns the
 * account resource (lookup first, create on miss).
 */
export async function ensureWorkspaceServiceAccount(opts: {
  slug: string;
  workspaceName?: string;
}): Promise<GcpServiceAccount> {
  const found = await getServiceAccount(workspaceServiceAccountEmail(opts.slug));
  if (found) return found;
  const label = opts.workspaceName ?? opts.slug;
  return createServiceAccount({
    accountId: workspaceServiceAccountId(opts.slug),
    displayName: `Vibn workspace: ${label}`,
    description: `Auto-provisioned by Vibn for workspace "${opts.slug}". Owns workspace-scoped GCS bucket(s) and (eventually) Cloud Tasks queues + Scheduler jobs.`,
  });
}
// ────────────────────────────────────────────────────────────────────
// Service-account key minting
//
// We mint a JSON keyfile per workspace once at provision time and store
// it encrypted. Currently only used so app code can authenticate as the
// workspace's SA (e.g. to call GCS / Cloud Tasks from inside a deployed
// container). The control-plane itself uses the shared provisioner SA.
// ────────────────────────────────────────────────────────────────────
export interface GcpServiceAccountKey {
/** Resource name, e.g. projects/.../serviceAccounts/.../keys/<id>. */
name: string;
/** Base64-encoded JSON keyfile (Google's privateKeyData format). */
privateKeyData: string;
}
/** Mints a JSON keyfile for the SA (RSA-2048, Google credentials format). */
export async function createServiceAccountKey(saEmail: string): Promise<GcpServiceAccountKey> {
  const url =
    `${IAM_API}/projects/${GCP_PROJECT_ID}/serviceAccounts/${encodeURIComponent(saEmail)}/keys`;
  const res = await authedFetch('POST', url, {
    privateKeyType: 'TYPE_GOOGLE_CREDENTIALS_FILE',
    keyAlgorithm: 'KEY_ALG_RSA_2048',
  });
  return parseOrThrow<GcpServiceAccountKey>(res, 'createServiceAccountKey');
}

341
lib/gcp/storage.ts Normal file
View File

@@ -0,0 +1,341 @@
/**
* Google Cloud Storage driver for per-workspace buckets.
*
* Auth uses the shared `vibn-workspace-provisioner` SA via
* getGcpAccessToken(). That SA needs:
* - roles/storage.admin (create/delete buckets, set IAM)
* - roles/storage.hmacKeyAdmin (mint per-workspace HMAC keys)
*
* All resources are pinned to `northamerica-northeast1` (Montreal) per
* the §0 Substrate constraint. Calls to other regions are refused at
* this layer rather than relying on org policy alone.
*
* APIs:
* - JSON API: https://storage.googleapis.com/storage/v1/... (bucket + IAM)
* - HMAC keys also live under JSON API at .../projects/_/hmacKeys
*/
import { getGcpAccessToken, GCP_PROJECT_ID } from '@/lib/gcp-auth';
const STORAGE_API = 'https://storage.googleapis.com/storage/v1';
/** The only GCS location we will ever provision into. */
export const VIBN_GCS_LOCATION = 'northamerica-northeast1';
/**
 * Fetch wrapper that authenticates as the shared provisioner SA and
 * JSON-serializes the optional body (Content-Type only when present).
 */
async function authedFetch(
  method: 'GET' | 'POST' | 'DELETE' | 'PATCH' | 'PUT',
  url: string,
  body?: unknown,
): Promise<Response> {
  const token = await getGcpAccessToken();
  const hasBody = body !== undefined;
  const headers: Record<string, string> = {
    Authorization: `Bearer ${token}`,
    Accept: 'application/json',
  };
  if (hasBody) headers['Content-Type'] = 'application/json';
  return fetch(url, {
    method,
    headers,
    body: hasBody ? JSON.stringify(body) : undefined,
  });
}
/**
 * Reads the body; throws on non-2xx with status + truncated excerpt,
 * otherwise parses JSON ({} when the body is empty).
 */
async function parseOrThrow<T>(res: Response, context: string): Promise<T> {
  const raw = await res.text();
  if (!res.ok) {
    throw new Error(`[gcs ${context} ${res.status}] ${raw.slice(0, 500)}`);
  }
  if (!raw) return {} as T;
  return JSON.parse(raw) as T;
}
// ────────────────────────────────────────────────────────────────────
// Bucket naming
// ────────────────────────────────────────────────────────────────────
/**
 * GCS bucket names are globally unique across ALL of Google Cloud, so
 * we suffix the workspace slug with a deterministic-but-collision-resistant
 * 6-char hash derived from `${projectId}/${slug}`. Same workspace + project
 * → same bucket name on retry; different projects → no collision.
 *
 * Format: vibn-ws-<slug>-<6char> (≤63 chars, lowercase, no underscores).
 */
export function workspaceDefaultBucketName(slug: string, projectId = GCP_PROJECT_ID): string {
  const safe = slug.toLowerCase().replace(/[^a-z0-9-]/g, '-').replace(/-+/g, '-')
    .replace(/^-+|-+$/g, '');
  // Reserve 8 chars for "vibn-ws-" + 7 for "-<6char>" = 15 → up to 48 chars for slug.
  // Re-strip trailing hyphens AFTER the cut so a slice landing on a "-"
  // doesn't leave an ugly "--" run before the hash suffix.
  const trimmed = safe.slice(0, 48).replace(/-+$/g, '') || 'workspace';
  const hash = shortHash(`${projectId}/${slug}`);
  return `vibn-ws-${trimmed}-${hash}`;
}

function shortHash(input: string): string {
  // Tiny non-crypto FNV-1a-style hash → 6 base-36 chars. Good enough to
  // disambiguate bucket names; NOT for security. Do not change these
  // constants: already-provisioned bucket names depend on this mapping.
  let h = 2166136261 >>> 0;
  for (let i = 0; i < input.length; i++) {
    h ^= input.charCodeAt(i);
    h = Math.imul(h, 16777619) >>> 0;
  }
  return h.toString(36).padStart(6, '0').slice(0, 6);
}
// ────────────────────────────────────────────────────────────────────
// Bucket types + CRUD
// ────────────────────────────────────────────────────────────────────
export interface GcsBucket {
name: string;
location: string;
storageClass?: string;
selfLink?: string;
timeCreated?: string;
labels?: Record<string, string>;
iamConfiguration?: {
uniformBucketLevelAccess?: { enabled: boolean };
publicAccessPrevention?: 'inherited' | 'enforced';
};
}
/** Fetch bucket metadata; null when the bucket doesn't exist. */
export async function getBucket(bucketName: string): Promise<GcsBucket | null> {
  const res = await authedFetch('GET', `${STORAGE_API}/b/${encodeURIComponent(bucketName)}`);
  // 404 → absent; callers use null to decide whether to create.
  return res.status === 404 ? null : parseOrThrow<GcsBucket>(res, 'getBucket');
}
/**
 * Creates a bucket pinned to VIBN_GCS_LOCATION with uniform bucket-level
 * access. Public access is blocked unless explicitly opted out. A 409
 * (already exists) degrades to a lookup of the existing bucket.
 */
export async function createBucket(opts: {
  name: string;
  /** Defaults to VIBN_GCS_LOCATION; explicit other values are refused. */
  location?: string;
  /** Defaults to STANDARD. */
  storageClass?: 'STANDARD' | 'NEARLINE' | 'COLDLINE' | 'ARCHIVE';
  /** When true, blocks public access at the bucket-level. Default: true. */
  enforcePublicAccessPrevention?: boolean;
  /** Workspace label so we can list-by-tenant later. */
  workspaceSlug?: string;
}): Promise<GcsBucket> {
  // Residency guard: refuse anything outside the pinned region up front.
  const location = opts.location ?? VIBN_GCS_LOCATION;
  if (location !== VIBN_GCS_LOCATION) {
    throw new Error(
      `[gcs createBucket] Refused: location=${location}. Vibn buckets must be in ${VIBN_GCS_LOCATION} for Canadian residency.`,
    );
  }
  const preventPublic = opts.enforcePublicAccessPrevention !== false;
  const body: Record<string, unknown> = {
    name: opts.name,
    location,
    storageClass: opts.storageClass ?? 'STANDARD',
    iamConfiguration: {
      uniformBucketLevelAccess: { enabled: true },
      publicAccessPrevention: preventPublic ? 'enforced' : 'inherited',
    },
  };
  if (opts.workspaceSlug) {
    body.labels = { workspace: opts.workspaceSlug, managed_by: 'vibn' };
  }
  const res = await authedFetch(
    'POST',
    `${STORAGE_API}/b?project=${encodeURIComponent(GCP_PROJECT_ID)}`,
    body,
  );
  if (res.status !== 409) return parseOrThrow<GcsBucket>(res, 'createBucket');
  // Already exists: treat as success if we can read it back.
  const existing = await getBucket(opts.name);
  if (existing) return existing;
  throw new Error(`[gcs createBucket] 409 conflict on ${opts.name} but bucket not retrievable`);
}
/** Deletes the bucket; an already-gone bucket (404) is a no-op. */
export async function deleteBucket(bucketName: string): Promise<void> {
  const res = await authedFetch('DELETE', `${STORAGE_API}/b/${encodeURIComponent(bucketName)}`);
  if (res.status !== 404) await parseOrThrow(res, 'deleteBucket');
}
// ────────────────────────────────────────────────────────────────────
// Bucket IAM bindings
//
// We keep bucket policies bucket-scoped (objectAdmin only on this bucket)
// rather than granting project-wide storage roles to per-workspace SAs.
// ────────────────────────────────────────────────────────────────────
interface IamBinding {
role: string;
members: string[];
condition?: { title: string; expression: string };
}
interface IamPolicy {
version?: number;
etag?: string;
bindings?: IamBinding[];
}
/** Reads the bucket's IAM policy (requesting policy version 3). */
export async function getBucketIamPolicy(bucketName: string): Promise<IamPolicy> {
  const url = `${STORAGE_API}/b/${encodeURIComponent(bucketName)}/iam?optionsRequestedPolicyVersion=3`;
  return parseOrThrow<IamPolicy>(await authedFetch('GET', url), 'getBucketIamPolicy');
}

/** Replaces the bucket's IAM policy wholesale. */
async function setBucketIamPolicy(bucketName: string, policy: IamPolicy): Promise<IamPolicy> {
  const url = `${STORAGE_API}/b/${encodeURIComponent(bucketName)}/iam`;
  return parseOrThrow<IamPolicy>(await authedFetch('PUT', url, policy), 'setBucketIamPolicy');
}
/**
 * Idempotently grants `member` (e.g. `serviceAccount:foo@…`) the given
 * role on the bucket. Returns the updated policy.
 *
 * Retries with backoff on "Service account ... does not exist" because
 * GCP IAM has eventual consistency between the IAM API (which knows
 * about a freshly-created SA immediately) and the GCS bucket-policy
 * service (which can take a few seconds to learn about it). Without
 * this retry, the very first call right after createServiceAccount()
 * fails ~50% of the time.
 */
export async function addBucketIamBinding(opts: {
  bucketName: string;
  role: string;
  member: string;
}): Promise<IamPolicy> {
  // Linear backoff: 1.5s, 3s, 4.5s, … across up to 6 attempts.
  const maxAttempts = 6;
  const baseDelayMs = 1500;
  let lastErr: unknown;
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      // Read-modify-write of the full policy. Only the unconditional
      // binding for the role is merged; conditional bindings are ignored.
      const current = await getBucketIamPolicy(opts.bucketName);
      const bindings = current.bindings ?? [];
      const existing = bindings.find(b => b.role === opts.role && !b.condition);
      // Already granted → nothing to write; return the policy as-is.
      if (existing && existing.members.includes(opts.member)) return current;
      if (existing) {
        // Set-union so a concurrent writer's members aren't dropped.
        existing.members = [...new Set([...existing.members, opts.member])];
      } else {
        bindings.push({ role: opts.role, members: [opts.member] });
      }
      // NOTE(review): a concurrent policy write can surface as an etag
      // conflict here; that error is NOT matched below and will
      // propagate instead of retrying — confirm whether that's intended.
      return await setBucketIamPolicy(opts.bucketName, { ...current, bindings });
    } catch (err) {
      lastErr = err;
      const msg = err instanceof Error ? err.message : String(err);
      // Heuristic match on propagation-lag errors only; anything else
      // (auth, quota, bad request on our side) rethrows immediately.
      const isPropagation =
        /does not exist/i.test(msg) ||
        /Invalid argument/i.test(msg) ||
        /Service account .* does not exist/i.test(msg);
      if (!isPropagation || attempt === maxAttempts - 1) throw err;
      await new Promise(r => setTimeout(r, baseDelayMs * (attempt + 1)));
    }
  }
  throw lastErr ?? new Error('addBucketIamBinding: exhausted retries');
}
// ────────────────────────────────────────────────────────────────────
// HMAC keys (S3-compatibility credentials for app code)
//
// HMAC keys belong to a service account and let standard S3 SDKs
// authenticate against the GCS XML API at storage.googleapis.com. We
// mint one per workspace SA so app code can read/write the workspace's
// bucket using the AWS SDK without us shipping a Google-shaped JSON key
// into the container.
// ────────────────────────────────────────────────────────────────────
export interface GcsHmacKey {
/** Public access ID (looks like an AWS access key id; safe to log). */
accessId: string;
/** Plaintext secret (40 base64 chars). Returned ONCE on creation. */
secret: string;
/** Resource name. */
resourceName?: string;
/** ACTIVE / INACTIVE / DELETED. */
state?: string;
serviceAccountEmail?: string;
}
interface HmacKeyMetadata {
accessId: string;
state: string;
serviceAccountEmail: string;
resourceName?: string;
timeCreated?: string;
}
/**
 * Mints an HMAC key for the service account. The plaintext `secret` is
 * returned by GCP exactly once, on this call — persist it immediately.
 */
export async function createHmacKey(serviceAccountEmail: string): Promise<GcsHmacKey> {
  // Retry-with-backoff on 404 because the GCS HMAC subsystem has the
  // same eventual-consistency lag as bucket-IAM: the SA is real to
  // iam.googleapis.com immediately, but storage.googleapis.com may
  // 404 on it for several seconds after creation.
  const url = `${STORAGE_API}/projects/${encodeURIComponent(
    GCP_PROJECT_ID,
  )}/hmacKeys?serviceAccountEmail=${encodeURIComponent(serviceAccountEmail)}`;
  // Linear backoff: 1.5s, 3s, 4.5s, … across up to 6 attempts.
  const maxAttempts = 6;
  const baseDelayMs = 1500;
  let lastErr: unknown;
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      const res = await authedFetch('POST', url);
      // Body layout per docs: { kind, secret, metadata: { accessId, state, ... } }
      const json = await parseOrThrow<{
        secret: string;
        metadata: HmacKeyMetadata;
      }>(res, 'createHmacKey');
      // Flatten secret + metadata into the single shape callers expect.
      return {
        accessId: json.metadata.accessId,
        secret: json.secret,
        resourceName: json.metadata.resourceName,
        state: json.metadata.state,
        serviceAccountEmail: json.metadata.serviceAccountEmail,
      };
    } catch (err) {
      lastErr = err;
      const msg = err instanceof Error ? err.message : String(err);
      // parseOrThrow embeds the HTTP status in the message, so "404"
      // matches propagation-lag failures; the textual variants cover
      // APIs that phrase it as "not found"/"does not exist".
      const isPropagation = /not found|does not exist|404/i.test(msg);
      if (!isPropagation || attempt === maxAttempts - 1) throw err;
      await new Promise(r => setTimeout(r, baseDelayMs * (attempt + 1)));
    }
  }
  throw lastErr ?? new Error('createHmacKey: exhausted retries');
}
/** Lists the SA's non-deleted HMAC keys (metadata only — no secrets). */
export async function listHmacKeysForServiceAccount(
  serviceAccountEmail: string,
): Promise<HmacKeyMetadata[]> {
  const url =
    `${STORAGE_API}/projects/${encodeURIComponent(GCP_PROJECT_ID)}` +
    `/hmacKeys?serviceAccountEmail=${encodeURIComponent(serviceAccountEmail)}&showDeletedKeys=false`;
  const json = await parseOrThrow<{ items?: HmacKeyMetadata[] }>(
    await authedFetch('GET', url),
    'listHmacKeys',
  );
  return json.items ?? [];
}
/** Flips the key to INACTIVE (required before it can be deleted). */
export async function deactivateHmacKey(accessId: string): Promise<void> {
  const url = `${STORAGE_API}/projects/${encodeURIComponent(
    GCP_PROJECT_ID,
  )}/hmacKeys/${encodeURIComponent(accessId)}`;
  await parseOrThrow(await authedFetch('PUT', url, { state: 'INACTIVE' }), 'deactivateHmacKey');
}

/**
 * Deletes the key. GCS requires INACTIVE before DELETE, so a best-effort
 * deactivation runs first; an already-deleted key (404) is a no-op.
 */
export async function deleteHmacKey(accessId: string): Promise<void> {
  try {
    await deactivateHmacKey(accessId);
  } catch (err) {
    // Ignore "already inactive" errors so cleanup stays idempotent.
    const msg = err instanceof Error ? err.message : String(err);
    if (!/already inactive|400/i.test(msg)) throw err;
  }
  const url = `${STORAGE_API}/projects/${encodeURIComponent(
    GCP_PROJECT_ID,
  )}/hmacKeys/${encodeURIComponent(accessId)}`;
  const res = await authedFetch('DELETE', url);
  if (res.status !== 404) await parseOrThrow(res, 'deleteHmacKey');
}

280
lib/workspace-gcs.ts Normal file
View File

@@ -0,0 +1,280 @@
/**
* Per-workspace GCS provisioning (P5.3).
*
* Idempotently sets up everything a workspace needs to do object storage:
* 1. A dedicated GCP service account (vibn-ws-{slug}@…)
* 2. A JSON keyfile for that SA (encrypted at rest)
* 3. A default GCS bucket (vibn-ws-{slug}-{6char}) in northamerica-northeast1
* 4. A bucket-scoped roles/storage.objectAdmin binding for the SA
* 5. An HMAC key on the SA so app code can use AWS S3 SDKs
*
* Persists IDs + encrypted secrets onto vibn_workspaces. Safe to re-run;
* each step is idempotent and short-circuits when already complete.
*
* Required schema migration: scripts/migrate-workspace-gcs.sql
*
* The control plane itself never decrypts the per-workspace SA key — it
* always authenticates as the shared `vibn-workspace-provisioner`. The
* per-workspace credentials exist solely to be injected into deployed
* Coolify apps as STORAGE_* env vars (see app env injection in
* apps/route.ts when wired up).
*/
import { query, queryOne } from '@/lib/db-postgres';
import { encryptSecret, decryptSecret } from '@/lib/auth/secret-box';
import {
ensureWorkspaceServiceAccount,
workspaceServiceAccountEmail,
createServiceAccountKey,
} from '@/lib/gcp/iam';
import {
createBucket,
getBucket,
addBucketIamBinding,
createHmacKey,
listHmacKeysForServiceAccount,
workspaceDefaultBucketName,
VIBN_GCS_LOCATION,
type GcsBucket,
} from '@/lib/gcp/storage';
import type { VibnWorkspace } from '@/lib/workspaces';
/**
* Extra columns added by scripts/migrate-workspace-gcs.sql. We model
* them as a separate interface so the existing `VibnWorkspace` shape
* doesn't have to be touched until every caller is ready.
*/
export interface VibnWorkspaceGcs {
gcp_service_account_email: string | null;
gcp_service_account_key_enc: string | null;
gcs_default_bucket_name: string | null;
gcs_hmac_access_id: string | null;
gcs_hmac_secret_enc: string | null;
gcp_provision_status: 'pending' | 'partial' | 'ready' | 'error';
gcp_provision_error: string | null;
}
export type WorkspaceGcs = VibnWorkspace & VibnWorkspaceGcs;
/**
 * Loads the workspace row including the GCS provisioning columns.
 * SELECT * so the columns added by migrate-workspace-gcs.sql come along
 * without enumerating them here.
 */
export async function getWorkspaceGcsState(workspaceId: string): Promise<WorkspaceGcs | null> {
  const row = await queryOne<WorkspaceGcs>(
    `SELECT * FROM vibn_workspaces WHERE id = $1`,
    [workspaceId],
  );
  return row;
}
/** What we tell the API caller after a successful provision. */
export interface WorkspaceGcsResult {
serviceAccountEmail: string;
bucket: {
name: string;
location: string;
selfLink?: string;
timeCreated?: string;
};
hmac: {
accessId: string;
};
status: VibnWorkspaceGcs['gcp_provision_status'];
}
/**
 * Idempotent: ensures the workspace has a GCP SA + key + default bucket
 * + IAM binding + HMAC key. Updates vibn_workspaces with the resulting
 * identifiers (key + secret stored encrypted). Returns a flat summary
 * suitable for sending back to the API caller.
 *
 * Throws on any irrecoverable error; transient/partial failures land in
 * the row's gcp_provision_status='partial' with the message in
 * gcp_provision_error.
 */
export async function ensureWorkspaceGcsProvisioned(
  workspace: VibnWorkspace,
): Promise<WorkspaceGcsResult> {
  // Prefer the freshest DB row (it carries the GCS columns); fall back
  // to the caller's copy if the row can't be read.
  const ws = (await getWorkspaceGcsState(workspace.id)) ?? (workspace as WorkspaceGcs);
  // ── Short-circuit if everything is already there.
  if (
    ws.gcp_provision_status === 'ready' &&
    ws.gcp_service_account_email &&
    ws.gcs_default_bucket_name &&
    ws.gcs_hmac_access_id
  ) {
    // Verify the bucket still exists before trusting the 'ready' status.
    const existing = await getBucket(ws.gcs_default_bucket_name);
    if (existing) {
      return {
        serviceAccountEmail: ws.gcp_service_account_email,
        bucket: {
          name: existing.name,
          location: existing.location,
          selfLink: existing.selfLink,
          timeCreated: existing.timeCreated,
        },
        hmac: { accessId: ws.gcs_hmac_access_id },
        status: 'ready',
      };
    }
    // Bucket vanished out from under us (manual gcloud delete?). Fall
    // through and re-provision; the SA + HMAC can stay.
  }
  // Working copies of persisted identifiers; each step fills in what's
  // missing and the final UPDATE writes them back via COALESCE.
  let saEmail = ws.gcp_service_account_email;
  let saKeyEnc = ws.gcp_service_account_key_enc;
  let bucketName = ws.gcs_default_bucket_name;
  let hmacAccessId = ws.gcs_hmac_access_id;
  let hmacSecretEnc = ws.gcs_hmac_secret_enc;
  let bucket: GcsBucket | null = null;
  // Per-step failure messages; any entry → status 'partial', not a throw.
  const errors: string[] = [];
  // ── 1. Service account ─────────────────────────────────────────────
  try {
    const sa = await ensureWorkspaceServiceAccount({
      slug: workspace.slug,
      workspaceName: workspace.name,
    });
    saEmail = sa.email;
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    errors.push(`gcp-sa: ${msg}`);
    // Fall back to the deterministic email so later state stays coherent.
    saEmail = saEmail ?? workspaceServiceAccountEmail(workspace.slug);
  }
  // ── 2. SA keyfile ─────────────────────────────────────────────────
  // Mint once. Rotation is a separate flow (Tier 2 territory).
  // Skipped entirely when step 1 failed — the SA may not actually exist.
  if (!saKeyEnc && saEmail && !errors.some(e => e.startsWith('gcp-sa:'))) {
    try {
      const key = await createServiceAccountKey(saEmail);
      // privateKeyData is already base64; we encrypt the whole base64
      // payload so the column can stay TEXT and reuse secret-box.
      saKeyEnc = encryptSecret(key.privateKeyData);
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      errors.push(`gcp-sa-key: ${msg}`);
    }
  }
  // ── 3. Default bucket ──────────────────────────────────────────────
  if (!bucketName) bucketName = workspaceDefaultBucketName(workspace.slug);
  if (!errors.some(e => e.startsWith('gcp-sa:'))) {
    try {
      // Lookup first; create only on miss (createBucket also tolerates 409).
      bucket =
        (await getBucket(bucketName)) ??
        (await createBucket({
          name: bucketName,
          location: VIBN_GCS_LOCATION,
          enforcePublicAccessPrevention: true,
          workspaceSlug: workspace.slug,
        }));
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      errors.push(`gcs-bucket: ${msg}`);
    }
  }
  // ── 4. Bucket IAM binding for the workspace SA ─────────────────────
  if (bucket && saEmail) {
    try {
      await addBucketIamBinding({
        bucketName: bucket.name,
        role: 'roles/storage.objectAdmin',
        member: `serviceAccount:${saEmail}`,
      });
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      errors.push(`gcs-iam: ${msg}`);
    }
  }
  // ── 5. HMAC key for app code ───────────────────────────────────────
  // Only mint if we don't already have one. GCS limits 5 active keys
  // per SA; we never want to thrash this.
  if (saEmail && !hmacAccessId) {
    try {
      // Defensive: if a previous run minted a key but failed before
      // saving, reuse the existing ACTIVE one instead of stacking.
      const existingHmacs = await listHmacKeysForServiceAccount(saEmail);
      const active = existingHmacs.find(k => k.state === 'ACTIVE');
      if (active) {
        hmacAccessId = active.accessId;
        // We can't recover the secret of a previously-minted key; leave
        // the encrypted secret null and let the operator rotate if they
        // need it injected.
      } else {
        const minted = await createHmacKey(saEmail);
        hmacAccessId = minted.accessId;
        hmacSecretEnc = encryptSecret(minted.secret);
      }
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      errors.push(`gcs-hmac: ${msg}`);
    }
  }
  const allReady = !!(saEmail && saKeyEnc && bucket && hmacAccessId && errors.length === 0);
  const status: VibnWorkspaceGcs['gcp_provision_status'] = allReady
    ? 'ready'
    : errors.length > 0
      ? 'partial'
      : 'pending';
  // COALESCE keeps previously-persisted identifiers whenever this run
  // didn't produce a new value (e.g. a step failed or was skipped).
  await query(
    `UPDATE vibn_workspaces
     SET gcp_service_account_email = COALESCE($2, gcp_service_account_email),
     gcp_service_account_key_enc = COALESCE($3, gcp_service_account_key_enc),
     gcs_default_bucket_name = COALESCE($4, gcs_default_bucket_name),
     gcs_hmac_access_id = COALESCE($5, gcs_hmac_access_id),
     gcs_hmac_secret_enc = COALESCE($6, gcs_hmac_secret_enc),
     gcp_provision_status = $7,
     gcp_provision_error = $8,
     updated_at = now()
     WHERE id = $1`,
    [
      workspace.id,
      saEmail,
      saKeyEnc,
      bucket?.name ?? bucketName,
      hmacAccessId,
      hmacSecretEnc,
      status,
      errors.length ? errors.join('; ') : null,
    ],
  );
  // Irrecoverable outcomes throw AFTER persisting, so the row records
  // whatever partial progress was made.
  if (!saEmail) throw new Error(`workspace-gcs: SA email never resolved: ${errors.join('; ')}`);
  if (!bucket) throw new Error(`workspace-gcs: bucket never created: ${errors.join('; ')}`);
  return {
    serviceAccountEmail: saEmail,
    bucket: {
      name: bucket.name,
      location: bucket.location,
      selfLink: bucket.selfLink,
      timeCreated: bucket.timeCreated,
    },
    hmac: { accessId: hmacAccessId ?? '' },
    status,
  };
}
/**
 * Decrypt the workspace's HMAC secret for STORAGE_SECRET_ACCESS_KEY env
 * injection. Returns null when the workspace isn't provisioned yet or
 * decryption fails (e.g. VIBN_SECRETS_KEY rotated).
 *
 * Callers MUST treat the result as shown-once material: log neither the
 * value nor anything that contains it.
 */
export function getWorkspaceGcsHmacCredentials(ws: WorkspaceGcs): {
  accessId: string;
  secret: string;
} | null {
  const { gcs_hmac_access_id: accessId, gcs_hmac_secret_enc: secretEnc } = ws;
  if (!accessId || !secretEnc) return null;
  try {
    const secret = decryptSecret(secretEnc);
    return { accessId, secret };
  } catch (err) {
    console.error('[workspace-gcs] failed to decrypt HMAC secret for', ws.slug, err);
    return null;
  }
}