Files
vibn-frontend/lib/gcp/storage.ts
Mark Henderson 3192e0f7b9 fix(coolify): strip is_build_time from env writes; add reveal + GCS
Coolify v4's POST/PATCH /applications/{uuid}/envs only accepts key,
value, is_preview, is_literal, is_multiline, is_shown_once. Sending
is_build_time triggers a 422 "This field is not allowed." — it's now
a derived read-only flag (is_buildtime) computed from Dockerfile ARG
usage. Breaks agents trying to upsert env vars.

Three-layer fix so this can't regress:
  - lib/coolify.ts: COOLIFY_ENV_WRITE_FIELDS whitelist enforced at the
    network boundary, regardless of caller shape
  - app/api/workspaces/[slug]/apps/[uuid]/envs: stops forwarding the
    field; returns a deprecation warning when callers send it; GET
    reads both is_buildtime and is_build_time for version parity
  - app/api/mcp/route.ts: same treatment in the MCP dispatcher;
    AI_CAPABILITIES.md doc corrected

Also bundles (not related to the above):
  - Workspace API keys are now revealable from settings. New
    key_encrypted column stores AES-256-GCM(VIBN_SECRETS_KEY, token).
    POST /api/workspaces/[slug]/keys/[keyId]/reveal returns plaintext
    for session principals only; API-key principals cannot reveal
    siblings. Legacy keys stay valid for auth but can't reveal.
  - P5.3 Object storage: lib/gcp/storage.ts + lib/workspace-gcs.ts
    idempotently provision a per-workspace GCS bucket, service
    account, IAM binding and HMAC key. New POST /api/workspaces/
    [slug]/storage/buckets endpoint. Migration script + smoke test
    included. Proven end-to-end against prod master-ai-484822.

Made-with: Cursor
2026-04-23 11:46:50 -07:00

342 lines
13 KiB
TypeScript

/**
* Google Cloud Storage driver for per-workspace buckets.
*
* Auth uses the shared `vibn-workspace-provisioner` SA via
* getGcpAccessToken(). That SA needs:
* - roles/storage.admin (create/delete buckets, set IAM)
* - roles/storage.hmacKeyAdmin (mint per-workspace HMAC keys)
*
* All resources are pinned to `northamerica-northeast1` (Montreal) per
* the §0 Substrate constraint. Calls to other regions are refused at
* this layer rather than relying on org policy alone.
*
* APIs:
* - JSON API: https://storage.googleapis.com/storage/v1/... (bucket + IAM)
* - HMAC keys also live under JSON API at .../projects/_/hmacKeys
*/
import { getGcpAccessToken, GCP_PROJECT_ID } from '@/lib/gcp-auth';
/** Base URL of the GCS JSON API (bucket CRUD, bucket IAM, HMAC keys). */
const STORAGE_API = 'https://storage.googleapis.com/storage/v1';
/** The only GCS location we will ever provision into. */
export const VIBN_GCS_LOCATION = 'northamerica-northeast1';
/**
 * Issues one authenticated request against the GCS JSON API.
 *
 * A fresh bearer token is obtained per call via getGcpAccessToken()
 * (the shared provisioner SA). When a body is supplied it is
 * JSON-encoded and a Content-Type header is added; otherwise the
 * request goes out body-less.
 */
async function authedFetch(
  method: 'GET' | 'POST' | 'DELETE' | 'PATCH' | 'PUT',
  url: string,
  body?: unknown,
): Promise<Response> {
  const accessToken = await getGcpAccessToken();
  const requestHeaders: Record<string, string> = {
    Authorization: `Bearer ${accessToken}`,
    Accept: 'application/json',
  };
  const hasBody = body !== undefined;
  if (hasBody) requestHeaders['Content-Type'] = 'application/json';
  return fetch(url, {
    method,
    headers: requestHeaders,
    body: hasBody ? JSON.stringify(body) : undefined,
  });
}
/**
 * Resolves a GCS API response to parsed JSON, or throws.
 *
 * Success: parses the body as JSON; an empty body yields `{}`.
 * Failure (non-2xx): throws an Error tagged `[gcs <context> <status>]`
 * carrying up to 500 chars of the response body for diagnostics.
 */
async function parseOrThrow<T>(res: Response, context: string): Promise<T> {
  const raw = await res.text();
  if (res.ok) {
    return raw ? (JSON.parse(raw) as T) : ({} as T);
  }
  throw new Error(`[gcs ${context} ${res.status}] ${raw.slice(0, 500)}`);
}
// ────────────────────────────────────────────────────────────────────
// Bucket naming
// ────────────────────────────────────────────────────────────────────
/**
 * Derives the deterministic default bucket name for a workspace.
 *
 * GCS bucket names are globally unique across ALL of Google Cloud, so
 * the sanitized slug is suffixed with a 6-char hash of
 * `${projectId}/${slug}`: retrying for the same workspace + project
 * reproduces the same name, while different projects cannot collide.
 *
 * Format: vibn-ws-<slug>-<6char> (≤63 chars, lowercase, no underscores).
 */
export function workspaceDefaultBucketName(slug: string, projectId = GCP_PROJECT_ID): string {
  const sanitized = slug
    .toLowerCase()
    .replace(/[^a-z0-9-]/g, '-')
    .replace(/-+/g, '-')
    .replace(/^-+|-+$/g, '');
  // "vibn-ws-" (8 chars) + "-<6char>" (7 chars) = 15 reserved → ≤48 for the slug.
  const slugPart = sanitized.slice(0, 48) || 'workspace';
  return `vibn-ws-${slugPart}-${shortHash(`${projectId}/${slug}`)}`;
}
/**
 * 32-bit FNV-1a over the input's UTF-16 code units, rendered as 6
 * base-36 chars. Non-cryptographic — only disambiguates bucket names.
 */
function shortHash(input: string): string {
  let acc = 2166136261 >>> 0; // FNV-1a 32-bit offset basis
  for (let idx = 0; idx < input.length; idx++) {
    // xor in the code unit, then multiply by the FNV prime mod 2^32
    acc = Math.imul(acc ^ input.charCodeAt(idx), 16777619) >>> 0;
  }
  return acc.toString(36).padStart(6, '0').slice(0, 6);
}
// ────────────────────────────────────────────────────────────────────
// Bucket types + CRUD
// ────────────────────────────────────────────────────────────────────
/** Subset of the GCS JSON API "bucket" resource fields this module uses. */
export interface GcsBucket {
  name: string;
  // NOTE(review): the JSON API appears to report locations in UPPERCASE
  // (e.g. 'NORTHAMERICA-NORTHEAST1') — compare case-insensitively. Confirm.
  location: string;
  /** e.g. 'STANDARD', 'NEARLINE', 'COLDLINE', 'ARCHIVE'. */
  storageClass?: string;
  selfLink?: string;
  /** RFC 3339 creation timestamp as returned by the API. */
  timeCreated?: string;
  /** Buckets created by createBucket() carry { workspace, managed_by: 'vibn' }. */
  labels?: Record<string, string>;
  iamConfiguration?: {
    uniformBucketLevelAccess?: { enabled: boolean };
    publicAccessPrevention?: 'inherited' | 'enforced';
  };
}
/**
 * Fetches bucket metadata by name.
 *
 * @returns the bucket resource, or null when it does not exist (404).
 * @throws on any other non-2xx response (including 403 on foreign buckets).
 */
export async function getBucket(bucketName: string): Promise<GcsBucket | null> {
  const bucketUrl = `${STORAGE_API}/b/${encodeURIComponent(bucketName)}`;
  const res = await authedFetch('GET', bucketUrl);
  return res.status === 404 ? null : parseOrThrow<GcsBucket>(res, 'getBucket');
}
/**
 * Idempotently creates a workspace bucket.
 *
 * Always provisions with uniform bucket-level access and — unless
 * explicitly disabled — enforced public-access prevention. On a 409 the
 * bucket already exists; it is re-read and returned so retries are
 * idempotent, but only after checking that it lives in
 * VIBN_GCS_LOCATION: previously a same-named bucket in ANY region was
 * silently adopted (and the old comment claimed a label check that was
 * never performed), which contradicted this module's residency contract.
 *
 * @throws when a non-default location is requested, when the create
 *         call fails, or when a conflicting bucket violates residency.
 */
export async function createBucket(opts: {
  name: string;
  /** Defaults to VIBN_GCS_LOCATION; explicit other values are refused. */
  location?: string;
  /** Defaults to STANDARD. */
  storageClass?: 'STANDARD' | 'NEARLINE' | 'COLDLINE' | 'ARCHIVE';
  /** When true, blocks public access at the bucket-level. Default: true. */
  enforcePublicAccessPrevention?: boolean;
  /** Workspace label so we can list-by-tenant later. */
  workspaceSlug?: string;
}): Promise<GcsBucket> {
  const location = opts.location ?? VIBN_GCS_LOCATION;
  if (location !== VIBN_GCS_LOCATION) {
    throw new Error(
      `[gcs createBucket] Refused: location=${location}. Vibn buckets must be in ${VIBN_GCS_LOCATION} for Canadian residency.`,
    );
  }
  const body: Record<string, unknown> = {
    name: opts.name,
    location,
    storageClass: opts.storageClass ?? 'STANDARD',
    iamConfiguration: {
      uniformBucketLevelAccess: { enabled: true },
      publicAccessPrevention:
        opts.enforcePublicAccessPrevention === false ? 'inherited' : 'enforced',
    },
  };
  if (opts.workspaceSlug) {
    body.labels = { workspace: opts.workspaceSlug, managed_by: 'vibn' };
  }
  const res = await authedFetch(
    'POST',
    `${STORAGE_API}/b?project=${encodeURIComponent(GCP_PROJECT_ID)}`,
    body,
  );
  if (res.status === 409) {
    // Already exists: adopt it for idempotency, but refuse a bucket
    // outside the mandated region. The API reports location in
    // uppercase, so compare case-insensitively.
    const existing = await getBucket(opts.name);
    if (!existing) {
      throw new Error(`[gcs createBucket] 409 conflict on ${opts.name} but bucket not retrievable`);
    }
    if ((existing.location ?? '').toLowerCase() !== VIBN_GCS_LOCATION) {
      throw new Error(
        `[gcs createBucket] 409 conflict on ${opts.name}: existing bucket is in ${existing.location}, not ${VIBN_GCS_LOCATION}`,
      );
    }
    return existing;
  }
  return parseOrThrow<GcsBucket>(res, 'createBucket');
}
/** Deletes a bucket; a 404 is treated as already-deleted (idempotent). */
export async function deleteBucket(bucketName: string): Promise<void> {
  const bucketUrl = `${STORAGE_API}/b/${encodeURIComponent(bucketName)}`;
  const res = await authedFetch('DELETE', bucketUrl);
  if (res.status !== 404) {
    await parseOrThrow(res, 'deleteBucket');
  }
}
// ────────────────────────────────────────────────────────────────────
// Bucket IAM bindings
//
// We keep bucket policies bucket-scoped (objectAdmin only on this bucket)
// rather than granting project-wide storage roles to per-workspace SAs.
// ────────────────────────────────────────────────────────────────────
/** One role → members entry in a bucket IAM policy. */
interface IamBinding {
  role: string;
  /** e.g. 'serviceAccount:foo@…', 'user:…'. */
  members: string[];
  /** Conditional bindings exist; addBucketIamBinding only merges into unconditional ones. */
  condition?: { title: string; expression: string };
}
/** Bucket IAM policy document (version 3 requested to surface conditions). */
interface IamPolicy {
  version?: number;
  /** Opaque token from GET; preserved through the get → set round-trip. */
  etag?: string;
  bindings?: IamBinding[];
}
/** Reads the bucket's IAM policy, requesting policy version 3. */
export async function getBucketIamPolicy(bucketName: string): Promise<IamPolicy> {
  const policyUrl =
    `${STORAGE_API}/b/${encodeURIComponent(bucketName)}/iam?optionsRequestedPolicyVersion=3`;
  const res = await authedFetch('GET', policyUrl);
  return parseOrThrow<IamPolicy>(res, 'getBucketIamPolicy');
}
/** Overwrites the bucket's IAM policy with the supplied document. */
async function setBucketIamPolicy(bucketName: string, policy: IamPolicy): Promise<IamPolicy> {
  const policyUrl = `${STORAGE_API}/b/${encodeURIComponent(bucketName)}/iam`;
  const res = await authedFetch('PUT', policyUrl, policy);
  return parseOrThrow<IamPolicy>(res, 'setBucketIamPolicy');
}
/**
 * Idempotently grants `member` (e.g. `serviceAccount:foo@…`) the given
 * role on the bucket and returns the resulting policy.
 *
 * Freshly created service accounts are visible to the IAM API
 * immediately but can take a few seconds to reach the GCS bucket-policy
 * service, so "does not exist" / "Invalid argument" failures are
 * retried with a linearly growing backoff; without this, the first
 * grant right after createServiceAccount() fails ~50% of the time.
 */
export async function addBucketIamBinding(opts: {
  bucketName: string;
  role: string;
  member: string;
}): Promise<IamPolicy> {
  const maxAttempts = 6;
  const baseDelayMs = 1500;
  let lastFailure: unknown;
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      const policy = await getBucketIamPolicy(opts.bucketName);
      const bindings = policy.bindings ?? [];
      // Only merge into the unconditional binding for this role.
      const unconditional = bindings.find(b => b.role === opts.role && !b.condition);
      if (unconditional) {
        // Already granted → nothing to write.
        if (unconditional.members.includes(opts.member)) return policy;
        unconditional.members = [...new Set([...unconditional.members, opts.member])];
      } else {
        bindings.push({ role: opts.role, members: [opts.member] });
      }
      return await setBucketIamPolicy(opts.bucketName, { ...policy, bindings });
    } catch (err) {
      lastFailure = err;
      const msg = err instanceof Error ? err.message : String(err);
      const retryable =
        /does not exist/i.test(msg) ||
        /Invalid argument/i.test(msg) ||
        /Service account .* does not exist/i.test(msg);
      if (!retryable || attempt === maxAttempts - 1) throw err;
      await new Promise(r => setTimeout(r, baseDelayMs * (attempt + 1)));
    }
  }
  throw lastFailure ?? new Error('addBucketIamBinding: exhausted retries');
}
// ────────────────────────────────────────────────────────────────────
// HMAC keys (S3-compatibility credentials for app code)
//
// HMAC keys belong to a service account and let standard S3 SDKs
// authenticate against the GCS XML API at storage.googleapis.com. We
// mint one per workspace SA so app code can read/write the workspace's
// bucket using the AWS SDK without us shipping a Google-shaped JSON key
// into the container.
// ────────────────────────────────────────────────────────────────────
/** Result of minting an HMAC key — the only time the secret is visible. */
export interface GcsHmacKey {
  /** Public access ID (looks like an AWS access key id; safe to log). */
  accessId: string;
  /** Plaintext secret (40 base64 chars). Returned ONCE on creation. */
  secret: string;
  /** Resource name. */
  resourceName?: string;
  /** ACTIVE / INACTIVE / DELETED. */
  state?: string;
  /** Service account that owns the key. */
  serviceAccountEmail?: string;
}
/** HMAC key metadata from list/create responses — never carries the secret. */
interface HmacKeyMetadata {
  accessId: string;
  /** ACTIVE / INACTIVE / DELETED. */
  state: string;
  /** Owning service account. */
  serviceAccountEmail: string;
  resourceName?: string;
  /** RFC 3339 creation timestamp. */
  timeCreated?: string;
}
/**
 * Mints a new HMAC key for `serviceAccountEmail`, returning the one-time
 * plaintext secret plus metadata.
 *
 * Retries with backoff on 404/"not found": the SA is real to
 * iam.googleapis.com immediately, but storage.googleapis.com's HMAC
 * subsystem may lag for several seconds after SA creation — the same
 * eventual-consistency window as bucket IAM.
 */
export async function createHmacKey(serviceAccountEmail: string): Promise<GcsHmacKey> {
  const endpoint = `${STORAGE_API}/projects/${encodeURIComponent(
    GCP_PROJECT_ID,
  )}/hmacKeys?serviceAccountEmail=${encodeURIComponent(serviceAccountEmail)}`;
  const maxAttempts = 6;
  const baseDelayMs = 1500;
  let lastFailure: unknown;
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      const res = await authedFetch('POST', endpoint);
      // Body layout per docs: { kind, secret, metadata: { accessId, state, ... } }
      const payload = await parseOrThrow<{ secret: string; metadata: HmacKeyMetadata }>(
        res,
        'createHmacKey',
      );
      const meta = payload.metadata;
      return {
        accessId: meta.accessId,
        secret: payload.secret,
        resourceName: meta.resourceName,
        state: meta.state,
        serviceAccountEmail: meta.serviceAccountEmail,
      };
    } catch (err) {
      lastFailure = err;
      const msg = err instanceof Error ? err.message : String(err);
      const retryable = /not found|does not exist|404/i.test(msg);
      if (!retryable || attempt === maxAttempts - 1) throw err;
      await new Promise(r => setTimeout(r, baseDelayMs * (attempt + 1)));
    }
  }
  throw lastFailure ?? new Error('createHmacKey: exhausted retries');
}
/** Lists non-deleted HMAC keys owned by the given service account. */
export async function listHmacKeysForServiceAccount(
  serviceAccountEmail: string,
): Promise<HmacKeyMetadata[]> {
  const query = `serviceAccountEmail=${encodeURIComponent(serviceAccountEmail)}&showDeletedKeys=false`;
  const listUrl = `${STORAGE_API}/projects/${encodeURIComponent(GCP_PROJECT_ID)}/hmacKeys?${query}`;
  const res = await authedFetch('GET', listUrl);
  const { items } = await parseOrThrow<{ items?: HmacKeyMetadata[] }>(res, 'listHmacKeys');
  return items ?? [];
}
/** Flips an HMAC key to INACTIVE (a precondition for deleting it). */
export async function deactivateHmacKey(accessId: string): Promise<void> {
  const keyUrl =
    `${STORAGE_API}/projects/${encodeURIComponent(GCP_PROJECT_ID)}` +
    `/hmacKeys/${encodeURIComponent(accessId)}`;
  const res = await authedFetch('PUT', keyUrl, { state: 'INACTIVE' });
  await parseOrThrow(res, 'deactivateHmacKey');
}
/**
 * Deletes an HMAC key (idempotent).
 *
 * GCS requires INACTIVE before DELETE, so we deactivate first. A failed
 * deactivation is tolerated only in the benign already-inactive case so
 * cleanup can be retried safely; any other failure propagates. A 404 on
 * the DELETE itself means the key is already gone.
 */
export async function deleteHmacKey(accessId: string): Promise<void> {
  try {
    await deactivateHmacKey(accessId);
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    // Previously this matched /400/ anywhere in the message, which would
    // also swallow unrelated errors whose response body merely contains
    // "400". Anchor the status match to our own parseOrThrow error
    // prefix ("[gcs deactivateHmacKey 400]") instead.
    const alreadyInactive =
      /already inactive/i.test(msg) || /\[gcs deactivateHmacKey 400\]/.test(msg);
    if (!alreadyInactive) throw err;
  }
  const keyUrl = `${STORAGE_API}/projects/${encodeURIComponent(
    GCP_PROJECT_ID,
  )}/hmacKeys/${encodeURIComponent(accessId)}`;
  const res = await authedFetch('DELETE', keyUrl);
  if (res.status === 404) return;
  await parseOrThrow(res, 'deleteHmacKey');
}