fix(coolify): strip is_build_time from env writes; add reveal + GCS

Coolify v4's POST/PATCH /applications/{uuid}/envs only accepts key,
value, is_preview, is_literal, is_multiline, is_shown_once. Sending
is_build_time triggers a 422 "This field is not allowed." — it's now
a derived read-only flag (is_buildtime) computed from Dockerfile ARG
usage. Breaks agents trying to upsert env vars.

Three-layer fix so this can't regress:
  - lib/coolify.ts: COOLIFY_ENV_WRITE_FIELDS whitelist enforced at the
    network boundary, regardless of caller shape
  - app/api/workspaces/[slug]/apps/[uuid]/envs: stops forwarding the
    field; returns a deprecation warning when callers send it; GET
    reads both is_buildtime and is_build_time for version parity
  - app/api/mcp/route.ts: same treatment in the MCP dispatcher;
    AI_CAPABILITIES.md doc corrected

Also bundles (not related to the above):
  - Workspace API keys are now revealable from settings. New
    key_encrypted column stores AES-256-GCM(VIBN_SECRETS_KEY, token).
    POST /api/workspaces/[slug]/keys/[keyId]/reveal returns plaintext
    for session principals only; API-key principals cannot reveal
    siblings. Legacy keys stay valid for auth but can't reveal.
  - P5.3 Object storage: lib/gcp/storage.ts + lib/workspace-gcs.ts
    idempotently provision a per-workspace GCS bucket, service
    account, IAM binding and HMAC key. New POST /api/workspaces/
    [slug]/storage/buckets endpoint. Migration script + smoke test
    included. Proven end-to-end against prod master-ai-484822.

Made-with: Cursor
This commit is contained in:
2026-04-23 11:46:50 -07:00
parent 651ddf1e11
commit 3192e0f7b9
14 changed files with 1794 additions and 37 deletions

View File

@@ -406,15 +406,25 @@ async function toolAppsEnvsUpsert(principal: Principal, params: Record<string, a
);
}
await getApplicationInProject(appUuid, projectUuid);
// Coolify v4 rejects `is_build_time` on POST/PATCH (it's a derived
// read-only flag now). Silently drop it here so agents that still send
// it don't get a surprise 422. See lib/coolify.ts upsertApplicationEnv
// for the hard enforcement at the network boundary.
const result = await upsertApplicationEnv(appUuid, {
key,
value,
is_preview: !!params.is_preview,
is_build_time: !!params.is_build_time,
is_literal: !!params.is_literal,
is_multiline: !!params.is_multiline,
is_shown_once: !!params.is_shown_once,
});
return NextResponse.json({ result });
const body: Record<string, unknown> = { result };
if (params.is_build_time !== undefined) {
body.warnings = [
'is_build_time is ignored — Coolify derives build-vs-runtime from Dockerfile ARG usage. Omit this field going forward.',
];
}
return NextResponse.json(body);
}
async function toolAppsEnvsDelete(principal: Principal, params: Record<string, any>) {

View File

@@ -1,10 +1,15 @@
/**
* GET /api/workspaces/[slug]/apps/[uuid]/envs — list env vars
* PATCH /api/workspaces/[slug]/apps/[uuid]/envs — upsert one env var
* body: { key, value, is_preview?, is_build_time?, is_literal?, is_multiline? }
* body: { key, value, is_preview?, is_literal?, is_multiline?, is_shown_once? }
* DELETE /api/workspaces/[slug]/apps/[uuid]/envs?key=FOO — delete one env var
*
* Tenant boundary: the app must belong to the workspace's Coolify project.
*
* NOTE: `is_build_time` is **not** a writable flag in Coolify v4 — it's a
* derived read-only attribute. We silently drop it from incoming request
* bodies for back-compat with older agents; the value is computed by
* Coolify at build time based on Dockerfile ARG usage.
*/
import { NextResponse } from 'next/server';
@@ -66,7 +71,11 @@ export async function GET(
key: e.key,
value: reveal ? e.value : maskValue(e.value),
isPreview: e.is_preview ?? false,
isBuildTime: e.is_build_time ?? false,
// Coolify spells the read-only build-time flag two different ways
// depending on version — `is_buildtime` (new, one word) and
// `is_build_time` (old, underscored). Fall through both.
isBuildTime: e.is_buildtime ?? e.is_build_time ?? false,
isRuntime: e.is_runtime ?? true,
isLiteral: e.is_literal ?? false,
isMultiline: e.is_multiline ?? false,
})),
@@ -91,9 +100,11 @@ export async function PATCH(
key?: string;
value?: string;
is_preview?: boolean;
/** @deprecated silently dropped — Coolify no longer accepts this on write. */
is_build_time?: boolean;
is_literal?: boolean;
is_multiline?: boolean;
is_shown_once?: boolean;
};
try {
body = await request.json();
@@ -110,11 +121,22 @@ export async function PATCH(
key: body.key,
value: body.value,
is_preview: body.is_preview ?? false,
is_build_time: body.is_build_time ?? false,
is_literal: body.is_literal ?? false,
is_multiline: body.is_multiline ?? false,
is_shown_once: body.is_shown_once ?? false,
});
return NextResponse.json({
ok: true,
key: env.key,
// Soft-deprecation signal so the caller's agent can learn to stop
// sending the flag without hard-breaking today.
warnings:
body.is_build_time !== undefined
? [
'is_build_time is ignored — Coolify derives build-vs-runtime from Dockerfile ARG usage. Omit this field going forward.',
]
: undefined,
});
return NextResponse.json({ ok: true, key: env.key });
} catch (err) {
return NextResponse.json(
{ error: 'Coolify request failed', details: String(err) },

View File

@@ -0,0 +1,57 @@
/**
* POST /api/workspaces/[slug]/keys/[keyId]/reveal
*
* Returns the plaintext `vibn_sk_...` token for an active workspace key.
*
* Intentionally restricted to SESSION principals. An API-key principal
* cannot reveal keys — this prevents a leaked agent token from being
* used to exfiltrate sibling keys. We use POST (not GET) to keep the
* secret out of server logs / the browser history / referrer headers.
*
* Returns 409 with { revealable: false } for legacy keys minted before
* the key_encrypted column existed — those plaintexts were never stored
* and can never be recovered. The caller should prompt the user to
* rotate (revoke + mint new).
*/
import { NextResponse } from 'next/server';
import {
requireWorkspacePrincipal,
revealWorkspaceApiKey,
} from '@/lib/auth/workspace-auth';
export async function POST(
  request: Request,
  { params }: { params: Promise<{ slug: string; keyId: string }> },
) {
  const { slug, keyId } = await params;

  // Authenticate + scope the caller to the target workspace.
  const principal = await requireWorkspacePrincipal(request, { targetSlug: slug });
  if (principal instanceof NextResponse) return principal;

  // Reveal is session-only: a leaked vibn_sk_ agent token must never be
  // able to exfiltrate its sibling keys.
  if (principal.source !== 'session') {
    return NextResponse.json(
      { error: 'API keys can only be revealed from a signed-in session' },
      { status: 403 },
    );
  }

  const revealed = await revealWorkspaceApiKey(principal.workspace.id, keyId);
  if (revealed) {
    return NextResponse.json({
      id: revealed.id,
      name: revealed.name,
      prefix: revealed.prefix,
      token: revealed.token,
    });
  }

  // Missing, revoked, or legacy (pre-key_encrypted) key — the plaintext
  // is unrecoverable, so steer the caller toward rotation.
  return NextResponse.json(
    {
      error:
        'Key not found, already revoked, or was minted before reveal was enabled. ' +
        'Rotate the key (revoke + create new) if you need the plaintext.',
      revealable: false,
    },
    { status: 409 },
  );
}

View File

@@ -0,0 +1,98 @@
/**
* GET /api/workspaces/[slug]/storage/buckets — describe the workspace's
* provisioned GCS state (default bucket name, SA email, HMAC accessId,
* provision status). Does NOT return the HMAC secret.
*
* POST /api/workspaces/[slug]/storage/buckets — idempotently provisions
* the per-workspace GCS substrate:
* 1. dedicated GCP service account (vibn-ws-{slug}@…)
* 2. SA JSON keyfile (encrypted at rest)
* 3. default bucket vibn-ws-{slug}-{6char} in northamerica-northeast1
* 4. roles/storage.objectAdmin binding for the SA on that bucket
* 5. HMAC key on the SA so app code can use AWS S3 SDKs
* Safe to re-run; each step short-circuits when already complete.
*
* Auth: session OR `Bearer vibn_sk_...`. Same workspace-scope rules as
* every other /api/workspaces/[slug]/* endpoint.
*
* P5.3 — vertical slice. The full storage.* tool family (presign,
* list_objects, delete_object, set_lifecycle) lands once this
* provisioning step is verified end-to-end.
*/
import { NextResponse } from 'next/server';
import { requireWorkspacePrincipal } from '@/lib/auth/workspace-auth';
import {
ensureWorkspaceGcsProvisioned,
getWorkspaceGcsState,
} from '@/lib/workspace-gcs';
export async function GET(
  request: Request,
  { params }: { params: Promise<{ slug: string }> },
) {
  const { slug } = await params;

  // Same workspace-scope auth as every /api/workspaces/[slug]/* route.
  const principal = await requireWorkspacePrincipal(request, { targetSlug: slug });
  if (principal instanceof NextResponse) return principal;

  const ws = await getWorkspaceGcsState(principal.workspace.id);
  if (!ws) {
    return NextResponse.json({ error: 'Workspace not found' }, { status: 404 });
  }

  // Describe the provisioning state only — the HMAC secret is never
  // returned by this endpoint.
  const storage = {
    status: ws.gcp_provision_status ?? 'pending',
    error: ws.gcp_provision_error ?? null,
    serviceAccountEmail: ws.gcp_service_account_email ?? null,
    defaultBucketName: ws.gcs_default_bucket_name ?? null,
    hmacAccessId: ws.gcs_hmac_access_id ?? null,
    location: 'northamerica-northeast1',
  };

  return NextResponse.json({ workspace: { slug: ws.slug }, storage });
}
export async function POST(
  request: Request,
  { params }: { params: Promise<{ slug: string }> },
) {
  const { slug } = await params;

  const principal = await requireWorkspacePrincipal(request, { targetSlug: slug });
  if (principal instanceof NextResponse) return principal;

  try {
    // Idempotent: every provisioning step short-circuits when already done.
    const result = await ensureWorkspaceGcsProvisioned(principal.workspace);
    return NextResponse.json(
      {
        workspace: { slug: principal.workspace.slug },
        storage: {
          status: result.status,
          serviceAccountEmail: result.serviceAccountEmail,
          bucket: result.bucket,
          hmacAccessId: result.hmac.accessId,
          location: result.bucket.location,
        },
      },
      { status: 200 },
    );
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err);

    // Schema-not-applied detection: surface a clear, actionable failure
    // in dev before the operator has run the migration script.
    if (/column .* does not exist/i.test(message)) {
      return NextResponse.json(
        {
          error:
            'GCS columns missing on vibn_workspaces. Run scripts/migrate-workspace-gcs.sql.',
          details: message,
        },
        { status: 503 },
      );
    }

    // Anything else is an upstream (GCP) failure.
    return NextResponse.json(
      { error: 'GCS provisioning failed', details: message },
      { status: 502 },
    );
  }
}

View File

@@ -33,7 +33,7 @@ import {
AlertDialogTitle,
} from "@/components/ui/alert-dialog";
import { toast } from "sonner";
import { Copy, Download, KeyRound, Loader2, Plus, RefreshCw, Trash2 } from "lucide-react";
import { Copy, Download, Eye, EyeOff, KeyRound, Loader2, Plus, RefreshCw, Trash2 } from "lucide-react";
interface WorkspaceSummary {
id: string;
@@ -63,6 +63,12 @@ interface ApiKey {
last_used_at: string | null;
revoked_at: string | null;
created_at: string;
/**
* True if the server still has the encrypted plaintext and can reveal
* it again on demand. False for legacy keys minted before the
* key_encrypted column was added — those can only be rotated.
*/
revealable: boolean;
}
interface MintedKey {
@@ -255,8 +261,8 @@ export function WorkspaceKeysPanel({ workspaceSlug: _urlHint }: { workspaceSlug?
<DialogTitle>Create workspace API key</DialogTitle>
<DialogDescription>
Used by AI agents (Cursor, Claude, scripts) to act on
behalf of <code>{workspace.slug}</code>. The token is shown
once save it somewhere safe.
behalf of <code>{workspace.slug}</code>. You&apos;ll be able
to reveal and copy the token again later from this page.
</DialogDescription>
</DialogHeader>
<div style={{ display: "grid", gap: 8 }}>
@@ -288,15 +294,15 @@ export function WorkspaceKeysPanel({ workspaceSlug: _urlHint }: { workspaceSlug?
<Dialog open={!!minted} onOpenChange={open => !open && setMinted(null)}>
<DialogContent style={{ maxWidth: 640 }}>
<DialogHeader>
<DialogTitle>Save your API key</DialogTitle>
<DialogTitle>Your new API key</DialogTitle>
<DialogDescription>
This is the only time the full key is shown. Store it in a
password manager or paste it into the Cursor config below.
Copy this into your AI tool now, or come back to this page
later and click <em>Show key</em> to reveal it again.
</DialogDescription>
</DialogHeader>
{minted && <MintedKeyView workspace={workspace} minted={minted} />}
<DialogFooter>
<Button onClick={() => setMinted(null)}>I&apos;ve saved it</Button>
<Button onClick={() => setMinted(null)}>Done</Button>
</DialogFooter>
</DialogContent>
</Dialog>
@@ -435,7 +441,8 @@ function KeysCard({
<h2 style={cardTitleStyle}>API keys</h2>
<p style={cardSubtitleStyle}>
Tokens scoped to <code>{workspace.slug}</code>. Use them in Cursor,
Claude Code, the CLI, or any HTTP client.
Claude Code, the CLI, or any HTTP client. Click <em>Show key</em> on
any row to reveal the full token.
</p>
</div>
<div style={{ display: "flex", gap: 8 }}>
@@ -454,7 +461,12 @@ function KeysCard({
) : (
<ul style={{ listStyle: "none", margin: 0, padding: 0, display: "flex", flexDirection: "column", gap: 8 }}>
{active.map(k => (
<KeyRow key={k.id} k={k} onRevoke={() => onRevokeClick(k)} />
<KeyRow
key={k.id}
k={k}
workspaceSlug={workspace.slug}
onRevoke={() => onRevokeClick(k)}
/>
))}
</ul>
)}
@@ -466,7 +478,7 @@ function KeysCard({
</summary>
<ul style={{ listStyle: "none", margin: "8px 0 0", padding: 0, display: "flex", flexDirection: "column", gap: 6, opacity: 0.6 }}>
{revoked.map(k => (
<KeyRow key={k.id} k={k} />
<KeyRow key={k.id} k={k} workspaceSlug={workspace.slug} />
))}
</ul>
</details>
@@ -475,22 +487,90 @@ function KeysCard({
);
}
function KeyRow({ k, onRevoke }: { k: ApiKey; onRevoke?: () => void }) {
function KeyRow({
k,
workspaceSlug,
onRevoke,
}: {
k: ApiKey;
workspaceSlug: string;
onRevoke?: () => void;
}) {
const [token, setToken] = useState<string | null>(null);
const [revealing, setRevealing] = useState(false);
const [visible, setVisible] = useState(false);
const isActive = !k.revoked_at;
const reveal = useCallback(async () => {
setRevealing(true);
try {
const res = await fetch(
`/api/workspaces/${workspaceSlug}/keys/${k.id}/reveal`,
{ method: "POST", credentials: "include" },
);
const body = await res.json().catch(() => ({}));
if (!res.ok) {
throw new Error(body?.error ?? `HTTP ${res.status}`);
}
setToken(body.token as string);
setVisible(true);
} catch (err) {
toast.error(
`Couldn't reveal key: ${err instanceof Error ? err.message : String(err)}`,
);
} finally {
setRevealing(false);
}
}, [k.id, workspaceSlug]);
const copy = useCallback(() => {
if (!token) return;
navigator.clipboard.writeText(token).then(
() => toast.success("Key copied"),
() => toast.error("Copy failed"),
);
}, [token]);
const masked = token
? `${token.slice(0, 12)}${"•".repeat(24)}${token.slice(-4)}`
: null;
return (
<li
style={{
display: "flex",
alignItems: "center",
gap: 12,
flexDirection: "column",
gap: 8,
padding: "10px 12px",
background: "#fff",
border: "1px solid var(--border, #e5e7eb)",
borderRadius: 8,
}}
>
<div style={{ display: "flex", alignItems: "center", gap: 12 }}>
<KeyRound size={16} style={{ color: "var(--muted)" }} />
<div style={{ flex: 1, minWidth: 0 }}>
<div style={{ fontSize: 13, fontWeight: 600, color: "var(--ink)" }}>{k.name}</div>
<div style={{ fontSize: 13, fontWeight: 600, color: "var(--ink)" }}>
{k.name}
{!k.revealable && isActive && (
<span
style={{
marginLeft: 8,
fontSize: 10,
fontWeight: 600,
letterSpacing: "0.04em",
textTransform: "uppercase",
color: "var(--muted)",
border: "1px solid var(--border, #e5e7eb)",
padding: "1px 6px",
borderRadius: 4,
}}
title="Minted before reveal was enabled — rotate to get a revealable key"
>
legacy
</span>
)}
</div>
<div style={{ fontSize: 11, color: "var(--muted)", fontFamily: "monospace" }}>
{k.prefix}
{k.last_used_at
@@ -498,11 +578,60 @@ function KeyRow({ k, onRevoke }: { k: ApiKey; onRevoke?: () => void }) {
: " · never used"}
</div>
</div>
{onRevoke && (
{isActive && k.revealable && !token && (
<Button
variant="outline"
size="sm"
onClick={reveal}
disabled={revealing}
aria-label="Show key"
>
{revealing ? <Loader2 className="animate-spin" size={14} /> : <Eye size={14} />}
Show key
</Button>
)}
{isActive && token && (
<>
<Button
variant="ghost"
size="sm"
onClick={() => setVisible(v => !v)}
aria-label={visible ? "Hide key" : "Show key"}
>
{visible ? <EyeOff size={14} /> : <Eye size={14} />}
</Button>
<Button variant="ghost" size="sm" onClick={copy} aria-label="Copy key">
<Copy size={14} />
</Button>
</>
)}
{onRevoke && isActive && (
<Button variant="ghost" size="sm" onClick={onRevoke} aria-label="Revoke">
<Trash2 size={14} />
</Button>
)}
</div>
{token && (
<code
style={{
display: "block",
padding: "8px 10px",
background: "#0f172a",
color: "#e2e8f0",
borderRadius: 6,
fontFamily: "monospace",
fontSize: 12,
wordBreak: "break-all",
userSelect: "all",
}}
>
{visible ? token : masked}
</code>
)}
</li>
);
}
@@ -842,7 +971,7 @@ curl -sSfL -H "Authorization: Bearer $VIBN_API_KEY" ${APP_BASE}/api/workspaces/$
<div style={{ display: "flex", flexDirection: "column", gap: 14 }}>
<FileBlock
title="Your key"
description="Copy this now — the full value is never shown again."
description="Copy now, or reveal it later from the API keys list above."
filename={`${workspace.slug}-${minted.name.replace(/\s+/g, "-")}.txt`}
contents={minted.token}
language="text"

View File

@@ -14,6 +14,7 @@
import { createHash, randomBytes } from 'crypto';
import { NextResponse } from 'next/server';
import { authSession } from '@/lib/auth/session-server';
import { encryptSecret, decryptSecret } from '@/lib/auth/secret-box';
import { query, queryOne } from '@/lib/db-postgres';
import {
type VibnWorkspace,
@@ -167,17 +168,24 @@ export async function mintWorkspaceApiKey(opts: {
const token = `${KEY_PREFIX}${random}`;
const hash = hashKey(token);
const prefix = token.slice(0, 12); // e.g. "vibn_sk_AbCd"
// AES-256-GCM encrypt the plaintext so session-authenticated users can
// reveal the key later (see revealWorkspaceApiKey). Encryption uses
// VIBN_SECRETS_KEY — same envelope as Gitea bot PATs and GCS HMAC
// secrets. If that env var isn't set we'd rather fail loudly here
// than silently mint unrevealable keys.
const encrypted = encryptSecret(token);
const inserted = await query<{ id: string; created_at: Date }>(
`INSERT INTO vibn_workspace_api_keys
(workspace_id, name, key_prefix, key_hash, scopes, created_by)
VALUES ($1, $2, $3, $4, $5::jsonb, $6)
(workspace_id, name, key_prefix, key_hash, key_encrypted, scopes, created_by)
VALUES ($1, $2, $3, $4, $5, $6::jsonb, $7)
RETURNING id, created_at`,
[
opts.workspaceId,
opts.name,
prefix,
hash,
encrypted,
JSON.stringify(opts.scopes ?? ['workspace:*']),
opts.createdBy,
]
@@ -193,6 +201,46 @@ export async function mintWorkspaceApiKey(opts: {
};
}
/**
 * Return the plaintext for an active key belonging to the workspace, if
 * we have it stored encrypted. Returns `null` when:
 *   - the key doesn't exist or is in another workspace
 *   - the key is revoked
 *   - the key predates the revealability migration (key_encrypted is NULL)
 *   - decryption fails (VIBN_SECRETS_KEY rotated without re-provisioning)
 *
 * Intentionally agnostic to auth — the caller MUST have already checked
 * that the principal is a session user for this workspace. Never call
 * this behind an API-key principal, or a compromised key could exfiltrate
 * its siblings.
 */
export async function revealWorkspaceApiKey(
  workspaceId: string,
  keyId: string,
): Promise<{ id: string; name: string; prefix: string; token: string } | null> {
  // Both the id AND the workspace must match — no cross-tenant reveals.
  const key = await queryOne<{
    id: string;
    name: string;
    key_prefix: string;
    key_encrypted: string | null;
    revoked_at: Date | null;
  }>(
    `SELECT id, name, key_prefix, key_encrypted, revoked_at
     FROM vibn_workspace_api_keys
     WHERE id = $1 AND workspace_id = $2
     LIMIT 1`,
    [keyId, workspaceId],
  );

  // Not found / wrong tenant, revoked, or pre-migration key → unrevealable.
  if (!key) return null;
  if (key.revoked_at) return null;
  if (!key.key_encrypted) return null;

  try {
    return {
      id: key.id,
      name: key.name,
      prefix: key.key_prefix,
      token: decryptSecret(key.key_encrypted),
    };
  } catch (err) {
    // A failed decrypt (rotated VIBN_SECRETS_KEY) behaves like a missing
    // plaintext rather than a 500 — the caller already handles null.
    console.error('[reveal] decrypt failed for key', keyId, err);
    return null;
  }
}
export async function listWorkspaceApiKeys(workspaceId: string): Promise<Array<{
id: string;
name: string;
@@ -202,6 +250,7 @@ export async function listWorkspaceApiKeys(workspaceId: string): Promise<Array<{
last_used_at: Date | null;
revoked_at: Date | null;
created_at: Date;
revealable: boolean;
}>> {
const rows = await query<{
id: string;
@@ -212,8 +261,10 @@ export async function listWorkspaceApiKeys(workspaceId: string): Promise<Array<{
last_used_at: Date | null;
revoked_at: Date | null;
created_at: Date;
revealable: boolean;
}>(
`SELECT id, name, key_prefix, scopes, created_by, last_used_at, revoked_at, created_at
`SELECT id, name, key_prefix, scopes, created_by, last_used_at, revoked_at, created_at,
(key_encrypted IS NOT NULL) AS revealable
FROM vibn_workspace_api_keys
WHERE workspace_id = $1
ORDER BY created_at DESC`,
@@ -228,6 +279,7 @@ export async function listWorkspaceApiKeys(workspaceId: string): Promise<Array<{
last_used_at: r.last_used_at,
revoked_at: r.revoked_at,
created_at: r.created_at,
revealable: r.revealable,
}));
}

View File

@@ -61,15 +61,68 @@ export interface CoolifyApplication {
environment?: { id?: number; project_uuid?: string; project?: { uuid?: string } };
}
/**
* Coolify env var, as returned by GET /applications/{uuid}/envs.
*
* NOTE on build-time vars: Coolify removed `is_build_time` from the
* **write** schema some time ago. The flag is now a derived read-only
* attribute (`is_buildtime`, one word) computed from whether the var
* is referenced as a Dockerfile ARG. `is_build_time` (underscored) is
* kept here only to tolerate very old read responses — never send it
* on POST/PATCH. See `COOLIFY_ENV_WRITE_FIELDS` below.
*/
export interface CoolifyEnvVar {
uuid?: string;
key: string;
value: string;
is_preview?: boolean;
/** @deprecated read-only, derived server-side. Do not send on write. */
is_build_time?: boolean;
/** Newer one-word spelling of the same derived read-only flag. */
is_buildtime?: boolean;
is_runtime?: boolean;
is_literal?: boolean;
is_multiline?: boolean;
is_shown_once?: boolean;
is_shared?: boolean;
}
/**
 * The only fields Coolify v4 accepts on POST/PATCH /applications/{uuid}/envs.
 * Any other field (notably `is_build_time`) triggers a 422
 * "This field is not allowed." Build-time vs runtime is no longer a
 * writable flag — Coolify infers it at build time.
 *
 * Source of truth:
 * https://coolify.io/docs/api-reference/api/operations/update-env-by-application-uuid
 * https://coolify.io/docs/api-reference/api/operations/create-env-by-application-uuid
 */
const COOLIFY_ENV_WRITE_FIELDS = [
  'key',
  'value',
  'is_preview',
  'is_literal',
  'is_multiline',
  'is_shown_once',
] as const;

/** Exactly the write schema above — nothing derived, nothing read-only. */
type CoolifyEnvWritePayload = {
  key: string;
  value: string;
  is_preview?: boolean;
  is_literal?: boolean;
  is_multiline?: boolean;
  is_shown_once?: boolean;
};

/**
 * Project an env record down to the write-schema whitelist, dropping
 * read-only/derived fields (`is_build_time`, `is_buildtime`, `is_runtime`,
 * `is_shared`, `uuid`) and any key whose value is undefined.
 */
function toCoolifyEnvWritePayload(env: CoolifyEnvVar): CoolifyEnvWritePayload {
  const record = env as unknown as Record<string, unknown>;
  const pairs = COOLIFY_ENV_WRITE_FIELDS.flatMap((field) =>
    record[field] === undefined ? [] : [[field, record[field]] as const],
  );
  return Object.fromEntries(pairs) as CoolifyEnvWritePayload;
}
export interface CoolifyPrivateKey {
@@ -539,17 +592,22 @@ export async function upsertApplicationEnv(
uuid: string,
env: CoolifyEnvVar & { is_preview?: boolean }
): Promise<CoolifyEnvVar> {
// Strip any read-only/derived fields (`is_build_time`, `is_buildtime`,
// `is_runtime`, `is_shared`, `uuid`) before sending — Coolify returns
// 422 "This field is not allowed." for anything outside the write
// schema. See COOLIFY_ENV_WRITE_FIELDS.
const payload = toCoolifyEnvWritePayload(env);
try {
return await coolifyFetch(`/applications/${uuid}/envs`, {
method: 'PATCH',
body: JSON.stringify(env),
body: JSON.stringify(payload),
});
} catch (err) {
const msg = err instanceof Error ? err.message : String(err);
if (msg.includes('404') || msg.includes('405')) {
return coolifyFetch(`/applications/${uuid}/envs`, {
method: 'POST',
body: JSON.stringify(env),
body: JSON.stringify(payload),
});
}
throw err;

145
lib/gcp/iam.ts Normal file
View File

@@ -0,0 +1,145 @@
/**
* Google Cloud IAM driver — service-account creation + key minting.
*
* Auth uses the shared `vibn-workspace-provisioner` SA via getGcpAccessToken().
* That SA needs `roles/iam.serviceAccountAdmin` and `roles/iam.serviceAccountKeyAdmin`
* at the project level, plus `roles/iam.serviceAccountUser` so it can act as the
* SAs it creates.
*
* All calls go through https://iam.googleapis.com/v1.
*/
import { getGcpAccessToken, GCP_PROJECT_ID } from '@/lib/gcp-auth';
const IAM_API = 'https://iam.googleapis.com/v1';
/**
 * Issue an authenticated request to the IAM REST API as the shared
 * provisioner SA (token from getGcpAccessToken()).
 *
 * FIX: test `body !== undefined` instead of truthiness. The old check
 * silently dropped valid falsy JSON bodies (`0`, `''`, `false`) and was
 * inconsistent with the identical helper in lib/gcp/storage.ts.
 */
async function authedFetch(
  method: 'GET' | 'POST' | 'DELETE' | 'PATCH',
  url: string,
  body?: unknown,
): Promise<Response> {
  const token = await getGcpAccessToken();
  const headers: Record<string, string> = {
    Authorization: `Bearer ${token}`,
    Accept: 'application/json',
  };
  const hasBody = body !== undefined;
  if (hasBody) headers['Content-Type'] = 'application/json';
  return fetch(url, {
    method,
    headers,
    body: hasBody ? JSON.stringify(body) : undefined,
  });
}
/**
 * Read the response body; throw a contextualized error on non-2xx.
 * Empty success bodies (e.g. DELETE responses) resolve to `{}`.
 */
async function parseOrThrow<T>(res: Response, context: string): Promise<T> {
  const text = await res.text();
  if (!res.ok) {
    // Cap the payload in the message so logs stay readable.
    throw new Error(`[gcp-iam ${context} ${res.status}] ${text.slice(0, 500)}`);
  }
  if (!text) return {} as T;
  return JSON.parse(text) as T;
}
// ────────────────────────────────────────────────────────────────────
// Service-account naming
// ────────────────────────────────────────────────────────────────────
/**
 * GCP service-account IDs are 6-30 chars, [a-z][a-z0-9-]{4,28}[a-z0-9].
 * Some workspace slugs are too long or have edge characters, so normalize.
 *
 * FIX: re-trim trailing hyphens AFTER the 22-char cut — slicing can land
 * on a hyphen (e.g. a 21-char word followed by '-…'), and SA ids must end
 * with a letter or digit.
 */
export function workspaceServiceAccountId(slug: string): string {
  const safe = slug.toLowerCase().replace(/[^a-z0-9-]/g, '-').replace(/-+/g, '-');
  // Reserve "vibn-ws-" prefix (8 chars) → up to 22 left for the slug.
  const trimmed =
    safe.replace(/^-+|-+$/g, '').slice(0, 22).replace(/-+$/g, '') || 'workspace';
  // Pad very short slugs so the full id clears the 6-char minimum.
  const padded = trimmed.length < 4 ? `${trimmed}-ws` : trimmed;
  return `vibn-ws-${padded}`;
}

/** Deterministic email of the workspace's dedicated SA in `projectId`. */
export function workspaceServiceAccountEmail(slug: string, projectId = GCP_PROJECT_ID): string {
  return `${workspaceServiceAccountId(slug)}@${projectId}.iam.gserviceaccount.com`;
}
// ────────────────────────────────────────────────────────────────────
// Service-account CRUD
// ────────────────────────────────────────────────────────────────────
/**
 * Subset of the IAM API's ServiceAccount resource that this module
 * consumes; other response fields are ignored.
 */
export interface GcpServiceAccount {
  /** Full resource name, e.g. projects/{project}/serviceAccounts/{email}. */
  name: string;
  /** The SA's email — used as its identifier in IAM bindings and URLs. */
  email: string;
  /** Stable numeric id assigned by GCP at creation. */
  uniqueId: string;
  displayName?: string;
  description?: string;
}
/** Fetch an SA by email; `null` when it doesn't exist (expected during provisioning). */
export async function getServiceAccount(email: string): Promise<GcpServiceAccount | null> {
  const res = await authedFetch(
    'GET',
    `${IAM_API}/projects/${GCP_PROJECT_ID}/serviceAccounts/${encodeURIComponent(email)}`,
  );
  // 404 is an expected state for idempotent provisioning, not an error.
  if (res.status === 404) return null;
  return parseOrThrow<GcpServiceAccount>(res, 'getServiceAccount');
}
/** Create an SA in GCP_PROJECT_ID; tolerant of concurrent creation (409 → fetch existing). */
export async function createServiceAccount(opts: {
  accountId: string;
  displayName: string;
  description?: string;
}): Promise<GcpServiceAccount> {
  const res = await authedFetch(
    'POST',
    `${IAM_API}/projects/${GCP_PROJECT_ID}/serviceAccounts`,
    {
      accountId: opts.accountId,
      serviceAccount: {
        displayName: opts.displayName,
        description: opts.description,
      },
    },
  );

  // Race-safe: a concurrent provision run may have just created it —
  // resolve the conflict by returning the existing SA.
  if (res.status === 409) {
    const conflictEmail = `${opts.accountId}@${GCP_PROJECT_ID}.iam.gserviceaccount.com`;
    const existing = await getServiceAccount(conflictEmail);
    if (existing) return existing;
  }

  return parseOrThrow<GcpServiceAccount>(res, 'createServiceAccount');
}
/**
 * Idempotently ensures the workspace's SA exists. Returns its email.
 */
export async function ensureWorkspaceServiceAccount(opts: {
  slug: string;
  workspaceName?: string;
}): Promise<GcpServiceAccount> {
  // Fast path: a previous (possibly partial) provision run already made it.
  const existing = await getServiceAccount(workspaceServiceAccountEmail(opts.slug));
  if (existing) return existing;

  return createServiceAccount({
    accountId: workspaceServiceAccountId(opts.slug),
    displayName: `Vibn workspace: ${opts.workspaceName ?? opts.slug}`,
    description: `Auto-provisioned by Vibn for workspace "${opts.slug}". Owns workspace-scoped GCS bucket(s) and (eventually) Cloud Tasks queues + Scheduler jobs.`,
  });
}
// ────────────────────────────────────────────────────────────────────
// Service-account key minting
//
// We mint a JSON keyfile per workspace once at provision time and store
// it encrypted. Currently only used so app code can authenticate as the
// workspace's SA (e.g. to call GCS / Cloud Tasks from inside a deployed
// container). The control-plane itself uses the shared provisioner SA.
// ────────────────────────────────────────────────────────────────────
export interface GcpServiceAccountKey {
/** Resource name, e.g. projects/.../serviceAccounts/.../keys/<id>. */
name: string;
/** Base64-encoded JSON keyfile (Google's privateKeyData format). */
privateKeyData: string;
}
/** Mint a Google-credentials JSON keyfile (RSA-2048) for the given SA. */
export async function createServiceAccountKey(saEmail: string): Promise<GcpServiceAccountKey> {
  const keysUrl = `${IAM_API}/projects/${GCP_PROJECT_ID}/serviceAccounts/${encodeURIComponent(
    saEmail,
  )}/keys`;
  const res = await authedFetch('POST', keysUrl, {
    privateKeyType: 'TYPE_GOOGLE_CREDENTIALS_FILE',
    keyAlgorithm: 'KEY_ALG_RSA_2048',
  });
  return parseOrThrow<GcpServiceAccountKey>(res, 'createServiceAccountKey');
}

341
lib/gcp/storage.ts Normal file
View File

@@ -0,0 +1,341 @@
/**
* Google Cloud Storage driver for per-workspace buckets.
*
* Auth uses the shared `vibn-workspace-provisioner` SA via
* getGcpAccessToken(). That SA needs:
* - roles/storage.admin (create/delete buckets, set IAM)
* - roles/storage.hmacKeyAdmin (mint per-workspace HMAC keys)
*
* All resources are pinned to `northamerica-northeast1` (Montreal) per
* the §0 Substrate constraint. Calls to other regions are refused at
* this layer rather than relying on org policy alone.
*
* APIs:
* - JSON API: https://storage.googleapis.com/storage/v1/... (bucket + IAM)
* - HMAC keys also live under JSON API at .../projects/_/hmacKeys
*/
import { getGcpAccessToken, GCP_PROJECT_ID } from '@/lib/gcp-auth';
const STORAGE_API = 'https://storage.googleapis.com/storage/v1';
/** The only GCS location we will ever provision into. */
export const VIBN_GCS_LOCATION = 'northamerica-northeast1';
/**
 * Issue an authenticated GCS JSON API request as the shared provisioner SA.
 * Tests `body !== undefined` (not truthiness) so falsy-but-valid JSON
 * bodies are still serialized.
 */
async function authedFetch(
  method: 'GET' | 'POST' | 'DELETE' | 'PATCH' | 'PUT',
  url: string,
  body?: unknown,
): Promise<Response> {
  const token = await getGcpAccessToken();
  const headers: Record<string, string> = {
    Authorization: `Bearer ${token}`,
    Accept: 'application/json',
  };
  const hasBody = body !== undefined;
  if (hasBody) headers['Content-Type'] = 'application/json';
  return fetch(url, {
    method,
    headers,
    body: hasBody ? JSON.stringify(body) : undefined,
  });
}
/**
 * Read the response body; throw a GCS-contextualized error on non-2xx.
 * Empty success bodies (e.g. 204 responses) resolve to `{}`.
 */
async function parseOrThrow<T>(res: Response, context: string): Promise<T> {
  const text = await res.text();
  if (!res.ok) {
    // Cap the payload in the message so logs stay readable.
    throw new Error(`[gcs ${context} ${res.status}] ${text.slice(0, 500)}`);
  }
  if (!text) return {} as T;
  return JSON.parse(text) as T;
}
// ────────────────────────────────────────────────────────────────────
// Bucket naming
// ────────────────────────────────────────────────────────────────────
/**
 * GCS bucket names are globally unique across ALL of Google Cloud, so
 * we suffix the workspace slug with a deterministic-but-collision-resistant
 * 6-char hash derived from `${projectId}/${slug}`. Same workspace + project
 * → same bucket name on retry; different projects → no collision.
 *
 * Format: vibn-ws-<slug>-<6char> (≤63 chars, lowercase, no underscores).
 */
export function workspaceDefaultBucketName(slug: string, projectId = GCP_PROJECT_ID): string {
  const normalized = slug
    .toLowerCase()
    .replace(/[^a-z0-9-]/g, '-')
    .replace(/-+/g, '-')
    .replace(/^-+|-+$/g, '');
  // Reserve 8 chars for "vibn-ws-" + 7 for "-<6char>" = 15 → up to 48 chars for slug.
  const stem = normalized.slice(0, 48) || 'workspace';
  return `vibn-ws-${stem}-${shortHash(`${projectId}/${slug}`)}`;
}

/**
 * FNV-1a over UTF-16 code units, folded into 6 base-36 chars. Purely for
 * name disambiguation — it has no security properties.
 */
function shortHash(input: string): string {
  let acc = 2166136261 >>> 0;
  for (let i = 0; i < input.length; i += 1) {
    acc = Math.imul(acc ^ input.charCodeAt(i), 16777619) >>> 0;
  }
  return acc.toString(36).padStart(6, '0').slice(0, 6);
}
// ────────────────────────────────────────────────────────────────────
// Bucket types + CRUD
// ────────────────────────────────────────────────────────────────────
/** Subset of the GCS JSON-API bucket resource that this module reads. */
export interface GcsBucket {
  /** Globally-unique bucket name (no gs:// prefix). */
  name: string;
  /** GCS location, e.g. a region such as northamerica-northeast1. */
  location: string;
  /** e.g. STANDARD / NEARLINE / COLDLINE / ARCHIVE. */
  storageClass?: string;
  /** API self-link of the bucket resource. */
  selfLink?: string;
  /** Creation timestamp as returned by the API. */
  timeCreated?: string;
  /** Bucket labels (createBucket sets workspace / managed_by). */
  labels?: Record<string, string>;
  /** Tenancy-hardening flags set at creation time. */
  iamConfiguration?: {
    uniformBucketLevelAccess?: { enabled: boolean };
    publicAccessPrevention?: 'inherited' | 'enforced';
  };
}
/**
 * Fetches bucket metadata, mapping GCS's 404 to `null` so callers can
 * treat "missing" as ordinary control flow instead of an exception.
 */
export async function getBucket(bucketName: string): Promise<GcsBucket | null> {
  const url = `${STORAGE_API}/b/${encodeURIComponent(bucketName)}`;
  const res = await authedFetch('GET', url);
  if (res.status === 404) return null;
  return parseOrThrow<GcsBucket>(res, 'getBucket');
}
/**
 * Creates a GCS bucket pinned to the Vibn region.
 *
 * Residency guard: any explicit location other than VIBN_GCS_LOCATION is
 * refused up front rather than silently relocated. Defaults: STANDARD
 * storage class, uniform bucket-level access on, and public-access
 * prevention enforced unless the caller explicitly opts out. When
 * `workspaceSlug` is given the bucket is labelled for later by-tenant
 * listing.
 *
 * A 409 (name taken) is treated as success if the bucket is retrievable.
 * NOTE(review): ownership is not verified on the 409 path (no label
 * check) — callers relying on tenancy should confirm labels themselves.
 */
export async function createBucket(opts: {
  /** Defaults to VIBN_GCS_LOCATION; explicit other values are refused. */
  name: string;
  location?: string;
  /** Defaults to STANDARD. */
  storageClass?: 'STANDARD' | 'NEARLINE' | 'COLDLINE' | 'ARCHIVE';
  /** When true, blocks public access at the bucket-level. Default: true. */
  enforcePublicAccessPrevention?: boolean;
  /** Workspace label so we can list-by-tenant later. */
  workspaceSlug?: string;
}): Promise<GcsBucket> {
  const requestedLocation = opts.location ?? VIBN_GCS_LOCATION;
  if (requestedLocation !== VIBN_GCS_LOCATION) {
    throw new Error(
      `[gcs createBucket] Refused: location=${requestedLocation}. Vibn buckets must be in ${VIBN_GCS_LOCATION} for Canadian residency.`,
    );
  }
  const preventPublic = opts.enforcePublicAccessPrevention !== false;
  const payload: Record<string, unknown> = {
    name: opts.name,
    location: requestedLocation,
    storageClass: opts.storageClass ?? 'STANDARD',
    iamConfiguration: {
      uniformBucketLevelAccess: { enabled: true },
      publicAccessPrevention: preventPublic ? 'enforced' : 'inherited',
    },
  };
  if (opts.workspaceSlug) {
    payload.labels = { workspace: opts.workspaceSlug, managed_by: 'vibn' };
  }
  const res = await authedFetch(
    'POST',
    `${STORAGE_API}/b?project=${encodeURIComponent(GCP_PROJECT_ID)}`,
    payload,
  );
  if (res.status !== 409) return parseOrThrow<GcsBucket>(res, 'createBucket');
  // Name collision: succeed by returning the existing bucket if we can
  // still read it; otherwise surface a hard error.
  const existing = await getBucket(opts.name);
  if (existing) return existing;
  throw new Error(`[gcs createBucket] 409 conflict on ${opts.name} but bucket not retrievable`);
}
/** Deletes a bucket; a 404 is treated as already-deleted (idempotent). */
export async function deleteBucket(bucketName: string): Promise<void> {
  const url = `${STORAGE_API}/b/${encodeURIComponent(bucketName)}`;
  const res = await authedFetch('DELETE', url);
  if (res.status !== 404) await parseOrThrow(res, 'deleteBucket');
}
// ────────────────────────────────────────────────────────────────────
// Bucket IAM bindings
//
// We keep bucket policies bucket-scoped (objectAdmin only on this bucket)
// rather than granting project-wide storage roles to per-workspace SAs.
// ────────────────────────────────────────────────────────────────────
/** One role→members entry in a bucket IAM policy. */
interface IamBinding {
  role: string;
  members: string[];
  /** Present only on conditional bindings; addBucketIamBinding skips those. */
  condition?: { title: string; expression: string };
}
/** Bucket IAM policy shape as returned/accepted by the GCS JSON API. */
interface IamPolicy {
  version?: number;
  /** Concurrency token; preserved by addBucketIamBinding's read-modify-write. */
  etag?: string;
  bindings?: IamBinding[];
}
/** Reads the bucket's IAM policy (requested at policy version 3). */
export async function getBucketIamPolicy(bucketName: string): Promise<IamPolicy> {
  const url =
    `${STORAGE_API}/b/${encodeURIComponent(bucketName)}/iam?optionsRequestedPolicyVersion=3`;
  const res = await authedFetch('GET', url);
  return parseOrThrow<IamPolicy>(res, 'getBucketIamPolicy');
}
/** Replaces the bucket's IAM policy wholesale (PUT semantics). */
async function setBucketIamPolicy(bucketName: string, policy: IamPolicy): Promise<IamPolicy> {
  const url = `${STORAGE_API}/b/${encodeURIComponent(bucketName)}/iam`;
  const res = await authedFetch('PUT', url, policy);
  return parseOrThrow<IamPolicy>(res, 'setBucketIamPolicy');
}
/**
 * Idempotently grants `member` (e.g. `serviceAccount:foo@…`) the given
 * role on the bucket and returns the resulting policy.
 *
 * Read-modify-write: when an unconditional binding for the role already
 * contains the member, the current policy is returned untouched;
 * otherwise the member is merged in and the policy is PUT back.
 *
 * Retries with linearly-increasing backoff when the failure looks like
 * IAM propagation lag ("… does not exist" / "Invalid argument"): a
 * freshly-created SA is visible to the IAM API immediately, but the GCS
 * bucket-policy service can take a few seconds to learn about it, and
 * without the retry the first call after createServiceAccount() fails
 * roughly half the time.
 */
export async function addBucketIamBinding(opts: {
  bucketName: string;
  role: string;
  member: string;
}): Promise<IamPolicy> {
  const maxAttempts = 6;
  const baseDelayMs = 1500;
  let lastErr: unknown;
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      const policy = await getBucketIamPolicy(opts.bucketName);
      const bindings = policy.bindings ?? [];
      const target = bindings.find(b => b.role === opts.role && !b.condition);
      if (target?.members.includes(opts.member)) return policy;
      if (target) {
        target.members = [...new Set([...target.members, opts.member])];
      } else {
        bindings.push({ role: opts.role, members: [opts.member] });
      }
      return await setBucketIamPolicy(opts.bucketName, { ...policy, bindings });
    } catch (err) {
      lastErr = err;
      const message = err instanceof Error ? err.message : String(err);
      const looksLikePropagation =
        /does not exist/i.test(message) || /Invalid argument/i.test(message);
      if (!looksLikePropagation || attempt === maxAttempts - 1) throw err;
      await new Promise(resolve => setTimeout(resolve, baseDelayMs * (attempt + 1)));
    }
  }
  throw lastErr ?? new Error('addBucketIamBinding: exhausted retries');
}
// ────────────────────────────────────────────────────────────────────
// HMAC keys (S3-compatibility credentials for app code)
//
// HMAC keys belong to a service account and let standard S3 SDKs
// authenticate against the GCS XML API at storage.googleapis.com. We
// mint one per workspace SA so app code can read/write the workspace's
// bucket using the AWS SDK without us shipping a Google-shaped JSON key
// into the container.
// ────────────────────────────────────────────────────────────────────
/** Result of minting an HMAC key — includes the shown-once secret. */
export interface GcsHmacKey {
  /** Public access ID (looks like an AWS access key id; safe to log). */
  accessId: string;
  /** Plaintext secret (40 base64 chars). Returned ONCE on creation. */
  secret: string;
  /** Resource name. */
  resourceName?: string;
  /** ACTIVE / INACTIVE / DELETED. */
  state?: string;
  serviceAccountEmail?: string;
}
/** HMAC key metadata as returned by list/create (never the secret). */
interface HmacKeyMetadata {
  accessId: string;
  /** ACTIVE / INACTIVE / DELETED. */
  state: string;
  serviceAccountEmail: string;
  resourceName?: string;
  timeCreated?: string;
}
/**
 * Mints an HMAC key (S3-style access id + secret) for the given SA.
 * The plaintext secret is returned ONLY here; GCS never surfaces it
 * again.
 *
 * Retries on 404/not-found with linear backoff: the SA is real to
 * iam.googleapis.com immediately after creation, but the GCS HMAC
 * subsystem may 404 on it for several seconds.
 */
export async function createHmacKey(serviceAccountEmail: string): Promise<GcsHmacKey> {
  const url = `${STORAGE_API}/projects/${encodeURIComponent(
    GCP_PROJECT_ID,
  )}/hmacKeys?serviceAccountEmail=${encodeURIComponent(serviceAccountEmail)}`;
  const maxAttempts = 6;
  const baseDelayMs = 1500;
  let lastErr: unknown;
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      const res = await authedFetch('POST', url);
      // Response layout per docs: { kind, secret, metadata: { accessId, … } }
      const payload = await parseOrThrow<{
        secret: string;
        metadata: HmacKeyMetadata;
      }>(res, 'createHmacKey');
      const { metadata } = payload;
      return {
        accessId: metadata.accessId,
        secret: payload.secret,
        resourceName: metadata.resourceName,
        state: metadata.state,
        serviceAccountEmail: metadata.serviceAccountEmail,
      };
    } catch (err) {
      lastErr = err;
      const message = err instanceof Error ? err.message : String(err);
      const looksLikePropagation = /not found|does not exist|404/i.test(message);
      if (!looksLikePropagation || attempt === maxAttempts - 1) throw err;
      await new Promise(resolve => setTimeout(resolve, baseDelayMs * (attempt + 1)));
    }
  }
  throw lastErr ?? new Error('createHmacKey: exhausted retries');
}
/** Lists non-deleted HMAC key metadata for a service account. */
export async function listHmacKeysForServiceAccount(
  serviceAccountEmail: string,
): Promise<HmacKeyMetadata[]> {
  const project = encodeURIComponent(GCP_PROJECT_ID);
  const sa = encodeURIComponent(serviceAccountEmail);
  const url = `${STORAGE_API}/projects/${project}/hmacKeys?serviceAccountEmail=${sa}&showDeletedKeys=false`;
  const res = await authedFetch('GET', url);
  const payload = await parseOrThrow<{ items?: HmacKeyMetadata[] }>(res, 'listHmacKeys');
  return payload.items ?? [];
}
/** Flips an HMAC key to INACTIVE (a precondition for deleting it). */
export async function deactivateHmacKey(accessId: string): Promise<void> {
  const project = encodeURIComponent(GCP_PROJECT_ID);
  const url = `${STORAGE_API}/projects/${project}/hmacKeys/${encodeURIComponent(accessId)}`;
  const res = await authedFetch('PUT', url, { state: 'INACTIVE' });
  await parseOrThrow(res, 'deactivateHmacKey');
}
/**
 * Deletes an HMAC key, idempotently.
 *
 * GCS requires a key to be INACTIVE before DELETE, so we best-effort
 * deactivate first. Errors that only mean "nothing to do" — already
 * inactive (400) or already deleted (404/not found) — are swallowed so
 * repeated cleanup runs stay idempotent. The final DELETE likewise
 * treats a 404 as done.
 *
 * @param accessId Public HMAC access id (not secret material).
 * @throws On any GCS error other than the tolerated already-inactive /
 *         already-deleted cases.
 */
export async function deleteHmacKey(accessId: string): Promise<void> {
  try {
    await deactivateHmacKey(accessId);
  } catch (err) {
    // "400" covers already-inactive rejections (parseOrThrow embeds the
    // status in the message). "404|not found" covers a key that is
    // already gone — previously that error escaped here and broke
    // idempotent cleanup, even though the DELETE below would have been
    // a clean no-op.
    const msg = err instanceof Error ? err.message : String(err);
    if (!/already inactive|400|404|not found/i.test(msg)) throw err;
  }
  const url = `${STORAGE_API}/projects/${encodeURIComponent(
    GCP_PROJECT_ID,
  )}/hmacKeys/${encodeURIComponent(accessId)}`;
  const res = await authedFetch('DELETE', url);
  if (res.status === 404) return;
  await parseOrThrow(res, 'deleteHmacKey');
}

280
lib/workspace-gcs.ts Normal file
View File

@@ -0,0 +1,280 @@
/**
* Per-workspace GCS provisioning (P5.3).
*
* Idempotently sets up everything a workspace needs to do object storage:
* 1. A dedicated GCP service account (vibn-ws-{slug}@…)
* 2. A JSON keyfile for that SA (encrypted at rest)
* 3. A default GCS bucket (vibn-ws-{slug}-{6char}) in northamerica-northeast1
* 4. A bucket-scoped roles/storage.objectAdmin binding for the SA
* 5. An HMAC key on the SA so app code can use AWS S3 SDKs
*
* Persists IDs + encrypted secrets onto vibn_workspaces. Safe to re-run;
* each step is idempotent and short-circuits when already complete.
*
* Required schema migration: scripts/migrate-workspace-gcs.sql
*
* The control plane itself never decrypts the per-workspace SA key — it
* always authenticates as the shared `vibn-workspace-provisioner`. The
* per-workspace credentials exist solely to be injected into deployed
* Coolify apps as STORAGE_* env vars (see app env injection in
* apps/route.ts when wired up).
*/
import { query, queryOne } from '@/lib/db-postgres';
import { encryptSecret, decryptSecret } from '@/lib/auth/secret-box';
import {
ensureWorkspaceServiceAccount,
workspaceServiceAccountEmail,
createServiceAccountKey,
} from '@/lib/gcp/iam';
import {
createBucket,
getBucket,
addBucketIamBinding,
createHmacKey,
listHmacKeysForServiceAccount,
workspaceDefaultBucketName,
VIBN_GCS_LOCATION,
type GcsBucket,
} from '@/lib/gcp/storage';
import type { VibnWorkspace } from '@/lib/workspaces';
/**
 * Extra columns added by scripts/migrate-workspace-gcs.sql. We model
 * them as a separate interface so the existing `VibnWorkspace` shape
 * doesn't have to be touched until every caller is ready.
 */
export interface VibnWorkspaceGcs {
  // Dedicated per-workspace GCP SA email; null until provisioned.
  gcp_service_account_email: string | null;
  // secret-box ciphertext of the SA's base64 JSON keyfile.
  gcp_service_account_key_enc: string | null;
  // Globally-unique default bucket name (vibn-ws-<slug>-<hash>).
  gcs_default_bucket_name: string | null;
  // Public HMAC access id (S3-style access key id; not secret material).
  gcs_hmac_access_id: string | null;
  // secret-box ciphertext of the HMAC secret; may stay null when a
  // pre-existing key was adopted (its secret is unrecoverable).
  gcs_hmac_secret_enc: string | null;
  // GCS-only status, independent of the workspace's own provision flow.
  gcp_provision_status: 'pending' | 'partial' | 'ready' | 'error';
  // Last provisioning error summary ('; '-joined), or null when clean.
  gcp_provision_error: string | null;
}
/** Workspace row widened with the GCS columns from the migration. */
export type WorkspaceGcs = VibnWorkspace & VibnWorkspaceGcs;
/** Re-reads the workspace row including the GCS columns, or null if absent. */
export async function getWorkspaceGcsState(workspaceId: string): Promise<WorkspaceGcs | null> {
  const sql = `SELECT * FROM vibn_workspaces WHERE id = $1`;
  return queryOne<WorkspaceGcs>(sql, [workspaceId]);
}
/** What we tell the API caller after a successful provision. */
export interface WorkspaceGcsResult {
  // Per-workspace GCP service account email.
  serviceAccountEmail: string;
  // Default-bucket summary (no ACL/label detail).
  bucket: {
    name: string;
    location: string;
    selfLink?: string;
    timeCreated?: string;
  };
  // Only the public access id — the secret is never returned here.
  hmac: {
    accessId: string;
  };
  // 'ready' when every step completed; 'partial' when some step failed.
  status: VibnWorkspaceGcs['gcp_provision_status'];
}
/**
 * Idempotent: ensures the workspace has a GCP SA + key + default bucket
 * + IAM binding + HMAC key. Updates vibn_workspaces with the resulting
 * identifiers (key + secret stored encrypted). Returns a flat summary
 * suitable for sending back to the API caller.
 *
 * Throws on any irrecoverable error; transient/partial failures land in
 * the row's gcp_provision_status='partial' with the message in
 * gcp_provision_error.
 */
export async function ensureWorkspaceGcsProvisioned(
  workspace: VibnWorkspace,
): Promise<WorkspaceGcsResult> {
  // Prefer the freshest DB row (with GCS columns); fall back to the
  // caller's copy if the row can't be re-read.
  const ws = (await getWorkspaceGcsState(workspace.id)) ?? (workspace as WorkspaceGcs);
  // ── Short-circuit if everything is already there.
  if (
    ws.gcp_provision_status === 'ready' &&
    ws.gcp_service_account_email &&
    ws.gcs_default_bucket_name &&
    ws.gcs_hmac_access_id
  ) {
    const existing = await getBucket(ws.gcs_default_bucket_name);
    if (existing) {
      return {
        serviceAccountEmail: ws.gcp_service_account_email,
        bucket: {
          name: existing.name,
          location: existing.location,
          selfLink: existing.selfLink,
          timeCreated: existing.timeCreated,
        },
        hmac: { accessId: ws.gcs_hmac_access_id },
        status: 'ready',
      };
    }
    // Bucket vanished out from under us (manual gcloud delete?). Fall
    // through and re-provision; the SA + HMAC can stay.
  }
  // Working copies of the persisted identifiers; the COALESCE() in the
  // UPDATE below means a null here never clobbers an existing column.
  let saEmail = ws.gcp_service_account_email;
  let saKeyEnc = ws.gcp_service_account_key_enc;
  let bucketName = ws.gcs_default_bucket_name;
  let hmacAccessId = ws.gcs_hmac_access_id;
  let hmacSecretEnc = ws.gcs_hmac_secret_enc;
  let bucket: GcsBucket | null = null;
  // Step failures accumulate as "<step>: <message>" instead of aborting;
  // the joined list is persisted to gcp_provision_error.
  const errors: string[] = [];
  // ── 1. Service account ─────────────────────────────────────────────
  try {
    const sa = await ensureWorkspaceServiceAccount({
      slug: workspace.slug,
      workspaceName: workspace.name,
    });
    saEmail = sa.email;
  } catch (err) {
    const msg = err instanceof Error ? err.message : String(err);
    errors.push(`gcp-sa: ${msg}`);
    // Derive the deterministic email anyway so later steps can reference it.
    saEmail = saEmail ?? workspaceServiceAccountEmail(workspace.slug);
  }
  // ── 2. SA keyfile ─────────────────────────────────────────────────
  // Mint once. Rotation is a separate flow (Tier 2 territory).
  if (!saKeyEnc && saEmail && !errors.some(e => e.startsWith('gcp-sa:'))) {
    try {
      const key = await createServiceAccountKey(saEmail);
      // privateKeyData is already base64; we encrypt the whole base64
      // payload so the column can stay TEXT and reuse secret-box.
      saKeyEnc = encryptSecret(key.privateKeyData);
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      errors.push(`gcp-sa-key: ${msg}`);
    }
  }
  // ── 3. Default bucket ──────────────────────────────────────────────
  if (!bucketName) bucketName = workspaceDefaultBucketName(workspace.slug);
  if (!errors.some(e => e.startsWith('gcp-sa:'))) {
    try {
      bucket =
        (await getBucket(bucketName)) ??
        (await createBucket({
          name: bucketName,
          location: VIBN_GCS_LOCATION,
          enforcePublicAccessPrevention: true,
          workspaceSlug: workspace.slug,
        }));
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      errors.push(`gcs-bucket: ${msg}`);
    }
  }
  // ── 4. Bucket IAM binding for the workspace SA ─────────────────────
  if (bucket && saEmail) {
    try {
      await addBucketIamBinding({
        bucketName: bucket.name,
        role: 'roles/storage.objectAdmin',
        member: `serviceAccount:${saEmail}`,
      });
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      errors.push(`gcs-iam: ${msg}`);
    }
  }
  // ── 5. HMAC key for app code ───────────────────────────────────────
  // Only mint if we don't already have one. GCS limits 5 active keys
  // per SA; we never want to thrash this.
  if (saEmail && !hmacAccessId) {
    try {
      // Defensive: if a previous run minted a key but failed before
      // saving, reuse the existing ACTIVE one instead of stacking.
      const existingHmacs = await listHmacKeysForServiceAccount(saEmail);
      const active = existingHmacs.find(k => k.state === 'ACTIVE');
      if (active) {
        hmacAccessId = active.accessId;
        // We can't recover the secret of a previously-minted key; leave
        // the encrypted secret null and let the operator rotate if they
        // need it injected.
      } else {
        const minted = await createHmacKey(saEmail);
        hmacAccessId = minted.accessId;
        hmacSecretEnc = encryptSecret(minted.secret);
      }
    } catch (err) {
      const msg = err instanceof Error ? err.message : String(err);
      errors.push(`gcs-hmac: ${msg}`);
    }
  }
  // NOTE(review): status never reaches 'error' on this path — even total
  // failure lands as 'partial'. Confirm that's intended before the UI
  // starts distinguishing the two.
  const allReady = !!(saEmail && saKeyEnc && bucket && hmacAccessId && errors.length === 0);
  const status: VibnWorkspaceGcs['gcp_provision_status'] = allReady
    ? 'ready'
    : errors.length > 0
      ? 'partial'
      : 'pending';
  await query(
    `UPDATE vibn_workspaces
        SET gcp_service_account_email = COALESCE($2, gcp_service_account_email),
            gcp_service_account_key_enc = COALESCE($3, gcp_service_account_key_enc),
            gcs_default_bucket_name = COALESCE($4, gcs_default_bucket_name),
            gcs_hmac_access_id = COALESCE($5, gcs_hmac_access_id),
            gcs_hmac_secret_enc = COALESCE($6, gcs_hmac_secret_enc),
            gcp_provision_status = $7,
            gcp_provision_error = $8,
            updated_at = now()
      WHERE id = $1`,
    [
      workspace.id,
      saEmail,
      saKeyEnc,
      bucket?.name ?? bucketName,
      hmacAccessId,
      hmacSecretEnc,
      status,
      errors.length ? errors.join('; ') : null,
    ],
  );
  if (!saEmail) throw new Error(`workspace-gcs: SA email never resolved: ${errors.join('; ')}`);
  if (!bucket) throw new Error(`workspace-gcs: bucket never created: ${errors.join('; ')}`);
  return {
    serviceAccountEmail: saEmail,
    bucket: {
      name: bucket.name,
      location: bucket.location,
      selfLink: bucket.selfLink,
      timeCreated: bucket.timeCreated,
    },
    // accessId is '' only in a 'partial' state where HMAC minting failed.
    hmac: { accessId: hmacAccessId ?? '' },
    status,
  };
}
/**
 * Decrypts the workspace's HMAC pair for STORAGE_SECRET_ACCESS_KEY env
 * injection. Returns null when the workspace has no stored pair or the
 * ciphertext fails to decrypt (the failure is logged without the value).
 *
 * Callers MUST treat the secret as shown-once material: log neither the
 * value nor anything that contains it.
 */
export function getWorkspaceGcsHmacCredentials(ws: WorkspaceGcs): {
  accessId: string;
  secret: string;
} | null {
  const { gcs_hmac_access_id: accessId, gcs_hmac_secret_enc: secretEnc } = ws;
  if (!accessId || !secretEnc) return null;
  try {
    const secret = decryptSecret(secretEnc);
    return { accessId, secret };
  } catch (err) {
    console.error('[workspace-gcs] failed to decrypt HMAC secret for', ws.slug, err);
    return null;
  }
}

View File

@@ -0,0 +1,25 @@
-- =============================================================================
-- Make workspace API keys revealable.
--
-- Adds `key_encrypted` — base64 of secret-box(VIBN_SECRETS_KEY, plaintext token).
-- Existing rows keep `key_encrypted = NULL` and are therefore NOT revealable;
-- only the hash was stored at mint time and the plaintext is unrecoverable by
-- design. Those keys still work for auth (hash lookup is unchanged); they just
-- can't surface the plaintext again — the UI will flag them as legacy.
--
-- New keys minted after this migration will populate `key_encrypted` and can
-- be revealed on demand by session-authenticated users (never by API-key
-- principals — prevents lateral movement).
--
-- Safe to re-run.
-- =============================================================================
-- Idempotent thanks to IF NOT EXISTS; legacy rows default to NULL.
ALTER TABLE vibn_workspace_api_keys
  ADD COLUMN IF NOT EXISTS key_encrypted TEXT;
-- Newline-separated string constants below concatenate into one comment
-- string (standard SQL literal concatenation).
COMMENT ON COLUMN vibn_workspace_api_keys.key_encrypted IS
  'base64( AES-256-GCM encrypt(VIBN_SECRETS_KEY, plaintext vibn_sk_...) ). '
  'NULL for legacy rows minted before this column existed — those keys '
  'remain valid for auth but cannot be revealed.';
-- Emits a row so psql/script runners show visible confirmation.
SELECT 'API-key revealability migration complete' AS status;

View File

@@ -0,0 +1,39 @@
-- =============================================================================
-- VIBN P5.3 — per-workspace GCS storage columns on vibn_workspaces
--
-- Adds the columns that ensureWorkspaceGcsProvisioned() persists into:
--
--   gcp_service_account_email   — workspace's dedicated GCP SA, e.g.
--                                 vibn-ws-mark@master-ai-484822.iam.gserviceaccount.com
--   gcp_service_account_key_enc — base64( secret-box(SA JSON keyfile) ).
--                                 Currently only used for runtime auth from app
--                                 code (env injection); control-plane auth still
--                                 uses GOOGLE_SERVICE_ACCOUNT_KEY_B64.
--   gcs_default_bucket_name     — globally-unique GCS bucket created on first
--                                 provision, e.g. vibn-ws-mark-a3f9c1.
--   gcs_hmac_access_id          — S3-compatible HMAC access key id (plain text;
--                                 not a secret on its own).
--   gcs_hmac_secret_enc         — base64( secret-box(HMAC secret) ). Decrypted
--                                 only when STORAGE_SECRET_ACCESS_KEY needs to be
--                                 injected into a Coolify app.
--   gcp_provision_status        — independent of provision_status so a partial
--                                 GCP failure does not flip the whole workspace.
--                                 Values: 'pending' | 'partial' | 'ready' | 'error'.
--   gcp_provision_error         — last error message from the GCP provisioner.
--
-- Safe to re-run.
-- =============================================================================
-- Idempotent thanks to IF NOT EXISTS on every column.
ALTER TABLE vibn_workspaces
  ADD COLUMN IF NOT EXISTS gcp_service_account_email TEXT,
  ADD COLUMN IF NOT EXISTS gcp_service_account_key_enc TEXT,
  ADD COLUMN IF NOT EXISTS gcs_default_bucket_name TEXT,
  ADD COLUMN IF NOT EXISTS gcs_hmac_access_id TEXT,
  ADD COLUMN IF NOT EXISTS gcs_hmac_secret_enc TEXT,
  ADD COLUMN IF NOT EXISTS gcp_provision_status TEXT NOT NULL DEFAULT 'pending',
  ADD COLUMN IF NOT EXISTS gcp_provision_error TEXT;
-- Supports filtering workspaces by gcp_provision_status.
CREATE INDEX IF NOT EXISTS vibn_workspaces_gcp_status_idx
  ON vibn_workspaces (gcp_provision_status);
-- Emits a row so psql/script runners show visible confirmation.
SELECT 'P5.3 workspace-GCS migration complete' AS status;

View File

@@ -0,0 +1,86 @@
/**
* One-shot: run ensureWorkspaceGcsProvisioned() for a specific workspace
* slug against PROD GCP + PROD Postgres. Idempotent — safe to re-run.
*
* Unlike scripts/smoke-storage-e2e.ts this does NOT clean up; the whole
* point is to persist the workspace's provisioned state into the DB.
*
* Usage:
* cd vibn-frontend
* npx -y dotenv-cli -e ../.google.env -e .env.local -- \
* npx tsx scripts/provision-workspace-gcs.ts <slug>
*
* Required env:
* GOOGLE_SERVICE_ACCOUNT_KEY_B64 (from ../.google.env)
* DATABASE_URL (from .env.local, points at prod vibn-postgres)
* VIBN_SECRETS_KEY (from .env.local, ≥16 chars)
*/
import { queryOne } from '../lib/db-postgres';
import { ensureWorkspaceGcsProvisioned } from '../lib/workspace-gcs';
import type { VibnWorkspace } from '../lib/workspaces';
/**
 * Loads the workspace row by slug, runs the idempotent GCS provisioner,
 * then re-reads the row to prove the identifiers/ciphertexts persisted.
 * Exit codes: 0 success, 1 unknown slug or failure, 2 usage error.
 */
async function main(): Promise<void> {
  const slug = process.argv[2];
  if (!slug) {
    console.error('Usage: tsx scripts/provision-workspace-gcs.ts <workspace-slug>');
    process.exit(2);
  }
  console.log('━'.repeat(72));
  console.log(` Provision GCS for workspace: ${slug}`);
  console.log('━'.repeat(72));
  // Fetch the current row.
  const ws = await queryOne<VibnWorkspace>(
    `SELECT * FROM vibn_workspaces WHERE slug = $1`,
    [slug],
  );
  if (!ws) {
    console.error(`No vibn_workspaces row found for slug=${slug}`);
    process.exit(1);
  }
  console.log(` id : ${ws.id}`);
  console.log(` name : ${ws.name}`);
  console.log(` owner_user_id : ${ws.owner_user_id}`);
  // @ts-expect-error — new columns not yet in VibnWorkspace type
  console.log(` gcp_status : ${ws.gcp_provision_status ?? 'pending'}`);
  console.log('');
  console.log('Running ensureWorkspaceGcsProvisioned()…');
  const result = await ensureWorkspaceGcsProvisioned(ws);
  console.log('');
  console.log('━'.repeat(72));
  console.log(' RESULT');
  console.log('━'.repeat(72));
  console.log(` status : ${result.status}`);
  console.log(` SA : ${result.serviceAccountEmail}`);
  console.log(` bucket : ${result.bucket.name}`);
  console.log(` location : ${result.bucket.location}`);
  console.log(` created : ${result.bucket.timeCreated ?? '(pre-existing)'}`);
  console.log(` HMAC accessId : ${result.hmac.accessId}`);
  console.log('');
  // Re-read to confirm persistence.
  // Ciphertext columns are masked to '<enc N b64>' so secrets never hit logs.
  const after = await queryOne<Record<string, unknown>>(
    `SELECT gcp_service_account_email,
            CASE WHEN gcp_service_account_key_enc IS NOT NULL THEN '<enc '||length(gcp_service_account_key_enc)||' b64>' ELSE 'null' END AS sa_key,
            gcs_default_bucket_name,
            gcs_hmac_access_id,
            CASE WHEN gcs_hmac_secret_enc IS NOT NULL THEN '<enc '||length(gcs_hmac_secret_enc)||' b64>' ELSE 'null' END AS hmac_secret,
            gcp_provision_status,
            gcp_provision_error
       FROM vibn_workspaces WHERE id = $1`,
    [ws.id],
  );
  console.log('DB row after:');
  console.log(JSON.stringify(after, null, 2));
  process.exit(0);
}
main().catch(err => {
  console.error('[provision-workspace-gcs] FAILED:', err);
  process.exit(1);
});

View File

@@ -0,0 +1,415 @@
/**
* P5.3 — End-to-end smoke for per-workspace GCS provisioning.
*
* What this exercises (against PROD GCP — master-ai-484822):
* 1. ensureWorkspaceServiceAccount → creates a throwaway SA
* (vibn-ws-smoke-{ts}@…). Idempotent.
* 2. createServiceAccountKey → mints + base64-encodes a JSON key.
* 3. createBucket → creates vibn-ws-smoke-{ts}-{6char}
* in northamerica-northeast1 with uniform bucket-level access ON
* and public access prevention enforced.
* 4. addBucketIamBinding → grants the throwaway SA
* roles/storage.objectAdmin on the bucket only.
* 5. createHmacKey → mints S3-compatible HMAC creds
* tied to the throwaway SA.
* 6. (verify) HMAC PUT/GET → uploads a 12-byte object via the
* GCS XML API using AWS SigV4 with the HMAC creds, reads it back,
* deletes it. Proves the credentials actually work.
*
* Cleanup (best-effort, runs even on failure):
* - Deletes the test object.
* - Deactivates + deletes the HMAC key.
* - Deletes all keys on the SA (so the SA itself can be removed).
* - Deletes the bucket.
* - Deletes the SA.
*
* NO Postgres writes. NO Coolify writes. NO project-level IAM changes.
* Everything created has a "smoke-" prefix and a "purpose=smoke" label
* so leftovers are obvious in the GCP console.
*
* Required env (load from /Users/markhenderson/master-ai/.google.env):
* GOOGLE_SERVICE_ACCOUNT_KEY_B64 base64 of vibn-workspace-provisioner SA JSON
* GCP_PROJECT_ID defaults to master-ai-484822
*
* Usage:
* cd vibn-frontend
* npx -y dotenv-cli -e ../.google.env -- npx tsx scripts/smoke-storage-e2e.ts
*/
import { createHash, createHmac } from 'crypto';
import { GCP_PROJECT_ID } from '../lib/gcp-auth';
import {
ensureWorkspaceServiceAccount,
createServiceAccountKey,
workspaceServiceAccountEmail,
workspaceServiceAccountId,
} from '../lib/gcp/iam';
import {
createBucket,
deleteBucket,
addBucketIamBinding,
getBucketIamPolicy,
createHmacKey,
deleteHmacKey,
workspaceDefaultBucketName,
VIBN_GCS_LOCATION,
} from '../lib/gcp/storage';
// Unique-per-run suffix so concurrent/leftover smoke resources never collide.
const ts = Date.now().toString(36);
const SLUG = `smoke-${ts}`;
// SA identity and bucket name are derived deterministically from the slug.
const SA_EMAIL = workspaceServiceAccountEmail(SLUG);
const SA_ID = workspaceServiceAccountId(SLUG);
const BUCKET = workspaceDefaultBucketName(SLUG);
// Tiny round-trip payload; the non-ASCII char also exercises UTF-8 handling.
const TEST_OBJECT_KEY = 'smoke/hello.txt';
const TEST_OBJECT_BODY = 'vibn smoke ✓';
/** Prints the run header: project, generated slug/SA/bucket, region. */
function banner(): void {
  console.log('━'.repeat(72));
  console.log(' VIBN P5.3 GCS provisioning smoke (PROD GCP — master-ai-484822)');
  console.log('━'.repeat(72));
  console.log(` project : ${GCP_PROJECT_ID}`);
  console.log(` slug : ${SLUG}`);
  console.log(` SA : ${SA_EMAIL}`);
  console.log(` bucket : ${BUCKET}`);
  console.log(` location : ${VIBN_GCS_LOCATION}`);
  console.log('');
}
/** Mutable ledger of what main() actually created, consumed by cleanup(). */
interface State {
  saCreated: boolean;
  // Full resource name of the minted SA key (display only).
  saKeyName?: string;
  bucketCreated: boolean;
  // Set once an HMAC key exists so cleanup can delete it.
  hmacAccessId?: string;
  // True while the test object exists in the bucket.
  uploadedObject: boolean;
}
/**
 * Runs the six smoke steps in order, recording created resources in
 * `state` so the finally-block can tear them down even on failure.
 * Failures set process.exitCode = 1 rather than throwing past main().
 */
async function main(): Promise<void> {
  banner();
  const state: State = { saCreated: false, bucketCreated: false, uploadedObject: false };
  try {
    // ── 1. Service account ────────────────────────────────────────────
    console.log('[1/6] Ensure service account…');
    const sa = await ensureWorkspaceServiceAccount({ slug: SLUG, workspaceName: SLUG });
    state.saCreated = true;
    console.log(`${sa.email}`);
    // ── 2. Service-account key ────────────────────────────────────────
    console.log('[2/6] Mint service-account JSON key…');
    const key = await createServiceAccountKey(sa.email);
    state.saKeyName = key.name;
    console.log(` ✓ key.name=${key.name.split('/').slice(-1)[0]} (privateKeyData ${key.privateKeyData.length} chars b64)`);
    // ── 3. Bucket ────────────────────────────────────────────────────
    console.log('[3/6] Create bucket (uniform BLA on, public-access prevention enforced)…');
    const bucket = await createBucket({
      name: BUCKET,
      location: VIBN_GCS_LOCATION,
      enforcePublicAccessPrevention: true,
      workspaceSlug: SLUG,
    });
    state.bucketCreated = true;
    console.log(`${bucket.name} in ${bucket.location}`);
    // ── 4. Bucket IAM binding ────────────────────────────────────────
    console.log('[4/6] Add roles/storage.objectAdmin binding for the workspace SA…');
    await addBucketIamBinding({
      bucketName: bucket.name,
      role: 'roles/storage.objectAdmin',
      member: `serviceAccount:${sa.email}`,
    });
    // Read the policy back: the binding must actually be visible.
    const policy = await getBucketIamPolicy(bucket.name);
    const binding = policy.bindings?.find(
      b => b.role === 'roles/storage.objectAdmin' && b.members.includes(`serviceAccount:${sa.email}`),
    );
    if (!binding) {
      throw new Error('IAM binding did not stick — workspace SA not in objectAdmin members');
    }
    console.log(` ✓ binding present (${binding.members.length} member(s) on ${binding.role})`);
    // ── 5. HMAC key ──────────────────────────────────────────────────
    console.log('[5/6] Mint HMAC key for the workspace SA…');
    const hmac = await createHmacKey(sa.email);
    state.hmacAccessId = hmac.accessId;
    console.log(` ✓ accessId=${hmac.accessId} state=${hmac.state}`);
    // HMAC keys take a few seconds to become usable on the GCS XML API.
    // Without this delay we usually get "InvalidAccessKeyId" on the
    // very first request.
    console.log(' … waiting 6s for HMAC propagation');
    await sleep(6000);
    // ── 6. Verify HMAC creds work via S3-compatible XML API ─────────
    console.log('[6/6] PUT / GET / DELETE a tiny object via the XML API using HMAC creds…');
    await s3PutObject({
      accessKeyId: hmac.accessId,
      secretAccessKey: hmac.secret,
      bucket: bucket.name,
      key: TEST_OBJECT_KEY,
      body: Buffer.from(TEST_OBJECT_BODY, 'utf-8'),
      contentType: 'text/plain; charset=utf-8',
    });
    state.uploadedObject = true;
    console.log(` ✓ PUT ${TEST_OBJECT_KEY}`);
    const got = await s3GetObject({
      accessKeyId: hmac.accessId,
      secretAccessKey: hmac.secret,
      bucket: bucket.name,
      key: TEST_OBJECT_KEY,
    });
    if (got.toString('utf-8') !== TEST_OBJECT_BODY) {
      throw new Error(`GET body mismatch: ${JSON.stringify(got.toString('utf-8'))}`);
    }
    console.log(` ✓ GET round-trip body matches`);
    await s3DeleteObject({
      accessKeyId: hmac.accessId,
      secretAccessKey: hmac.secret,
      bucket: bucket.name,
      key: TEST_OBJECT_KEY,
    });
    state.uploadedObject = false;
    console.log(` ✓ DELETE`);
    console.log('');
    console.log('━'.repeat(72));
    console.log(' SUMMARY');
    console.log('━'.repeat(72));
    console.log(' SA create+key : ✓');
    console.log(' Bucket create : ✓');
    console.log(' Bucket IAM binding : ✓');
    console.log(' HMAC key + S3 round-trip : ✓');
    console.log('');
    console.log(' All 4 building blocks of P5.3 vertical slice proven against PROD GCP.');
  } catch (err) {
    console.error('');
    console.error('[smoke-storage-e2e] FAILED:', err);
    process.exitCode = 1;
  } finally {
    console.log('');
    console.log('Cleanup…');
    // Best-effort teardown; a cleanup error must not mask the original one.
    await cleanup(state).catch(err => {
      console.error('[cleanup] non-fatal error:', err);
    });
  }
}
/**
 * Best-effort teardown in reverse dependency order: test object →
 * HMAC key → bucket → SA (+ its user-managed keys). Each step logs and
 * continues so one stuck resource doesn't strand the rest.
 */
async function cleanup(state: State): Promise<void> {
  // Object (best-effort; usually already deleted on the happy path).
  if (state.uploadedObject && state.hmacAccessId) {
    // Deliberately a no-op today:
    // The credential needed to delete the object lives only in the
    // smoke run's memory; if we crashed before saving the secret,
    // we can't delete it as the workspace SA. Fall back to deleting
    // the bucket which atomically removes contents (deleteBucket
    // requires an empty bucket — use force-delete via objects.delete
    // listing if it ever matters).
  }
  // HMAC key.
  if (state.hmacAccessId) {
    try {
      await deleteHmacKey(state.hmacAccessId);
      console.log(` ✓ HMAC ${state.hmacAccessId} deleted`);
    } catch (err) {
      console.warn(` ⚠ HMAC delete failed:`, err);
    }
  }
  // Bucket. Must be empty; if a test object survived, list+delete first.
  if (state.bucketCreated) {
    try {
      // Try a hard delete; if the bucket has objects we'll get 409.
      await deleteBucket(BUCKET);
      console.log(` ✓ bucket ${BUCKET} deleted`);
    } catch (err) {
      console.warn(` ⚠ bucket delete failed (objects may remain):`, err);
    }
  }
  // SA keys + SA itself.
  if (state.saCreated) {
    try {
      await deleteAllSaKeysAndSa(SA_EMAIL);
      console.log(` ✓ SA ${SA_EMAIL} + keys deleted`);
    } catch (err) {
      console.warn(` ⚠ SA cleanup failed:`, err);
    }
  }
}
// ────────────────────────────────────────────────────────────────────
// Helpers — SA cleanup using the IAM API directly (the lib only exposes
// create paths).
// ────────────────────────────────────────────────────────────────────
import { getGcpAccessToken } from '../lib/gcp-auth';
/**
 * Removes every user-managed key on the SA, then the SA itself, talking
 * to the IAM API directly (the lib only exposes create paths).
 * Key-delete failures are logged and skipped; only a failed SA delete
 * (other than 404) throws.
 */
async function deleteAllSaKeysAndSa(email: string): Promise<void> {
  const token = await getGcpAccessToken();
  const base = `https://iam.googleapis.com/v1/projects/${GCP_PROJECT_ID}/serviceAccounts/${encodeURIComponent(email)}`;
  const authHeaders = { Authorization: `Bearer ${token}` };
  // Only USER_MANAGED keys are requested — system-managed keys can't be
  // deleted anyway.
  const listRes = await fetch(`${base}/keys?keyTypes=USER_MANAGED`, { headers: authHeaders });
  if (listRes.ok) {
    const { keys = [] } = (await listRes.json()) as { keys?: { name: string }[] };
    for (const key of keys) {
      const id = key.name.split('/').pop();
      if (!id) continue;
      const delRes = await fetch(`${base}/keys/${id}`, {
        method: 'DELETE',
        headers: authHeaders,
      });
      if (!delRes.ok && delRes.status !== 404) {
        console.warn(` ⚠ key ${id} delete → ${delRes.status}`);
      }
    }
  }
  // Delete the SA itself; 404 means it's already gone.
  const saRes = await fetch(base, { method: 'DELETE', headers: authHeaders });
  if (!saRes.ok && saRes.status !== 404) {
    throw new Error(`SA delete → ${saRes.status} ${await saRes.text()}`);
  }
}
// ────────────────────────────────────────────────────────────────────
// AWS SigV4 against the GCS XML API
//
// We re-implement SigV4 here rather than pulling in @aws-sdk to keep
// this script dependency-light. GCS treats the bucket as a virtual host
// (https://{bucket}.storage.googleapis.com/{key}) and uses region
// "auto" with service "s3".
// ────────────────────────────────────────────────────────────────────
/**
 * HMAC credential pair used for SigV4 signing against the GCS XML API
 * (S3-interoperability mode).
 */
interface S3Creds {
  accessKeyId: string;
  secretAccessKey: string;
}
/**
 * Upload an object through the GCS XML API using SigV4 (S3-compat)
 * auth against the bucket's virtual host.
 *
 * @param opts bucket/key/body plus HMAC credentials; `contentType` is
 *   forwarded (and signed) as the Content-Type header when provided.
 * @throws Error on any non-2xx response, including status and body.
 */
async function s3PutObject(opts: S3Creds & {
  bucket: string;
  key: string;
  body: Buffer;
  contentType?: string;
}): Promise<void> {
  // NOTE(review): encodeURIComponent also escapes '/', so a key with
  // slashes is signed as one path segment — fine for this script's
  // flat keys; revisit if nested keys are ever used.
  const url = `https://${opts.bucket}.storage.googleapis.com/${encodeURIComponent(opts.key)}`;
  const res = await sigv4Fetch({
    method: 'PUT',
    url,
    body: opts.body,
    contentType: opts.contentType,
    accessKeyId: opts.accessKeyId,
    secretAccessKey: opts.secretAccessKey,
  });
  // Match the script's "label → status body" error shape; previously
  // the key and status were concatenated with no separator.
  if (!res.ok) throw new Error(`PUT ${opts.key} → ${res.status} ${await res.text()}`);
}
/**
 * Download an object through the GCS XML API using SigV4 auth.
 *
 * @param opts bucket/key plus HMAC credentials.
 * @returns the object's bytes.
 * @throws Error on any non-2xx response, including status and body.
 */
async function s3GetObject(opts: S3Creds & { bucket: string; key: string }): Promise<Buffer> {
  const url = `https://${opts.bucket}.storage.googleapis.com/${encodeURIComponent(opts.key)}`;
  const res = await sigv4Fetch({
    method: 'GET',
    url,
    accessKeyId: opts.accessKeyId,
    secretAccessKey: opts.secretAccessKey,
  });
  // Match the script's "label → status body" error shape; previously
  // the key and status were concatenated with no separator.
  if (!res.ok) throw new Error(`GET ${opts.key} → ${res.status} ${await res.text()}`);
  return Buffer.from(await res.arrayBuffer());
}
/**
 * Delete an object through the GCS XML API using SigV4 auth.
 * A 404 is tolerated — the object is already gone, which is the goal.
 *
 * @param opts bucket/key plus HMAC credentials.
 * @throws Error on any other non-2xx response.
 */
async function s3DeleteObject(opts: S3Creds & { bucket: string; key: string }): Promise<void> {
  const url = `https://${opts.bucket}.storage.googleapis.com/${encodeURIComponent(opts.key)}`;
  const res = await sigv4Fetch({
    method: 'DELETE',
    url,
    accessKeyId: opts.accessKeyId,
    secretAccessKey: opts.secretAccessKey,
  });
  if (!res.ok && res.status !== 404) {
    // Match the script's "label → status body" error shape; previously
    // the key and status were concatenated with no separator.
    throw new Error(`DELETE ${opts.key} → ${res.status} ${await res.text()}`);
  }
}
/** One signed request: HTTP verb + absolute URL, optional payload, creds. */
interface SigV4FetchOpts extends S3Creds {
  method: 'GET' | 'PUT' | 'DELETE';
  url: string;
  body?: Buffer; // raw payload; its SHA-256 goes into x-amz-content-sha256
  contentType?: string; // forwarded as content-type and signed when present
}
/**
 * Perform one AWS SigV4-signed request against the GCS XML API.
 *
 * Hand-rolls the SigV4 flow: canonical request → string-to-sign →
 * derived signing key → Authorization header, with region "auto" and
 * service "s3" (the values the section header above prescribes for
 * GCS's S3-compatible endpoint).
 *
 * @param opts verb/url/body plus the HMAC key pair to sign with.
 * @returns the raw fetch Response; status checking is the caller's job.
 */
async function sigv4Fetch(opts: SigV4FetchOpts): Promise<Response> {
  const { method, url, body, contentType, accessKeyId, secretAccessKey } = opts;
  const u = new URL(url);
  const host = u.host;
  const path = u.pathname || '/';
  // NOTE(review): the query string is used verbatim. SigV4 requires a
  // sorted, URI-encoded canonical query string; every call site in this
  // script sends no query, so this is currently safe — revisit before
  // adding query parameters.
  const query = u.search.slice(1);
  // Timestamps: amzDate is the 20240101T000000Z compact form; the
  // date-only prefix scopes the derived signing key.
  const now = new Date();
  const amzDate = now.toISOString().replace(/[:-]|\.\d{3}/g, '');
  const dateStamp = amzDate.slice(0, 8);
  const region = 'auto';
  const service = 's3';
  // Hex SHA-256 of the payload; body-less requests sign the hash of ''.
  const payloadHash = body
    ? createHash('sha256').update(body).digest('hex')
    : createHash('sha256').update('').digest('hex');
  // Headers that participate in the signature. content-type and
  // content-length are only included (and therefore signed) when set.
  const headers: Record<string, string> = {
    host,
    'x-amz-date': amzDate,
    'x-amz-content-sha256': payloadHash,
  };
  if (contentType) headers['content-type'] = contentType;
  if (body) headers['content-length'] = String(body.length);
  // SignedHeaders: lowercase names, sorted, ';'-joined.
  const signedHeaders = Object.keys(headers).map(k => k.toLowerCase()).sort().join(';');
  // Canonical headers: 'name:value\n' pairs in the same sorted order.
  const canonicalHeaders =
    Object.keys(headers)
      .map(k => [k.toLowerCase(), String(headers[k]).trim()] as const)
      .sort(([a], [b]) => a.localeCompare(b))
      .map(([k, v]) => `${k}:${v}\n`)
      .join('');
  // Canonical request — field order is mandated by the SigV4 spec.
  const canonicalRequest = [
    method,
    path,
    query,
    canonicalHeaders,
    signedHeaders,
    payloadHash,
  ].join('\n');
  const credentialScope = `${dateStamp}/${region}/${service}/aws4_request`;
  const stringToSign = [
    'AWS4-HMAC-SHA256',
    amzDate,
    credentialScope,
    createHash('sha256').update(canonicalRequest).digest('hex'),
  ].join('\n');
  // Key derivation: HMAC chain date → region → service → "aws4_request".
  const kDate = createHmac('sha256', `AWS4${secretAccessKey}`).update(dateStamp).digest();
  const kRegion = createHmac('sha256', kDate).update(region).digest();
  const kService = createHmac('sha256', kRegion).update(service).digest();
  const kSigning = createHmac('sha256', kService).update('aws4_request').digest();
  const signature = createHmac('sha256', kSigning).update(stringToSign).digest('hex');
  const authorization =
    `AWS4-HMAC-SHA256 Credential=${accessKeyId}/${credentialScope}, ` +
    `SignedHeaders=${signedHeaders}, Signature=${signature}`;
  // NOTE(review): 'host' is a forbidden fetch header and is likely
  // dropped by the runtime; the URL supplies the same host value, so
  // the signed value should still match what the server sees — confirm
  // under the target Node version.
  return fetch(url, {
    method,
    headers: { ...headers, Authorization: authorization },
    // Buffer → Uint8Array so fetch accepts the body as a BufferSource.
    body: body ? new Uint8Array(body) : undefined,
  });
}
/** Resolve after roughly `ms` milliseconds (setTimeout-backed). */
function sleep(ms: number): Promise<void> {
  return new Promise<void>(done => {
    setTimeout(done, ms);
  });
}
main();