Initial commit: Product OS platform

- Control Plane API with Gemini integration
- Executors: Deploy, Analytics, Marketing
- MCP Adapter for Continue integration
- VSCode/VSCodium extension
- Tool registry and run tracking
- In-memory storage for local dev
- Terraform infrastructure setup
This commit is contained in:
2026-01-19 20:34:43 -08:00
commit b6d7148ded
58 changed files with 5365 additions and 0 deletions

View File

@@ -0,0 +1,30 @@
{
"name": "@productos/control-plane",
"version": "0.1.0",
"private": true,
"type": "module",
"main": "dist/index.js",
"scripts": {
"dev": "tsx watch src/index.ts",
"build": "tsc -p tsconfig.json",
"start": "node dist/index.js",
"lint": "eslint ."
},
"dependencies": {
"@google-cloud/firestore": "^7.11.0",
"@google-cloud/storage": "^7.14.0",
"@fastify/cors": "^10.0.0",
"@fastify/helmet": "^13.0.0",
"@fastify/rate-limit": "^10.0.0",
"@fastify/sensible": "^6.0.0",
"fastify": "^5.0.0",
"zod": "^3.23.8",
"nanoid": "^5.0.7"
},
"devDependencies": {
"@types/node": "^22.0.0",
"tsx": "^4.19.0",
"typescript": "^5.5.4",
"eslint": "^9.8.0"
}
}

View File

@@ -0,0 +1,11 @@
import { FastifyRequest } from "fastify";
import { config } from "./config.js";
/**
 * Authorization gate awaited at the top of every protected route handler.
 * V1: dev mode = trust caller.
 * V2: validate Google OAuth/IAP identity token.
 */
export async function requireAuth(req: FastifyRequest) {
  // Anything other than dev mode is not implemented yet — fail loudly rather
  // than silently letting requests through.
  if (config.authMode !== "dev") {
    throw new Error("AUTH_MODE oauth not yet implemented");
  }
}

View File

@@ -0,0 +1,10 @@
/**
 * Process-wide configuration, resolved once from the environment at import
 * time. Every setting has a sensible local-development default.
 */
const env = process.env;

export const config = {
  port: Number(env.PORT ?? 8080),
  projectId: env.GCP_PROJECT_ID ?? "productos-local",
  artifactsBucket: env.GCS_BUCKET_ARTIFACTS ?? "productos-artifacts-local",
  runsCollection: env.FIRESTORE_COLLECTION_RUNS ?? "runs",
  toolsCollection: env.FIRESTORE_COLLECTION_TOOLS ?? "tools",
  authMode: env.AUTH_MODE ?? "dev",
  // Use in-memory storage when STORAGE_MODE=memory or when no GCP project is configured
  storageMode: env.STORAGE_MODE ?? (env.GCP_PROJECT_ID ? "gcp" : "memory")
};

View File

@@ -0,0 +1,365 @@
/**
* Gemini Integration for Product OS
*
* Supports:
* - Chat completions with streaming
* - Tool/function calling
* - Context-aware responses
*
* Set GOOGLE_CLOUD_PROJECT and optionally GEMINI_MODEL env vars.
* For local dev without Vertex AI, set GEMINI_API_KEY for AI Studio.
*/
import { config } from "./config.js";
/** A single turn in a chat conversation. */
export interface ChatMessage {
  // "assistant" is mapped to Gemini's "model" role before sending (see chat()).
  role: "user" | "assistant" | "system";
  content: string;
}

/** A function invocation requested by the model. */
export interface ToolCall {
  name: string;
  // Raw argument object as returned by the model; shape depends on the tool.
  arguments: Record<string, any>;
}

/** Normalized result of one chat completion (real or mocked). */
export interface ChatResponse {
  // Assistant text; empty string when the model asked for tool calls instead.
  message: string;
  toolCalls?: ToolCall[];
  finishReason: "stop" | "tool_calls" | "error";
}
/**
 * Function declarations exposed to Gemini for tool calling.
 *
 * Shapes follow the Gemini functionDeclarations format (name, description,
 * JSON-Schema-like parameters). The descriptions double as routing hints for
 * the model — they state WHEN to pick each tool. Names here are the
 * model-facing names; routes/chat.ts maps them to registry tool names.
 */
export const PRODUCT_OS_TOOLS = [
  {
    name: "deploy_service",
    description: "Deploy a Cloud Run service. Use when user wants to deploy, ship, or launch code.",
    parameters: {
      type: "object",
      properties: {
        service_name: { type: "string", description: "Name of the service to deploy" },
        repo: { type: "string", description: "Git repository URL" },
        ref: { type: "string", description: "Git branch, tag, or commit" },
        env: { type: "string", enum: ["dev", "staging", "prod"], description: "Target environment" }
      },
      required: ["service_name"]
    }
  },
  {
    name: "get_funnel_analytics",
    description: "Get funnel conversion metrics. Use when user asks about funnels, conversions, or drop-offs.",
    parameters: {
      type: "object",
      properties: {
        range_days: { type: "integer", description: "Number of days to analyze", default: 30 }
      }
    }
  },
  {
    name: "get_top_drivers",
    description: "Identify top factors driving a metric. Use when user asks why something changed or what drives conversions.",
    parameters: {
      type: "object",
      properties: {
        metric: { type: "string", description: "The metric to analyze (e.g., 'conversion', 'retention')" },
        range_days: { type: "integer", description: "Number of days to analyze", default: 30 }
      },
      required: ["metric"]
    }
  },
  {
    name: "generate_marketing_posts",
    description: "Generate social media posts for a campaign. Use when user wants to create marketing content.",
    parameters: {
      type: "object",
      properties: {
        goal: { type: "string", description: "Campaign goal (e.g., 'launch announcement')" },
        product: { type: "string", description: "Product or feature name" },
        channels: {
          type: "array",
          items: { type: "string" },
          description: "Social channels (e.g., ['x', 'linkedin'])"
        }
      },
      required: ["goal"]
    }
  },
  {
    name: "get_service_status",
    description: "Check the status of a deployed service. Use when user asks about service health or deployment status.",
    parameters: {
      type: "object",
      properties: {
        service_name: { type: "string", description: "Name of the service" },
        region: { type: "string", description: "GCP region", default: "us-central1" }
      },
      required: ["service_name"]
    }
  },
  {
    // Not dispatched to an HTTP executor — handled inline in routes/chat.ts.
    name: "generate_code",
    description: "Generate or modify code. Use when user asks to write, fix, refactor, or change code.",
    parameters: {
      type: "object",
      properties: {
        task: { type: "string", description: "What code change to make" },
        file_path: { type: "string", description: "Target file path (if known)" },
        language: { type: "string", description: "Programming language" },
        context: { type: "string", description: "Additional context about the codebase" }
      },
      required: ["task"]
    }
  }
];
// System prompt for the Product OS assistant. chat() injects it as the first
// (fake) user turn because this request shape has no dedicated system role.
const SYSTEM_PROMPT = `You are Product OS, an AI assistant specialized in helping users launch and operate SaaS products on Google Cloud.
You can help with:
- Deploying services to Cloud Run
- Analyzing product metrics and funnels
- Generating marketing content
- Writing and modifying code
- Understanding what drives user behavior
When users ask you to do something, use the available tools to take action. Be concise and helpful.
If a user asks about code, analyze their request and either:
1. Use generate_code tool for code changes
2. Provide explanations directly
Always confirm before taking destructive actions like deploying to production.`;
/**
 * Run one chat completion against Gemini.
 *
 * Backend selection: AI Studio when GEMINI_API_KEY is set (local dev),
 * Vertex AI when a real GCP project is configured (production), otherwise a
 * keyword-driven mock so the product works with no credentials at all.
 */
export async function chat(
  messages: ChatMessage[],
  options: { stream?: boolean } = {}
): Promise<ChatResponse> {
  const apiKey = process.env.GEMINI_API_KEY;
  const projectId = config.projectId;
  const model = process.env.GEMINI_MODEL ?? "gemini-1.5-flash";

  // Prime the conversation with a fake user/model exchange carrying the
  // system prompt, then append the real turns ("assistant" -> "model").
  const primer = [
    { role: "user", parts: [{ text: SYSTEM_PROMPT }] },
    { role: "model", parts: [{ text: "Understood. I'm Product OS, ready to help you build and operate your SaaS product. How can I help?" }] }
  ];
  const turns = messages.map((m) => ({
    role: m.role === "assistant" ? "model" : "user",
    parts: [{ text: m.content }]
  }));
  const contents = [...primer, ...turns];
  const tools = [{ functionDeclarations: PRODUCT_OS_TOOLS }];

  // Local dev path: AI Studio API key.
  if (apiKey) {
    return chatWithAIStudio(apiKey, model, contents, tools);
  }
  // Production path: Vertex AI with application default credentials.
  if (projectId && projectId !== "productos-local") {
    return chatWithVertexAI(projectId, model, contents, tools);
  }
  // No credentials configured: canned keyword-matched responses.
  return mockChat(messages);
}
/**
 * Call Gemini via Google AI Studio (generativelanguage.googleapis.com).
 *
 * The API key is sent in the `x-goog-api-key` request header instead of a
 * `?key=` query parameter so it cannot leak into access logs, proxies, or
 * anything that records the request URL. (Both forms are supported by the
 * Gemini API; the header is the recommended one.)
 *
 * @throws Error when the API responds non-2xx (body is logged, not rethrown).
 */
async function chatWithAIStudio(
  apiKey: string,
  model: string,
  contents: any[],
  tools: any[]
): Promise<ChatResponse> {
  const url = `https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent`;
  const response = await fetch(url, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-goog-api-key": apiKey
    },
    body: JSON.stringify({
      contents,
      tools,
      generationConfig: {
        temperature: 0.7,
        maxOutputTokens: 2048
      }
    })
  });
  if (!response.ok) {
    const error = await response.text();
    console.error("Gemini API error:", error);
    throw new Error(`Gemini API error: ${response.status}`);
  }
  const data = await response.json();
  return parseGeminiResponse(data);
}
/**
 * Call Gemini through Vertex AI using application default credentials.
 * Region defaults to us-central1; override with VERTEX_LOCATION.
 *
 * @throws Error when the API responds non-2xx (body is logged, not rethrown).
 */
async function chatWithVertexAI(
  projectId: string,
  model: string,
  contents: any[],
  tools: any[]
): Promise<ChatResponse> {
  const location = process.env.VERTEX_LOCATION ?? "us-central1";
  const endpoint =
    `https://${location}-aiplatform.googleapis.com/v1/projects/${projectId}` +
    `/locations/${location}/publishers/google/models/${model}:generateContent`;

  // Lazily import the auth library and mint an OAuth access token
  // (requires gcloud auth or a service account via ADC).
  const { GoogleAuth } = await import("google-auth-library");
  const auth = new GoogleAuth({ scopes: ["https://www.googleapis.com/auth/cloud-platform"] });
  const client = await auth.getClient();
  const token = await client.getAccessToken();

  const payload = {
    contents,
    tools,
    generationConfig: {
      temperature: 0.7,
      maxOutputTokens: 2048
    }
  };
  const response = await fetch(endpoint, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${token.token}`
    },
    body: JSON.stringify(payload)
  });
  if (!response.ok) {
    const error = await response.text();
    console.error("Vertex AI error:", error);
    throw new Error(`Vertex AI error: ${response.status}`);
  }
  return parseGeminiResponse(await response.json());
}
function parseGeminiResponse(data: any): ChatResponse {
const candidate = data.candidates?.[0];
if (!candidate) {
return { message: "No response from Gemini", finishReason: "error" };
}
const content = candidate.content;
const parts = content?.parts ?? [];
// Check for function calls
const functionCalls = parts.filter((p: any) => p.functionCall);
if (functionCalls.length > 0) {
const toolCalls = functionCalls.map((p: any) => ({
name: p.functionCall.name,
arguments: p.functionCall.args ?? {}
}));
return {
message: "",
toolCalls,
finishReason: "tool_calls"
};
}
// Regular text response
const text = parts.map((p: any) => p.text ?? "").join("");
return {
message: text,
finishReason: "stop"
};
}
/**
 * Keyword-driven stand-in for Gemini, used in local development when no
 * credentials are configured. Inspects the last message and fabricates the
 * tool call a real model would most plausibly have made.
 */
function mockChat(messages: ChatMessage[]): ChatResponse {
  const lastMessage = messages[messages.length - 1]?.content.toLowerCase() ?? "";
  const mentions = (...words: string[]) => words.some((w) => lastMessage.includes(w));
  const toolCall = (name: string, args: Record<string, any>): ChatResponse => ({
    message: "",
    toolCalls: [{ name, arguments: args }],
    finishReason: "tool_calls"
  });

  // Marketing/campaign is checked FIRST (before deploy) since "launch" can be
  // ambiguous between shipping code and a marketing launch.
  if (mentions("marketing", "campaign", "post") || (mentions("launch") && !mentions("deploy"))) {
    return toolCall("generate_marketing_posts", { goal: "product launch", channels: ["x", "linkedin"] });
  }
  // Simple keyword matching to simulate the remaining tool calls.
  if (mentions("deploy", "ship", "staging", "production")) {
    const env = mentions("staging") ? "staging" : mentions("prod") ? "prod" : "dev";
    return toolCall("deploy_service", { service_name: "my-service", env });
  }
  if (mentions("funnel", "conversion", "analytics")) {
    return toolCall("get_funnel_analytics", { range_days: 30 });
  }
  if (mentions("why", "driver", "cause")) {
    return toolCall("get_top_drivers", { metric: "conversion", range_days: 30 });
  }
  if (mentions("status", "health")) {
    return toolCall("get_service_status", { service_name: "my-service" });
  }
  if (mentions("code", "function", "write", "create")) {
    return toolCall("generate_code", { task: lastMessage, language: "typescript" });
  }

  // Nothing matched: introduce the assistant's capabilities.
  return {
    message: `I'm Product OS, your AI assistant for building and operating SaaS products. I can help you:
• **Deploy** services to Cloud Run
• **Analyze** funnel metrics and conversions
• **Generate** marketing content
• **Understand** what drives user behavior
What would you like to do?
_(Note: Running in mock mode - set GEMINI_API_KEY for real AI responses)_`,
    finishReason: "stop"
  };
}

View File

@@ -0,0 +1,29 @@
import Fastify from "fastify";
import cors from "@fastify/cors";
import helmet from "@fastify/helmet";
import rateLimit from "@fastify/rate-limit";
import sensible from "@fastify/sensible";
import { config } from "./config.js";
import { healthRoutes } from "./routes/health.js";
import { toolRoutes } from "./routes/tools.js";
import { runRoutes } from "./routes/runs.js";
import { chatRoutes } from "./routes/chat.js";
// Fastify's built-in (pino) logger doubles as the request logger.
const app = Fastify({ logger: true });
// Cross-cutting plugins are registered before any routes: CORS (currently
// wide-open via origin: true), security headers, httpErrors helpers
// (sensible), and a 300 req/min rate limit.
await app.register(cors, { origin: true });
await app.register(helmet);
await app.register(sensible);
await app.register(rateLimit, { max: 300, timeWindow: "1 minute" });
// Business routes.
await app.register(healthRoutes);
await app.register(toolRoutes);
await app.register(runRoutes);
await app.register(chatRoutes);
// Bind 0.0.0.0 so the server is reachable from outside its container.
app.listen({ port: config.port, host: "0.0.0.0" }).then(() => {
  console.log(`🚀 Control Plane API running on http://localhost:${config.port}`);
}).catch((err) => {
  app.log.error(err);
  process.exit(1);
});

View File

@@ -0,0 +1,10 @@
import type { ToolDef } from "./types.js";
import { listTools } from "./storage/index.js";
/**
 * Build a name -> definition map of every registered tool.
 * Simple registry. V2: cache + versioning + per-tenant overrides.
 */
export async function getRegistry(): Promise<Record<string, ToolDef>> {
  const registry: Record<string, ToolDef> = {};
  for (const tool of await listTools()) {
    registry[tool.name] = tool;
  }
  return registry;
}

View File

@@ -0,0 +1,306 @@
import type { FastifyInstance } from "fastify";
import { requireAuth } from "../auth.js";
import { chat, ChatMessage, ChatResponse, ToolCall } from "../gemini.js";
import { getRegistry } from "../registry.js";
import { saveRun, writeArtifactText } from "../storage/index.js";
import { nanoid } from "nanoid";
import type { RunRecord } from "../types.js";
/** Body of POST /chat. */
interface ChatRequest {
  messages: ChatMessage[];
  // Optional editor context; both kinds are prepended to the conversation as
  // extra user turns before calling the model.
  context?: {
    files?: { path: string; content: string }[];
    selection?: { path: string; text: string; startLine: number };
  };
  // When true (the default), tool calls returned by the model are executed
  // immediately and their run records returned alongside the reply.
  autoExecuteTools?: boolean;
}

/** Chat reply, extended with the run records of any auto-executed tools. */
interface ChatResponseWithRuns extends ChatResponse {
  runs?: RunRecord[];
}
export async function chatRoutes(app: FastifyInstance) {
  /**
   * Chat endpoint - proxies to Gemini with tool calling support.
   *
   * Optional request context (open files / editor selection) is prepended to
   * the conversation as extra user turns. When the model requests tool calls
   * and autoExecuteTools is on (the default), the tools run immediately and a
   * human-readable summary plus the run records are returned.
   */
  app.post<{ Body: ChatRequest }>("/chat", async (req): Promise<ChatResponseWithRuns> => {
    await requireAuth(req);
    const { messages, context, autoExecuteTools = true } = req.body;

    // Prepend context turns. Each prepend builds on the previous result so
    // file context and selection context can both be present at once.
    let enhancedMessages = [...messages];
    if (context?.files?.length) {
      const fileContext = context.files
        .map(f => `File: ${f.path}\n\`\`\`\n${f.content}\n\`\`\``)
        .join("\n\n");
      enhancedMessages = [
        { role: "user" as const, content: `Context:\n${fileContext}` },
        ...enhancedMessages
      ];
    }
    if (context?.selection) {
      const selectionContext = `Selected code in ${context.selection.path} (line ${context.selection.startLine}):\n\`\`\`\n${context.selection.text}\n\`\`\``;
      // BUGFIX: this previously spread `...messages`, silently dropping the
      // file context added above whenever both context kinds were supplied.
      enhancedMessages = [
        { role: "user" as const, content: selectionContext },
        ...enhancedMessages
      ];
    }

    // Call Gemini (or the local mock).
    const response = await chat(enhancedMessages);

    // If tool calls and auto-execute is enabled, run them and summarize.
    if (response.toolCalls && response.toolCalls.length > 0 && autoExecuteTools) {
      const runs = await executeToolCalls(response.toolCalls, req.body);
      const summary = generateToolSummary(response.toolCalls, runs);
      return {
        message: summary,
        toolCalls: response.toolCalls,
        runs,
        finishReason: "tool_calls"
      };
    }
    return response;
  });

  /**
   * Streaming chat endpoint (SSE). Placeholder: emits a single event stating
   * streaming is not implemented yet, then closes the stream.
   */
  app.get("/chat/stream", async (req, reply) => {
    await requireAuth(req);
    reply.raw.writeHead(200, {
      "Content-Type": "text/event-stream",
      "Cache-Control": "no-cache",
      "Connection": "keep-alive"
    });
    reply.raw.write(`data: ${JSON.stringify({ message: "Streaming not yet implemented", finishReason: "stop" })}\n\n`);
    reply.raw.end();
  });
}
/**
 * Map from the tool names Gemini calls to registry tool names. Hoisted to
 * module scope so it is not rebuilt on every iteration of every request.
 * "generate_code" is special - handled inline rather than dispatched.
 */
const TOOL_NAME_MAP: Record<string, string> = {
  "deploy_service": "cloudrun.deploy_service",
  "get_funnel_analytics": "analytics.funnel_summary",
  "get_top_drivers": "analytics.top_drivers",
  "generate_marketing_posts": "marketing.generate_channel_posts",
  "get_service_status": "cloudrun.get_service_status",
  "generate_code": "code.generate"
};

/**
 * Execute tool calls by routing each one to the appropriate executor.
 *
 * Calls run sequentially. Each produces a RunRecord that is persisted at
 * every status transition (queued -> running -> succeeded/failed) so partial
 * progress survives a crash. Unknown tool names are logged and skipped.
 * `request` is currently unused but kept so per-request options can be passed
 * through later without changing the signature.
 */
async function executeToolCalls(
  toolCalls: ToolCall[],
  request: ChatRequest
): Promise<RunRecord[]> {
  const runs: RunRecord[] = [];
  const registry = await getRegistry();
  for (const toolCall of toolCalls) {
    // Special handling for code generation (no HTTP executor).
    if (toolCall.name === "generate_code") {
      runs.push(await handleCodeGeneration(toolCall.arguments));
      continue;
    }
    const actualToolName = TOOL_NAME_MAP[toolCall.name];
    const tool = actualToolName ? registry[actualToolName] : null;
    if (!tool) {
      console.log(`Tool not found: ${toolCall.name} (mapped to ${actualToolName})`);
      continue;
    }
    // Create and persist the run before executing.
    const runId = `run_${new Date().toISOString().replace(/[-:.TZ]/g, "")}_${nanoid(8)}`;
    const now = new Date().toISOString();
    const run: RunRecord = {
      run_id: runId,
      tenant_id: "t_chat",
      tool: actualToolName,
      status: "queued",
      created_at: now,
      updated_at: now,
      input: toolCall.arguments,
      artifacts: { bucket: "", prefix: `runs/${runId}` }
    };
    await saveRun(run);
    try {
      run.status = "running";
      run.updated_at = new Date().toISOString();
      await saveRun(run);
      // Dispatch to the executor over HTTP.
      const execUrl = `${tool.executor.url}${tool.executor.path}`;
      const response = await fetch(execUrl, {
        method: "POST",
        headers: { "content-type": "application/json" },
        body: JSON.stringify({
          run_id: runId,
          tenant_id: "t_chat",
          input: toolCall.arguments
        })
      });
      if (!response.ok) {
        throw new Error(`Executor error: ${response.status}`);
      }
      run.status = "succeeded";
      run.output = await response.json();
      run.updated_at = new Date().toISOString();
      await saveRun(run);
    } catch (e: any) {
      run.status = "failed";
      run.error = { message: e.message };
      run.updated_at = new Date().toISOString();
      await saveRun(run);
    }
    runs.push(run);
  }
  return runs;
}
/**
 * Handle the "generate_code" tool inline (no HTTP executor).
 *
 * V1 returns a canned unified diff that echoes the task; the run record is
 * persisted and the diff written as a `diff.patch` artifact before returning.
 *
 * @param args - Tool arguments from the model (task, file_path?, language?).
 */
async function handleCodeGeneration(args: any): Promise<RunRecord> {
  const runId = `run_${new Date().toISOString().replace(/[-:.TZ]/g, "")}_${nanoid(8)}`;
  const now = new Date().toISOString();
  // For now, return a mock code generation result
  // In production, this would call Gemini again with a code-specific prompt
  // NOTE(review): args.task is interpolated into the diff unescaped — fine
  // for a mock, but a real implementation must not template raw model output.
  const mockDiff = `--- a/${args.file_path || "src/example.ts"}
+++ b/${args.file_path || "src/example.ts"}
@@ -1,3 +1,10 @@
+// Generated by Product OS
+// Task: ${args.task}
+
export function example() {
- return "hello";
+ // TODO: Implement ${args.task}
+ return {
+ status: "generated",
+ task: "${args.task}"
+ };
}`;
  const run: RunRecord = {
    run_id: runId,
    tenant_id: "t_chat",
    tool: "code.generate",
    // Mock generation cannot fail, so the run is created already-succeeded.
    status: "succeeded",
    created_at: now,
    updated_at: now,
    input: args,
    output: {
      type: "code_generation",
      diff: mockDiff,
      file_path: args.file_path || "src/example.ts",
      language: args.language || "typescript",
      description: `Generated code for: ${args.task}`
    }
  };
  await saveRun(run);
  await writeArtifactText(`runs/${runId}`, "diff.patch", mockDiff);
  return run;
}
/**
 * Render a human-readable Markdown summary of executed tool calls.
 * `toolCalls` and `runs` are parallel arrays; indices without a run are
 * skipped, and most tool-specific cases only report successes.
 */
function generateToolSummary(toolCalls: ToolCall[], runs: RunRecord[]): string {
  const lines: string[] = [];
  toolCalls.forEach((tool, i) => {
    const run = runs[i];
    if (!run) return;
    const ok = run.status === "succeeded";
    const status = ok ? "✅" : "❌";
    const output = run.output as any;
    switch (tool.name) {
      case "deploy_service": {
        if (ok) {
          lines.push(`${status} **Deployed** \`${tool.arguments.service_name}\` to ${tool.arguments.env || "dev"}\n   URL: ${output?.service_url || "pending"}`);
        } else {
          lines.push(`${status} **Deploy failed** for \`${tool.arguments.service_name}\`: ${run.error?.message}`);
        }
        break;
      }
      case "get_funnel_analytics": {
        if (ok) {
          const steps = output?.steps || [];
          const conversion = ((output?.overall_conversion || 0) * 100).toFixed(1);
          lines.push(`${status} **Funnel Analysis** (${tool.arguments.range_days || 30} days)\n   Overall conversion: ${conversion}%\n   Steps: ${steps.length}`);
        }
        break;
      }
      case "get_top_drivers": {
        if (ok) {
          const drivers = output?.drivers || [];
          const topDrivers = drivers.slice(0, 3).map((d: any) => d.name).join(", ");
          lines.push(`${status} **Top Drivers** for ${tool.arguments.metric}\n   ${topDrivers}`);
        }
        break;
      }
      case "generate_marketing_posts": {
        if (ok) {
          const channels = output?.channels || [];
          const postCount = channels.reduce((sum: number, c: any) => sum + (c.posts?.length || 0), 0);
          lines.push(`${status} **Generated** ${postCount} marketing posts for ${channels.map((c: any) => c.channel).join(", ")}`);
        }
        break;
      }
      case "get_service_status": {
        if (ok) {
          lines.push(`${status} **Service Status**: \`${tool.arguments.service_name}\` is ${output?.status || "unknown"}`);
        }
        break;
      }
      case "generate_code": {
        if (ok) {
          lines.push(`${status} **Generated code** for: ${tool.arguments.task}\n   File: \`${output?.file_path}\``);
        }
        break;
      }
      default:
        lines.push(`${status} **${tool.name}** completed`);
    }
  });
  return lines.length === 0
    ? "I processed your request but no actions were taken."
    : lines.join("\n\n");
}

View File

@@ -0,0 +1,17 @@
import type { FastifyInstance } from "fastify";
/** Registers the self-describing root route and the liveness probe. */
export async function healthRoutes(app: FastifyInstance) {
  // Root route - API info
  app.get("/", async () => {
    return {
      name: "Product OS Control Plane",
      version: "0.1.0",
      endpoints: {
        health: "GET /healthz",
        tools: "GET /tools",
        invoke: "POST /tools/invoke",
        runs: "GET /runs/:run_id"
      }
    };
  });
  // Simple liveness probe.
  app.get("/healthz", async () => ({ ok: true }));
}

View File

@@ -0,0 +1,18 @@
import type { FastifyInstance } from "fastify";
import { requireAuth } from "../auth.js";
import { getRun } from "../storage/index.js";
/** Run inspection routes: fetch a run record and (V1 stub) its logs. */
export async function runRoutes(app: FastifyInstance) {
  app.get("/runs/:run_id", async (req) => {
    await requireAuth(req);
    const { run_id } = req.params as { run_id: string };
    const run = await getRun(run_id);
    // httpErrors comes from @fastify/sensible, registered in index.ts.
    return run ?? app.httpErrors.notFound("Run not found");
  });

  app.get("/runs/:run_id/logs", async (req) => {
    await requireAuth(req);
    return { note: "V1: logs are in GCS artifacts under runs/<run_id>/" };
  });
}

View File

@@ -0,0 +1,91 @@
import type { FastifyInstance } from "fastify";
import { nanoid } from "nanoid";
import { requireAuth } from "../auth.js";
import { getRegistry } from "../registry.js";
import { saveRun, writeArtifactText } from "../storage/index.js";
import type { RunRecord, ToolInvokeRequest } from "../types.js";
/**
 * POST a JSON body to an executor endpoint and return the parsed JSON reply.
 * Throws (including the response text) when the executor replies non-2xx.
 */
async function postJson(url: string, body: unknown) {
  const res = await fetch(url, {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify(body)
  });
  if (res.ok) {
    return res.json() as Promise<unknown>;
  }
  const txt = await res.text();
  throw new Error(`Executor error ${res.status}: ${txt}`);
}
export async function toolRoutes(app: FastifyInstance) {
app.get("/tools", async (req) => {
await requireAuth(req);
const registry = await getRegistry();
return { tools: Object.values(registry) };
});
app.post<{ Body: ToolInvokeRequest }>("/tools/invoke", async (req) => {
await requireAuth(req);
const body = req.body;
const registry = await getRegistry();
const tool = registry[body.tool];
if (!tool) return app.httpErrors.notFound(`Unknown tool: ${body.tool}`);
const runId = `run_${new Date().toISOString().replace(/[-:.TZ]/g, "")}_${nanoid(8)}`;
const now = new Date().toISOString();
const run: RunRecord = {
run_id: runId,
tenant_id: body.tenant_id,
tool: body.tool,
status: "queued",
created_at: now,
updated_at: now,
input: body.input,
artifacts: { bucket: process.env.GCS_BUCKET_ARTIFACTS ?? "", prefix: `runs/${runId}` }
};
await saveRun(run);
await writeArtifactText(`runs/${runId}`, "input.json", JSON.stringify(body, null, 2));
try {
run.status = "running";
run.updated_at = new Date().toISOString();
await saveRun(run);
if (body.dry_run) {
run.status = "succeeded";
run.output = { dry_run: true };
run.updated_at = new Date().toISOString();
await saveRun(run);
await writeArtifactText(`runs/${runId}`, "output.json", JSON.stringify(run.output, null, 2));
return { run_id: runId, status: run.status };
}
const execUrl = `${tool.executor.url}${tool.executor.path}`;
const output = await postJson(execUrl, {
run_id: runId,
tenant_id: body.tenant_id,
workspace_id: body.workspace_id,
input: body.input
});
run.status = "succeeded";
run.output = output;
run.updated_at = new Date().toISOString();
await saveRun(run);
await writeArtifactText(`runs/${runId}`, "output.json", JSON.stringify(output, null, 2));
return { run_id: runId, status: run.status };
} catch (e: any) {
run.status = "failed";
run.error = { message: e?.message ?? "Unknown error" };
run.updated_at = new Date().toISOString();
await saveRun(run);
await writeArtifactText(`runs/${runId}`, "error.json", JSON.stringify(run.error, null, 2));
return { run_id: runId, status: run.status };
}
});
}

View File

@@ -0,0 +1,23 @@
import { Firestore } from "@google-cloud/firestore";
import { config } from "../config.js";
import type { RunRecord, ToolDef } from "../types.js";
const db = new Firestore({ projectId: config.projectId });
/** Upsert a run document, keyed by run_id. */
export async function saveRun(run: RunRecord): Promise<void> {
  const doc = db.collection(config.runsCollection).doc(run.run_id);
  await doc.set(run, { merge: true });
}

/** Fetch a run by id, or null when it does not exist. */
export async function getRun(runId: string): Promise<RunRecord | null> {
  const snap = await db.collection(config.runsCollection).doc(runId).get();
  if (!snap.exists) return null;
  return snap.data() as RunRecord;
}

/** Upsert a tool definition, keyed by name. */
export async function saveTool(tool: ToolDef): Promise<void> {
  const doc = db.collection(config.toolsCollection).doc(tool.name);
  await doc.set(tool, { merge: true });
}

/** List every tool definition in the tools collection. */
export async function listTools(): Promise<ToolDef[]> {
  const snapshot = await db.collection(config.toolsCollection).get();
  return snapshot.docs.map((d) => d.data() as ToolDef);
}

View File

@@ -0,0 +1,11 @@
import { Storage } from "@google-cloud/storage";
import { config } from "../config.js";
const storage = new Storage({ projectId: config.projectId });
/**
 * Write a text artifact to GCS under `<prefix>/<filename>` in the configured
 * artifacts bucket and return its location.
 *
 * BUGFIX: the object path previously contained the literal string
 * "$(unknown)" instead of the `filename` argument, so every artifact written
 * for a run (input.json, output.json, diff.patch, ...) collided on a single
 * object and overwrote each other.
 */
export async function writeArtifactText(prefix: string, filename: string, content: string) {
  const objectPath = `${prefix}/${filename}`;
  const file = storage.bucket(config.artifactsBucket).file(objectPath);
  // NOTE(review): contentType is always text/plain even for .json artifacts —
  // acceptable for V1; revisit if artifacts are ever served directly.
  await file.save(content, { contentType: "text/plain" });
  return { bucket: config.artifactsBucket, path: objectPath };
}

View File

@@ -0,0 +1,23 @@
/**
* Storage adapter that switches between GCP (Firestore/GCS) and in-memory
*/
import { config } from "../config.js";
import * as memory from "./memory.js";
import * as firestore from "./firestore.js";
import * as gcs from "./gcs.js";
// Backend is chosen once, at import time.
const useMemory = config.storageMode === "memory";
if (useMemory) {
  console.log("💾 Using in-memory storage (set GCP_PROJECT_ID for Firestore/GCS)");
  memory.seedTools();
} else {
  console.log(`☁️ Using GCP storage (project: ${config.projectId})`);
}

// Unified interface: run/tool records come from memory or Firestore,
// artifacts from memory or GCS.
const recordStore = useMemory ? memory : firestore;
export const saveRun = recordStore.saveRun;
export const getRun = recordStore.getRun;
export const saveTool = recordStore.saveTool;
export const listTools = recordStore.listTools;
export const writeArtifactText = useMemory ? memory.writeArtifactText : gcs.writeArtifactText;

View File

@@ -0,0 +1,116 @@
/**
* In-memory storage for local development without Firestore/GCS
*/
import type { RunRecord, ToolDef } from "../types.js";
// In-memory stores; all state lives only for the lifetime of the process.
const runs = new Map<string, RunRecord>();
const tools = new Map<string, ToolDef>();
const artifacts = new Map<string, string>();

// Run operations

/** Upsert a run record (stored as a shallow copy). */
export async function saveRun(run: RunRecord): Promise<void> {
  runs.set(run.run_id, { ...run });
}

/** Fetch a run by id, or null when it does not exist. */
export async function getRun(runId: string): Promise<RunRecord | null> {
  return runs.get(runId) ?? null;
}

// Tool operations

/** Upsert a tool definition (stored as a shallow copy). */
export async function saveTool(tool: ToolDef): Promise<void> {
  tools.set(tool.name, { ...tool });
}

/** List every registered tool definition. */
export async function listTools(): Promise<ToolDef[]> {
  return Array.from(tools.values());
}

// Artifact operations

/**
 * Store a text artifact under `<prefix>/<filename>`.
 *
 * BUGFIX: the key previously contained the literal string "$(unknown)"
 * instead of the `filename` argument, so every artifact under a prefix
 * overwrote the same map entry.
 */
export async function writeArtifactText(prefix: string, filename: string, content: string) {
  const path = `${prefix}/${filename}`;
  artifacts.set(path, content);
  return { bucket: "memory", path };
}
/**
 * Seed a handful of example tool definitions so local development works out
 * of the box without a populated registry.
 */
export function seedTools() {
  // All sample executors are plain HTTP endpoints on localhost.
  const httpExecutor = (url: string, path: string) => ({ kind: "http" as const, url, path });
  const sampleTools: ToolDef[] = [
    {
      name: "cloudrun.deploy_service",
      description: "Build and deploy a Cloud Run service",
      risk: "medium",
      executor: httpExecutor("http://localhost:8090", "/execute/cloudrun/deploy"),
      inputSchema: {
        type: "object",
        required: ["service_name", "repo", "ref", "env"],
        properties: {
          service_name: { type: "string" },
          repo: { type: "string" },
          ref: { type: "string" },
          env: { type: "string", enum: ["dev", "staging", "prod"] }
        }
      }
    },
    {
      name: "cloudrun.get_service_status",
      description: "Get Cloud Run service status",
      risk: "low",
      executor: httpExecutor("http://localhost:8090", "/execute/cloudrun/status"),
      inputSchema: {
        type: "object",
        required: ["service_name", "region"],
        properties: {
          service_name: { type: "string" },
          region: { type: "string" }
        }
      }
    },
    {
      name: "analytics.funnel_summary",
      description: "Get funnel metrics for a time window",
      risk: "low",
      executor: httpExecutor("http://localhost:8091", "/execute/analytics/funnel"),
      inputSchema: {
        type: "object",
        required: ["range_days"],
        properties: {
          range_days: { type: "integer", minimum: 1, maximum: 365 }
        }
      }
    },
    {
      name: "brand.get_profile",
      description: "Get tenant brand profile",
      risk: "low",
      executor: httpExecutor("http://localhost:8092", "/execute/brand/get"),
      inputSchema: {
        type: "object",
        required: ["profile_id"],
        properties: {
          profile_id: { type: "string" }
        }
      }
    },
    {
      name: "marketing.generate_channel_posts",
      description: "Generate social posts from a brief",
      risk: "low",
      executor: httpExecutor("http://localhost:8093", "/execute/marketing/generate"),
      inputSchema: {
        type: "object",
        required: ["brief", "channels"],
        properties: {
          brief: { type: "object" },
          channels: { type: "array", items: { type: "string" } }
        }
      }
    }
  ];
  sampleTools.forEach((tool) => tools.set(tool.name, tool));
  console.log(`📦 Seeded ${sampleTools.length} tools in memory`);
}

View File

@@ -0,0 +1,37 @@
/** Coarse blast-radius classification attached to each tool. */
export type ToolRisk = "low" | "medium" | "high";

/** A registered tool: metadata plus the HTTP executor that implements it. */
export type ToolDef = {
  // Registry key, e.g. "cloudrun.deploy_service".
  name: string;
  description: string;
  risk: ToolRisk;
  // Where to POST the tool's input; the full endpoint is `url + path`.
  executor: {
    kind: "http";
    url: string;
    path: string;
  };
  // JSON-Schema-shaped descriptions of the tool's I/O (kept untyped here).
  inputSchema: unknown;
  outputSchema?: unknown;
};

/** Body of POST /tools/invoke. */
export type ToolInvokeRequest = {
  tool: string;
  tenant_id: string;
  workspace_id?: string;
  input: unknown;
  // When true, the run succeeds immediately without calling the executor.
  dry_run?: boolean;
};

/** Lifecycle states of a run; terminal states are "succeeded" and "failed". */
export type RunStatus = "queued" | "running" | "succeeded" | "failed";

/** Persisted record of one tool invocation. */
export type RunRecord = {
  run_id: string;
  tenant_id: string;
  tool: string;
  status: RunStatus;
  // ISO-8601 timestamps.
  created_at: string;
  updated_at: string;
  input: unknown;
  output?: unknown;
  error?: { message: string; details?: unknown };
  // Where this run's artifacts (input/output/error JSON, diffs) are stored.
  artifacts?: { bucket: string; prefix: string };
};

View File

@@ -0,0 +1,13 @@
{
  "compilerOptions": {
    // Emit ES2022 syntax and ES modules (the package is "type": "module").
    "target": "ES2022",
    "module": "ES2022",
    "moduleResolution": "Bundler",
    "outDir": "dist",
    "rootDir": "src",
    // Full strict mode (noImplicitAny, strictNullChecks, ...).
    "strict": true,
    "esModuleInterop": true,
    // Skip type-checking dependency .d.ts files for faster builds.
    "skipLibCheck": true,
    "types": ["node"]
  }
}