Update documentation files

This commit is contained in:
2026-01-21 15:35:57 -08:00
parent cb8ff46020
commit 57b9ce2f1a
5 changed files with 3272 additions and 0 deletions

View File

@@ -0,0 +1,743 @@
1) Generate Control Plane API scaffold
Folder layout
backend/control-plane/
package.json
tsconfig.json
src/
index.ts
config.ts
auth.ts
registry.ts
types.ts
storage/
firestore.ts
gcs.ts
routes/
tools.ts
runs.ts
health.ts
.env.example
backend/control-plane/package.json
{
"name": "@productos/control-plane",
"version": "0.1.0",
"private": true,
"type": "module",
"main": "dist/index.js",
"scripts": {
"dev": "tsx watch src/index.ts",
"build": "tsc -p tsconfig.json",
"start": "node dist/index.js",
"lint": "eslint ."
},
"dependencies": {
"@google-cloud/firestore": "^7.11.0",
"@google-cloud/storage": "^7.14.0",
"@fastify/cors": "^9.0.1",
"@fastify/helmet": "^12.0.0",
"@fastify/rate-limit": "^9.1.0",
"fastify": "^4.28.1",
"zod": "^3.23.8",
"nanoid": "^5.0.7"
},
"devDependencies": {
"@types/node": "^22.0.0",
"tsx": "^4.19.0",
"typescript": "^5.5.4",
"eslint": "^9.8.0"
}
}
backend/control-plane/tsconfig.json
{
"compilerOptions": {
"target": "ES2022",
"module": "ES2022",
"moduleResolution": "Bundler",
"outDir": "dist",
"rootDir": "src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"types": ["node"]
}
}
backend/control-plane/.env.example
PORT=8080
GCP_PROJECT_ID=your-project-id
GCS_BUCKET_ARTIFACTS=productos-artifacts-dev
FIRESTORE_COLLECTION_RUNS=runs
FIRESTORE_COLLECTION_TOOLS=tools
# If you put behind IAP / OAuth later, validate ID tokens here:
AUTH_MODE=dev # dev | oauth
backend/control-plane/src/config.ts
// Process-wide configuration, read once from the environment at module load.
// Defaults mirror .env.example; empty projectId/bucket mean "not configured"
// and will surface as GCP client errors at first use.
export const config = {
// HTTP listen port (Cloud Run injects PORT).
port: Number(process.env.PORT ?? 8080),
projectId: process.env.GCP_PROJECT_ID ?? "",
artifactsBucket: process.env.GCS_BUCKET_ARTIFACTS ?? "",
runsCollection: process.env.FIRESTORE_COLLECTION_RUNS ?? "runs",
toolsCollection: process.env.FIRESTORE_COLLECTION_TOOLS ?? "tools",
// "dev" trusts every caller; any other value is rejected (see auth.ts).
authMode: process.env.AUTH_MODE ?? "dev"
};
backend/control-plane/src/types.ts
// Risk tier of a tool; intended for policy/approval decisions on invocation.
export type ToolRisk = "low" | "medium" | "high";
// A tool definition as stored in the Firestore registry (document id = name).
export type ToolDef = {
name: string;
description: string;
risk: ToolRisk;
// How the control plane reaches the executor service for this tool.
executor: {
kind: "http";
url: string; // executor base url
path: string; // executor endpoint path
};
inputSchema: unknown; // JSON Schema object
outputSchema?: unknown; // JSON Schema object
};
// Request body of POST /tools/invoke.
export type ToolInvokeRequest = {
tool: string;
tenant_id: string;
workspace_id?: string;
input: unknown;
dry_run?: boolean;
};
// Run lifecycle states. V1 executes synchronously, so API callers normally
// observe only "succeeded" or "failed" in the response.
export type RunStatus = "queued" | "running" | "succeeded" | "failed";
// Run metadata persisted to Firestore; timestamps are ISO-8601 strings.
export type RunRecord = {
run_id: string;
tenant_id: string;
tool: string;
status: RunStatus;
created_at: string;
updated_at: string;
input: unknown;
output?: unknown;
error?: { message: string; details?: unknown };
// GCS location of the run's input/output/error artifacts.
artifacts?: { bucket: string; prefix: string };
};
backend/control-plane/src/auth.ts
import { FastifyRequest } from "fastify";
import { config } from "./config.js";
/**
* V1: dev mode = trust caller (or a shared API key later).
* V2: validate Google OAuth/IAP identity token and map to tenant/org.
*/
/**
 * Authentication gate for every route handler.
 * V1: dev mode is a no-op (trust the caller). Any other AUTH_MODE is
 * rejected until OAuth/IAP verification is implemented.
 * V2 plan: read Authorization: Bearer <id_token>, verify against Google
 * JWKS, and attach the resolved identity to the request.
 */
export async function requireAuth(req: FastifyRequest) {
  const isDevMode = config.authMode === "dev";
  if (!isDevMode) {
    throw new Error("AUTH_MODE oauth not yet implemented");
  }
}
backend/control-plane/src/storage/firestore.ts
import { Firestore } from "@google-cloud/firestore";
import { config } from "../config.js";
import type { RunRecord, ToolDef } from "../types.js";
const db = new Firestore({ projectId: config.projectId });
// Upsert a run document keyed by run_id; merge preserves fields not in `run`.
export async function saveRun(run: RunRecord): Promise<void> {
await db.collection(config.runsCollection).doc(run.run_id).set(run, { merge: true });
}
// Fetch a run by id, or null when no such document exists.
export async function getRun(runId: string): Promise<RunRecord | null> {
const snap = await db.collection(config.runsCollection).doc(runId).get();
return snap.exists ? (snap.data() as RunRecord) : null;
}
// Upsert a tool definition; the document id is the tool's unique name.
export async function saveTool(tool: ToolDef): Promise<void> {
await db.collection(config.toolsCollection).doc(tool.name).set(tool, { merge: true });
}
// Read every tool definition from the registry collection (no pagination;
// assumes the registry stays small).
export async function listTools(): Promise<ToolDef[]> {
const snap = await db.collection(config.toolsCollection).get();
return snap.docs.map(d => d.data() as ToolDef);
}
backend/control-plane/src/storage/gcs.ts
import { Storage } from "@google-cloud/storage";
import { config } from "../config.js";
const storage = new Storage({ projectId: config.projectId });
/**
 * Write a small text artifact to GCS under `<prefix>/<filename>` in the
 * configured artifacts bucket.
 * Returns the bucket name and object path for persisting on the run record.
 */
export async function writeArtifactText(prefix: string, filename: string, content: string) {
  const bucket = storage.bucket(config.artifactsBucket);
  // Bug fix: the object name previously interpolated a garbled placeholder
  // ("$(unknown)") instead of the `filename` parameter, which was never used
  // — every artifact for a given prefix collided on the same object.
  const objectPath = `${prefix}/${filename}`;
  const file = bucket.file(objectPath);
  await file.save(content, { contentType: "text/plain" });
  return { bucket: config.artifactsBucket, path: objectPath };
}
backend/control-plane/src/registry.ts
import type { ToolDef } from "./types.js";
import { listTools } from "./storage/firestore.js";
/**
* Simple registry. V2: cache + versioning + per-tenant overrides.
*/
/**
 * Load all tool definitions and index them by tool name.
 * Simple registry. V2: cache + versioning + per-tenant overrides.
 */
export async function getRegistry(): Promise<Record<string, ToolDef>> {
  const byName: Record<string, ToolDef> = {};
  for (const tool of await listTools()) {
    byName[tool.name] = tool;
  }
  return byName;
}
backend/control-plane/src/routes/health.ts
import type { FastifyInstance } from "fastify";
// Liveness probe used by Cloud Run / load balancers.
export async function healthRoutes(app: FastifyInstance) {
  app.get("/healthz", async () => {
    return { ok: true };
  });
}
backend/control-plane/src/routes/tools.ts
import type { FastifyInstance } from "fastify";
import { nanoid } from "nanoid";
import { requireAuth } from "../auth.js";
import { getRegistry } from "../registry.js";
import { saveRun } from "../storage/firestore.js";
import { writeArtifactText } from "../storage/gcs.js";
import type { RunRecord, ToolInvokeRequest } from "../types.js";
/**
 * POST a JSON body to an executor endpoint and return the parsed response.
 * Throws with the executor's status code and response text on any non-2xx
 * reply.
 */
async function postJson(url: string, body: unknown) {
  const response = await fetch(url, {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify(body)
  });
  if (response.ok) {
    return response.json() as Promise<unknown>;
  }
  const txt = await response.text();
  throw new Error(`Executor error ${response.status}: ${txt}`);
}
export async function toolRoutes(app: FastifyInstance) {
  // GET /tools — list every registered tool definition.
  app.get("/tools", async (req) => {
    await requireAuth(req);
    const registry = await getRegistry();
    return { tools: Object.values(registry) };
  });

  // POST /tools/invoke — create a run record, execute the tool synchronously
  // (v1; v2: push to Cloud Tasks / Workflows), and persist input/output/error
  // artifacts under runs/<run_id>/ in GCS. The HTTP response is always
  // { run_id, status }; failures are reported via status, not an error code.
  app.post<{ Body: ToolInvokeRequest }>("/tools/invoke", async (req, reply) => {
    await requireAuth(req);
    const body = req.body;
    const registry = await getRegistry();
    const tool = registry[body.tool];
    if (!tool) {
      // Bug fix: the original called app.httpErrors.notFound(), which is
      // provided by @fastify/sensible — a plugin that is neither registered
      // nor listed in package.json — so the call would throw a TypeError at
      // runtime. Respond through the reply API instead.
      return reply.code(404).send({ error: `Unknown tool: ${body.tool}` });
    }
    // Sortable, collision-resistant run id: UTC timestamp + random suffix.
    const runId = `run_${new Date().toISOString().replace(/[-:.TZ]/g, "")}_${nanoid(8)}`;
    const now = new Date().toISOString();
    const run: RunRecord = {
      run_id: runId,
      tenant_id: body.tenant_id,
      tool: body.tool,
      status: "queued",
      created_at: now,
      updated_at: now,
      input: body.input,
      // NOTE(review): prefer config.artifactsBucket here for consistency with
      // storage/gcs.ts instead of re-reading the raw env var — confirm.
      artifacts: { bucket: process.env.GCS_BUCKET_ARTIFACTS ?? "", prefix: `runs/${runId}` }
    };
    await saveRun(run);
    // record input artifact
    await writeArtifactText(`runs/${runId}`, "input.json", JSON.stringify(body, null, 2));
    // execute (sync for v1; v2: push to Cloud Tasks / Workflows)
    try {
      run.status = "running";
      run.updated_at = new Date().toISOString();
      await saveRun(run);
      if (body.dry_run) {
        // Dry runs succeed immediately without contacting the executor.
        run.status = "succeeded";
        run.output = { dry_run: true };
        run.updated_at = new Date().toISOString();
        await saveRun(run);
        await writeArtifactText(`runs/${runId}`, "output.json", JSON.stringify(run.output, null, 2));
        return { run_id: runId, status: run.status };
      }
      const execUrl = `${tool.executor.url}${tool.executor.path}`;
      const output = await postJson(execUrl, {
        run_id: runId,
        tenant_id: body.tenant_id,
        workspace_id: body.workspace_id,
        input: body.input
      });
      run.status = "succeeded";
      run.output = output;
      run.updated_at = new Date().toISOString();
      await saveRun(run);
      await writeArtifactText(`runs/${runId}`, "output.json", JSON.stringify(output, null, 2));
      return { run_id: runId, status: run.status };
    } catch (e: unknown) {
      // Executor or storage failure: mark the run failed and surface the
      // message. `unknown` + instanceof replaces the original `any` cast.
      run.status = "failed";
      run.error = { message: e instanceof Error ? e.message : "Unknown error" };
      run.updated_at = new Date().toISOString();
      await saveRun(run);
      await writeArtifactText(`runs/${runId}`, "error.json", JSON.stringify(run.error, null, 2));
      return { run_id: runId, status: run.status };
    }
  });
}
backend/control-plane/src/routes/runs.ts
import type { FastifyInstance } from "fastify";
import { requireAuth } from "../auth.js";
import { getRun } from "../storage/firestore.js";
export async function runRoutes(app: FastifyInstance) {
  // GET /runs/:run_id — fetch a single run record from Firestore.
  app.get("/runs/:run_id", async (req, reply) => {
    await requireAuth(req);
    // Typed cast replaces the original @ts-expect-error on untyped params.
    const runId = (req.params as { run_id: string }).run_id;
    const run = await getRun(runId);
    if (!run) {
      // Bug fix: app.httpErrors requires @fastify/sensible, which is not
      // registered or listed as a dependency — calling it would throw at
      // runtime. Respond via the reply API instead.
      return reply.code(404).send({ error: "Run not found" });
    }
    return run;
  });
  // V1: logs are stored as artifacts in GCS; IDE can fetch by signed URL later
  app.get("/runs/:run_id/logs", async (req) => {
    await requireAuth(req);
    // stub
    return { note: "V1: logs are in GCS artifacts under runs/<run_id>/" };
  });
}
backend/control-plane/src/index.ts
import Fastify from "fastify";
import cors from "@fastify/cors";
import helmet from "@fastify/helmet";
import rateLimit from "@fastify/rate-limit";
import { config } from "./config.js";
import { healthRoutes } from "./routes/health.js";
import { toolRoutes } from "./routes/tools.js";
import { runRoutes } from "./routes/runs.js";
// Fastify app with built-in pino logging enabled.
const app = Fastify({ logger: true });
// NOTE(review): origin: true reflects any Origin header — tighten the
// allowed-origin list before production.
await app.register(cors, { origin: true });
await app.register(helmet);
// Basic abuse protection: 300 requests per minute per client.
await app.register(rateLimit, { max: 300, timeWindow: "1 minute" });
await app.register(healthRoutes);
await app.register(toolRoutes);
await app.register(runRoutes);
// Bind 0.0.0.0 so the container port is reachable on Cloud Run.
app.listen({ port: config.port, host: "0.0.0.0" }).catch((err) => {
app.log.error(err);
process.exit(1);
});
2) Generate Tool Registry schema
You want two things:
A human-editable YAML (source of truth)
A JSON Schema to validate tool definitions
contracts/tool-registry.yaml (example)
version: 1
tools:
cloudrun.deploy_service:
description: Deploy a Cloud Run service via Cloud Build.
risk: medium
executor:
kind: http
url: https://deploy-executor-xxxxx.a.run.app
path: /execute/deploy
inputSchema:
type: object
required: [service_name, repo, ref, env]
properties:
service_name: { type: string }
repo: { type: string, description: "Git repo URL" }
ref: { type: string, description: "Branch, tag, or commit SHA" }
env: { type: string, enum: ["dev", "staging", "prod"] }
outputSchema:
type: object
properties:
service_url: { type: string }
revision: { type: string }
analytics.get_funnel_summary:
description: Return funnel metrics for a tenant and time window.
risk: low
executor:
kind: http
url: https://analytics-executor-xxxxx.a.run.app
path: /execute/funnel
inputSchema:
type: object
required: [range_days]
properties:
range_days: { type: integer, minimum: 1, maximum: 365 }
segment: { type: object }
contracts/tool-registry.schema.json
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://productos.dev/schemas/tool-registry.schema.json",
"type": "object",
"required": ["version", "tools"],
"properties": {
"version": { "type": "integer", "minimum": 1 },
"tools": {
"type": "object",
"additionalProperties": { "$ref": "#/$defs/ToolDef" }
}
},
"$defs": {
"ToolDef": {
"type": "object",
"required": ["description", "risk", "executor", "inputSchema"],
"properties": {
"description": { "type": "string" },
"risk": { "type": "string", "enum": ["low", "medium", "high"] },
"executor": { "$ref": "#/$defs/Executor" },
"inputSchema": { "type": "object" },
"outputSchema": { "type": "object" }
},
"additionalProperties": false
},
"Executor": {
"type": "object",
"required": ["kind", "url", "path"],
"properties": {
"kind": { "type": "string", "enum": ["http"] },
"url": { "type": "string" },
"path": { "type": "string" }
},
"additionalProperties": false
}
}
}
3) Generate VSCodium extension skeleton
Folder layout
client-ide/extensions/gcp-productos/
package.json
tsconfig.json
src/extension.ts
src/api.ts
src/ui.ts
media/icon.png (optional)
client-ide/extensions/gcp-productos/package.json
{
"name": "gcp-productos",
"displayName": "GCP Product OS",
"description": "Product-centric panels (Code, Marketing, Analytics, Growth...) and backend tool invocation.",
"version": "0.0.1",
"publisher": "productos",
"engines": { "vscode": "^1.90.0" },
"categories": ["Other"],
"activationEvents": ["onStartupFinished"],
"main": "./dist/extension.js",
"contributes": {
"commands": [
{ "command": "productos.configure", "title": "Product OS: Configure Backend" },
{ "command": "productos.tools.list", "title": "Product OS: List Tools" },
{ "command": "productos.tools.invoke", "title": "Product OS: Invoke Tool" },
{ "command": "productos.runs.open", "title": "Product OS: Open Run" }
],
"configuration": {
"title": "Product OS",
"properties": {
"productos.backendUrl": {
"type": "string",
"default": "http://localhost:8080",
"description": "Control Plane API base URL"
},
"productos.tenantId": {
"type": "string",
"default": "t_dev",
"description": "Tenant ID for tool calls"
}
}
}
},
"scripts": {
"build": "tsc -p tsconfig.json",
"watch": "tsc -w -p tsconfig.json"
},
"devDependencies": {
"@types/node": "^22.0.0",
"@types/vscode": "^1.90.0",
"typescript": "^5.5.4"
}
}
client-ide/extensions/gcp-productos/tsconfig.json
{
"compilerOptions": {
"target": "ES2022",
"module": "CommonJS",
"outDir": "dist",
"rootDir": "src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true
}
}
client-ide/extensions/gcp-productos/src/api.ts
import * as vscode from "vscode";
// Read a setting from the "productos" configuration section.
// The non-null assertion relies on package.json declaring a default for
// every key used here — TODO confirm whenever a new key is added.
function cfg<T>(key: string): T {
return vscode.workspace.getConfiguration("productos").get<T>(key)!;
}
// Control Plane API base URL (setting: productos.backendUrl).
export function backendUrl() {
return cfg<string>("backendUrl");
}
// Tenant ID sent with every tool invocation (setting: productos.tenantId).
export function tenantId() {
return cfg<string>("tenantId");
}
// GET /tools from the Control Plane; returns the tool list, or an empty
// array when the response carries no tools field.
export async function listTools(): Promise<any[]> {
  const res = await fetch(`${backendUrl()}/tools`);
  if (!res.ok) {
    throw new Error(await res.text());
  }
  const payload = await res.json();
  return payload.tools ?? [];
}
// POST /tools/invoke with the configured tenant id; resolves to the control
// plane's response (shape per backend: { run_id, status }).
export async function invokeTool(tool: string, input: any) {
const res = await fetch(`${backendUrl()}/tools/invoke`, {
method: "POST",
headers: { "content-type": "application/json" },
body: JSON.stringify({
tool,
tenant_id: tenantId(),
input
})
});
if (!res.ok) throw new Error(await res.text());
return res.json();
}
// GET /runs/:run_id — fetch a run record from the control plane.
export async function getRun(runId: string) {
const res = await fetch(`${backendUrl()}/runs/${runId}`);
if (!res.ok) throw new Error(await res.text());
return res.json();
}
client-ide/extensions/gcp-productos/src/ui.ts
import * as vscode from "vscode";
import { getRun } from "./api";
// Open `obj` pretty-printed as an untitled JSON document in the editor and
// flash `title` in the status bar for 3 seconds.
export async function showJson(title: string, obj: any) {
const doc = await vscode.workspace.openTextDocument({
content: JSON.stringify(obj, null, 2),
language: "json"
});
await vscode.window.showTextDocument(doc, { preview: false });
vscode.window.setStatusBarMessage(title, 3000);
}
// Fetch a run from the backend and display it as a JSON document.
export async function openRun(runId: string) {
const run = await getRun(runId);
await showJson(`Run ${runId}`, run);
}
client-ide/extensions/gcp-productos/src/extension.ts
import * as vscode from "vscode";
import { invokeTool, listTools } from "./api";
import { openRun, showJson } from "./ui";
/**
 * Extension entry point: registers the four Product OS commands declared in
 * package.json. All disposables go on context.subscriptions.
 */
export function activate(context: vscode.ExtensionContext) {
  // Product OS: Configure Backend — prompt for and persist the backend URL.
  context.subscriptions.push(
    vscode.commands.registerCommand("productos.configure", async () => {
      const backendUrl = await vscode.window.showInputBox({ prompt: "Control Plane backend URL" });
      if (!backendUrl) return;
      await vscode.workspace.getConfiguration("productos").update("backendUrl", backendUrl, vscode.ConfigurationTarget.Global);
      vscode.window.showInformationMessage(`Product OS backend set: ${backendUrl}`);
    })
  );
  // Product OS: List Tools — dump the registry into a JSON document.
  context.subscriptions.push(
    vscode.commands.registerCommand("productos.tools.list", async () => {
      const tools = await listTools();
      await showJson("Tools", tools);
    })
  );
  // Product OS: Invoke Tool — pick a tool, collect JSON input, invoke it,
  // and offer to open the resulting run.
  context.subscriptions.push(
    vscode.commands.registerCommand("productos.tools.invoke", async () => {
      const tools = await listTools();
      const pick = await vscode.window.showQuickPick(
        tools.map((t: any) => ({ label: t.name, description: t.description })),
        { placeHolder: "Select a tool to invoke" }
      );
      if (!pick) return;
      const inputText = await vscode.window.showInputBox({
        prompt: "Tool input JSON",
        value: "{}"
      });
      if (!inputText) return;
      // Robustness fix: the original let JSON.parse throw on malformed input,
      // surfacing as an opaque command failure; validate and report instead.
      let input: unknown;
      try {
        input = JSON.parse(inputText);
      } catch {
        vscode.window.showErrorMessage("Tool input is not valid JSON.");
        return;
      }
      const result = await invokeTool(pick.label, input);
      await showJson("Invoke Result", result);
      if (result?.run_id) {
        const open = await vscode.window.showInformationMessage(`Run started: ${result.run_id}`, "Open Run");
        if (open === "Open Run") await openRun(result.run_id);
      }
    })
  );
  // Product OS: Open Run — fetch and display a run record by id.
  context.subscriptions.push(
    vscode.commands.registerCommand("productos.runs.open", async () => {
      const runId = await vscode.window.showInputBox({ prompt: "Run ID" });
      if (!runId) return;
      await openRun(runId);
    })
  );
}
export function deactivate() {}
4) Generate Terraform base
This is a minimal hub-style baseline:
GCS bucket for artifacts
Firestore (Native) for runs/tools
Cloud Run service for Control Plane
Service accounts + IAM
Placeholders for executor services
Folder layout
infra/terraform/
providers.tf
variables.tf
outputs.tf
main.tf
iam.tf
infra/terraform/providers.tf
terraform {
required_version = ">= 1.5.0"
required_providers {
google = {
source = "hashicorp/google"
version = "~> 5.30"
}
}
}
provider "google" {
project = var.project_id
region = var.region
}
infra/terraform/variables.tf
# Input variables for the Product OS base infrastructure.
variable "project_id" {
  type = string
}

# Bug fix: the original declared `type` and `default` on a single line inside
# the block, which is invalid HCL (arguments must be newline-separated).
variable "region" {
  type    = string
  default = "us-central1"
}

variable "artifact_bucket_name" {
  type = string
}

variable "control_plane_image" {
  type        = string
  description = "Container image URI for control-plane (Artifact Registry)."
}
infra/terraform/main.tf
# Artifact bucket: run inputs/outputs/errors written by the control plane.
resource "google_storage_bucket" "artifacts" {
name = var.artifact_bucket_name
location = var.region
uniform_bucket_level_access = true
# Versioning guards against accidental overwrite of run artifacts.
versioning { enabled = true }
}
# Firestore (Native mode) requires enabling in console once per project (or via API depending on org policy).
# NOTE(review): google_firestore_database may also need `project`, and not
# every Cloud Run region is a valid Firestore location_id — confirm.
resource "google_firestore_database" "default" {
name = "(default)"
location_id = var.region
type = "FIRESTORE_NATIVE"
}
# Identity the control plane runs as; granted bucket/Firestore access in iam.tf.
resource "google_service_account" "control_plane_sa" {
account_id = "sa-control-plane"
display_name = "Product OS Control Plane"
}
# Control Plane API on Cloud Run; env vars match backend/control-plane/.env.example.
resource "google_cloud_run_v2_service" "control_plane" {
name = "control-plane"
location = var.region
template {
service_account = google_service_account.control_plane_sa.email
containers {
image = var.control_plane_image
env {
name = "GCP_PROJECT_ID"
value = var.project_id
}
env {
name = "GCS_BUCKET_ARTIFACTS"
value = google_storage_bucket.artifacts.name
}
env {
# Dev-mode auth trusts all callers — see backend auth.ts before exposing publicly.
name = "AUTH_MODE"
value = "dev"
}
}
}
}
# Public access optional; prefer IAM auth in production.
resource "google_cloud_run_v2_service_iam_member" "control_plane_public" {
name = google_cloud_run_v2_service.control_plane.name
location = var.region
role = "roles/run.invoker"
member = "allUsers"
}
infra/terraform/iam.tf
# Allow control-plane to write artifacts in GCS
# NOTE(review): objectAdmin also permits delete/overwrite; objectCreator +
# objectViewer may be sufficient for append-only artifacts — confirm.
resource "google_storage_bucket_iam_member" "control_plane_bucket_writer" {
bucket = google_storage_bucket.artifacts.name
role = "roles/storage.objectAdmin"
member = "serviceAccount:${google_service_account.control_plane_sa.email}"
}
# Firestore access for run/tool metadata
resource "google_project_iam_member" "control_plane_firestore" {
project = var.project_id
role = "roles/datastore.user"
member = "serviceAccount:${google_service_account.control_plane_sa.email}"
}
# Placeholder: executor services will each have their own service accounts.
# Control-plane should be granted roles/run.invoker on each executor service once created.
infra/terraform/outputs.tf
# URL of the deployed control-plane Cloud Run service.
output "control_plane_url" {
value = google_cloud_run_v2_service.control_plane.uri
}
# Name of the artifacts bucket (feed into GCS_BUCKET_ARTIFACTS for local dev).
output "artifact_bucket" {
value = google_storage_bucket.artifacts.name
}

View File

@@ -0,0 +1,681 @@
Google Cloud Product OS
Product-Centric IDE + SaaS Autopilot Platform (Requirements & Architecture)
Vision
Build a Product-Centric IDE and Automation Platform dedicated exclusively to:
Launching, growing, and operating SaaS products on Google Cloud
This is NOT a general-purpose IDE.
This is a Product Operating System (Product OS) designed to unify:
Code
Marketing
Analytics
Growth
Support
Experiments
Infrastructure
AI-driven automation
into one coherent platform.
It delivers:
A Cursor-like experience
Without Cursor cost
Powered by Gemini (Vertex AI)
Optimized specifically for Google Cloud
Focused exclusively on building & automating products
Core Product Principles
1. Product-Centric, Not Code-Centric
This platform optimizes for:
Shipping, launching, growing, and optimizing products, not just writing code.
2. Opinionated for Google Cloud
This system is:
Cloud Run-first
Firestore / Cloud SQL-native
BigQuery-native
Cloud Build-native
Gemini-native
No AWS, no Azure, no multi-cloud abstraction.
3. Automation First
Everything is:
Automatable
Observable
Auditable
Optimizable
4. AI as a Product Operator
The AI is not just a coding assistant.
It is a:
Product Operator AI
capable of coordinating marketing, growth, support, analytics, and code.
IDE Structure: Product-Centric Layout
Instead of a traditional IDE layout, the system must expose:
Product OS
├── Code
├── Marketing
├── Analytics
├── Growth
├── Support
├── Experiments
└── Infrastructure
Each section is first-class and AI-assisted.
Section Requirements
1. Code Section
Purpose:
Build and deploy product services
Must support:
Cloud Run services
Cloud SQL / Firestore integration
Secrets management
Logs & traces
Rollbacks
Service templates
Not required:
Arbitrary framework support
Every programming language
Optimized languages:
TypeScript / Node
Python
2. Marketing Section
Purpose:
Automate go-to-market and content execution
Must support:
Campaign generation
Social scheduling (Missinglettr)
Blog generation & updates
Landing page updates
Brand voice control
Product update → campaign pipeline
AI must:
Convert product changes into launch content
Adapt content to brand style
3. Analytics Section
Purpose:
Understand product performance and causality
Must support:
Funnels
Retention
Activation
Cohorts
LTV
Causal drivers
Experiment results
NOT a SQL editor.
This is a Product Intelligence Interface.
AI must answer:
“Why did conversion change?”
“What caused activation to drop?”
“What should we test next?”
4. Growth Section
Purpose:
Optimize onboarding and conversion
Must support:
Funnel definitions
Onboarding flows
Growth experiments
A/B tests
Nudge systems
Conversion optimization
AI must:
Detect drop-offs
Recommend experiments
Evaluate uplift
5. Support Section
Purpose:
Integrate customer feedback and product health
Must support:
Ticket ingestion
AI-assisted replies
Knowledge base generation
Product issue detection
Feedback loops into product & marketing
6. Experiments Section
Purpose:
Enable continuous product optimization
Must support:
Hypothesis creation
Experiment creation
Assignment
Result analysis
Causal impact estimation
Recommendation engine
7. Infrastructure Section
Purpose:
Hide GCP complexity behind product workflows
Must support:
Cloud Run provisioning
Pub/Sub
Cloud SQL / Firestore
IAM abstraction
Deploy / rollback
Resource health
No raw IAM or Terraform exposure by default.
Everything should be expressed as product-level actions.
AI System Design
Supervisor AI (Product Operator)
This is NOT a coding agent.
It is a:
Product Operator AI
capable of coordinating decisions across:
Marketing
Growth
Product
Support
Analytics
Experiments
Responsibilities:
Interpret product goals
Prioritize actions
Dispatch tasks to tools
Enforce policies
Learn from outcomes
Tool Execution Model (Critical Design Decision)
Backend Tool Execution (Option 1)
All tools execute on the backend.
The IDE:
NEVER runs gcloud
NEVER holds cloud credentials
NEVER touches databases directly
Instead:
IDE / Agent → Control Plane API → Executors → GCP Services
Benefits:
Security
Auditing
Shared automation with SaaS autopilot
Centralized policy enforcement
No local cloud configuration
Control Plane Architecture
Control Plane API
A Cloud Run service responsible for:
Authentication
Tool registry
Tool invocation routing
Policy enforcement
Run tracking
Artifact storage (GCS)
Gemini proxy
Core endpoints:
POST /tools/invoke
GET /runs/{id}
GET /runs/{id}/logs
GET /tools
GET /artifacts/{run_id}
Tool Registry
All actions are formalized as tools.
Example:
cloudrun.deploy_service
analytics.get_funnel_summary
firestore.update_company_brain
missinglettr.publish_campaign
experiments.create_ab_test
Each tool defines:
Input schema
Output schema
Risk level
Executor mapping
Used by:
IDE
Supervisor AI
Web Dashboard
Executors (Domain Services)
Each executor is a Cloud Run service with scoped permissions.
Deploy Executor
Cloud Build
Cloud Run
Artifact Registry
Analytics Executor
BigQuery
Causality modeling
Funnel analysis
Firestore Executor
Company Brain
Styles
Configs
SQL Executor
Summaries from Cloud SQL
Read-heavy
Missinglettr Executor
Campaign publishing
Scheduling
Data Layer
Firestore
Company Brain
Style profiles
Tool registry
Policy configs
Run metadata
GCS
Logs
Artifacts
AI outputs
Generated patches
Trace data
BigQuery
Events
Causality models
Experiments
Analytics warehouse
AI Code Editing Strategy
We do NOT build a new editor.
We use:
VS Code APIs
Patch-based updates
Flow:
AI generates structured diffs
IDE previews changes
User approves
IDE applies locally
Backend executes deploy/test
Later:
Backend can open PRs automatically
IDE Base Technology
Editor Base
We use:
✅ VSCodium
Not Code-OSS directly.
Reasons:
Open source
OpenVSX marketplace
Low maintenance
Redistributable
Fast to ship
Language Strategy
We support only:
TypeScript / Node
Python
This allows:
Better templates
Better debugging
Better automation
Faster AI alignment
IAM Strategy
Users
OAuth only
No GCP IAM exposure
Backend Service Accounts
Least privilege
Per-executor roles
No key files
Workload identity only
Product vs General IDE: Explicit Non-Goals
This platform is NOT:
A general code editor
A multi-cloud IDE
A framework playground
A replacement for VS Code for all use cases
It IS:
A Product Operating System
A SaaS automation platform
A GCP-native product launcher
An AI-driven product operator
Target Users
Solo founders
Indie hackers
Startup teams
AI-first SaaS companies
Product-led growth teams
Strategic Differentiation
You are not competing with:
VS Code
Cursor
JetBrains
You are competing with:
10+ disconnected tools:
Segment
HubSpot
Mixpanel
Amplitude
Intercom
Zapier
Notion
Google Cloud Console
Marketing schedulers
Experiment platforms
You replace them with:
One Product Operating System
Build Roadmap
Phase 1: Core Platform
Control Plane API
Deploy Executor
VSCodium Extension (Deploy + Logs)
Gemini integration
Phase 2: Product Intelligence
Firestore Executor (Company Brain)
Analytics Executor
Funnel + driver tools
Phase 3: Automation
Marketing Executor
Growth + Experimentation
Supervisor AI
Phase 4: Full Autopilot
Approval workflows
PR automation
Continuous optimization
Multi-tenant SaaS
Final Statement
This platform exists to enable:
One-click product launch, AI-driven growth, and autonomous SaaS operation on Google Cloud.
It is:
A Product OS
An AI Product Operator
A Cursor-like experience
A GCP-native automation platform

View File

@@ -0,0 +1,949 @@
1) Recommended reference architecture (Web SaaS-first, 1 product = 1 GCP project per env)
Project model
One product = one GCP project per environment
product-foo-dev
product-foo-staging
product-foo-prod
Optional “platform” projects (yours, not the customers'):
productos-control-plane (your backend + tool registry + auth)
productos-observability (optional central dashboards / cross-product rollups)
productos-billing-export (optional BigQuery billing export aggregation)
High-level runtime pattern
IDE + Supervisor AI never touch DBs/services directly.
They call your Control Plane API, which routes to domain Executors (Cloud Run services) with least-privilege service accounts.
VSCodium IDE (Product OS UI) Supervisor AI (Vertex)
\ /
\ /
-----> Control Plane API ----
|
-------------------------------------------------
| | | | |
Deploy Exec Analytics Exec Firestore SQL Exec Marketing Exec
(Cloud Build (BigQuery jobs) Exec Exec (Missinglettr,
+ Cloud Run) (Company (Cloud email provider)
Brain) SQL)
Per-product (customer) project: “product-foo-prod”
Must-have services
Cloud Run: product services + executors (if you deploy executors into product project)
Cloud SQL (Postgres/MySQL): transactional app data
Firestore: config + “Company Brain” + style profiles + run metadata (if you keep metadata per product)
BigQuery: event warehouse + analytics datasets/views + experimentation tables
Pub/Sub: event bus for product events + tool events
Cloud Tasks / Workflows / Scheduler: durable automation + cron-based routines
Secret Manager: tokens, DB creds, OAuth secrets (never in code)
Logging/Monitoring/Trace: observability
Where to place executors
Simplest: executors live in the product project (tight coupling, simple data access)
More “platform”: executors live in your platform project, and access product resources cross-project (strong central control, but more IAM + org policy considerations)
For your “product per project” approach, I recommend:
Deploy executor can live in platform (deploy across projects)
Data executors (SQL/Firestore/BigQuery) often live in product project (least-cross-project permissions)
Data flows
Events: Product apps → Pub/Sub → BigQuery (raw + curated)
Causation/insights: Analytics Exec reads BigQuery → writes Insight Objects to:
BigQuery tables (truth)
GCS artifacts (reports)
Firestore (summary pointers for UI)
Marketing: Marketing Exec pulls Insight Objects + Company Brain → generates campaigns → publishes via Missinglettr/social APIs; stores outputs in GCS + metadata in Firestore
2) Service-by-service IAM roles matrix (least privilege template)
Identities (service accounts)
You'll typically have:
sa-control-plane (platform): routes tool calls, enforces policy, writes run metadata/artifacts
sa-deploy-executor (platform): triggers builds and deploys to Cloud Run in product projects
sa-analytics-executor (product): reads BigQuery + writes insights
sa-firestore-executor (product): reads/writes Company Brain + configs
sa-sql-executor (product): connects to Cloud SQL (plus DB user for SQL-level permissions)
sa-marketing-executor (platform or product): reads insights + calls Missinglettr/email providers; reads secrets
Where I say “product project”, apply it to each env project (dev/staging/prod).
IAM matrix (by service)
Service / Scope Principal Roles (suggested) Notes
Cloud Run (product) sa-deploy-executor roles/run.admin (or narrower), roles/iam.serviceAccountUser (only on the runtime SA), roles/run.invoker (optional) Deploy revisions. Narrow iam.serviceAccountUser to only the runtime SA used by the service being deployed.
Cloud Build (platform or product) sa-deploy-executor roles/cloudbuild.builds.editor (or builds.builder depending on workflow) Triggers builds. Many teams keep builds centralized in platform.
Artifact Registry sa-deploy-executor roles/artifactregistry.writer Push images. If per-product registries, scope accordingly.
Secret Manager (platform/product) sa-marketing-executor, sa-deploy-executor roles/secretmanager.secretAccessor Only for the specific secrets needed.
BigQuery dataset (product) sa-analytics-executor roles/bigquery.dataViewer + roles/bigquery.jobUser Dataset-level grants. Prefer views/curated datasets.
BigQuery dataset (product write) sa-analytics-executor roles/bigquery.dataEditor (only for insight tables dataset) Separate datasets: events_raw (read), events_curated (read), insights (write).
Firestore (product) sa-firestore-executor roles/datastore.user (or roles/datastore.viewer) Use viewer when possible; writer only for Brain/config updates.
Cloud SQL (product) sa-sql-executor roles/cloudsql.client IAM to connect; SQL permissions handled by DB user(s).
Pub/Sub (product) Producers roles/pubsub.publisher For product services emitting events.
Pub/Sub (product) Consumers/executors roles/pubsub.subscriber For analytics/executor ingestion.
Cloud Tasks (product/platform) sa-control-plane or orchestrator roles/cloudtasks.enqueuer + roles/cloudtasks.viewer If you queue tool runs or retries.
Workflows (product/platform) sa-control-plane roles/workflows.invoker For orchestrated multi-step automations.
Cloud Storage (GCS artifacts) sa-control-plane roles/storage.objectAdmin (bucket-level) Write run artifacts; consider objectCreator + separate delete policy if you want immutability.
Cloud Run executors (wherever hosted) sa-control-plane roles/run.invoker Control Plane calls executors over HTTP.
Strongly recommended scoping rules
Grant BigQuery roles at the dataset level, not project level.
Use separate datasets for raw, curated, and insights.
For Cloud SQL, enforce read-only DB users for most endpoints; create a separate writer user only when needed.
Keep a “high risk” policy that requires approval for:
pricing changes
billing actions
production destructive infra
legal/claim-heavy marketing copy
3) Agent tool catalog (seed tool registry mapped to GCP services)
This is a starter “tool universe” your Supervisor AI + IDE can call. I've grouped by module and listed the backing GCP service.
A) Code module (build/test/deploy)
Tool name Purpose Executes in Backed by
repo.apply_patch Apply diff to repo (local or PR flow) Control Plane / Repo service (GitHub App or local workspace)
repo.open_pr Open PR with changes Control Plane GitHub App
build.run_tests Run unit tests Executor (local/offline or remote) Cloud Build / local runner
cloudrun.deploy_service Build + deploy service Deploy Exec Cloud Build + Cloud Run
cloudrun.rollback_service Roll back revision Deploy Exec Cloud Run
cloudrun.get_service_status Health, revisions, URL Deploy Exec Cloud Run
logs.tail Tail logs for service/run Observability Exec Cloud Logging
B) Marketing module (campaign creation + publishing)
Tool name Purpose Executes in Backed by
brand.get_profile Fetch voice/style/claims Firestore Exec Firestore
brand.update_profile Update voice/style rules Firestore Exec Firestore
marketing.generate_campaign_plan Create campaign plan from insight/product update Marketing Exec Vertex AI (Gemini)
marketing.generate_channel_posts Generate platform-specific posts Marketing Exec Vertex AI (Gemini)
marketing.publish_missinglettr Schedule/publish via Missinglettr Marketing Exec Missinglettr API + Secret Manager
marketing.publish_email Send email campaign Marketing Exec Email provider (SendGrid/etc) + Secret Manager
marketing.store_assets Save creatives/outputs Marketing Exec GCS
marketing.get_campaign_status Poll publish status Marketing Exec Missinglettr / provider APIs
C) Analytics module (events, funnels, causation)
Tool name Purpose Executes in Backed by
events.ingest Ingest events (if you own ingestion endpoint) Analytics/Ingress Exec Pub/Sub + BigQuery
analytics.funnel_summary Funnel metrics Analytics Exec BigQuery
analytics.cohort_retention Retention cohorts Analytics Exec BigQuery
analytics.anomaly_detect Detect anomalies in KPIs Analytics Exec BigQuery / BQML
analytics.top_drivers Feature/sequence drivers Analytics Exec BigQuery / BQML / Vertex
analytics.causal_uplift Uplift/causal impact estimate Analytics Exec BigQuery + Vertex (optional)
analytics.write_insight Persist insight object Analytics Exec BigQuery + Firestore pointer + GCS artifact
D) Growth module (onboarding + lifecycle optimization)
Tool name Purpose Executes in Backed by
growth.identify_dropoffs Identify where users drop Analytics Exec BigQuery
growth.propose_experiment Generate experiment hypothesis/design Growth Exec Gemini + policies
experiments.create Create experiment definition Experiments Exec Firestore/SQL + your assignment service
experiments.evaluate Evaluate results Analytics/Experiments Exec BigQuery
growth.generate_lifecycle_messages Draft onboarding/lifecycle content Marketing/Growth Exec Gemini
E) Support module (feedback + ticket assist)
Tool name Purpose Executes in Backed by
support.ingest_tickets Pull tickets from provider Support Exec Zendesk/Intercom API
support.summarize_ticket Summarize and classify Support Exec Gemini
support.draft_reply Draft response Support Exec Gemini + brand profile
support.update_kb Generate/update KB article Support Exec CMS/Docs + GCS
support.escalate_issue Create issue/task Support Exec GitHub Issues/Jira/etc
F) Infrastructure module (safe, templated ops only)
Tool name Purpose Executes in Backed by
infra.provision_service_template Create a Cloud Run service template Infra Exec Terraform/Cloud APIs
infra.provision_database Create Cloud SQL/Firestore config Infra Exec Cloud SQL / Firestore
infra.provision_pubsub Topics/subscriptions Infra Exec Pub/Sub
infra.rotate_secret Rotate/refresh secrets Infra Exec Secret Manager
infra.cost_report Cost summary for product Analytics/FinOps Exec Billing export → BigQuery
G) Auth module (product auth + internal access)
Tool name Purpose Executes in Backed by
auth.configure_identity_platform Set up end-user auth Auth/Infra Exec Identity Platform
auth.configure_iap Protect internal tools Infra Exec IAP
auth.create_oauth_client Create OAuth creds for integrations Infra Exec Google OAuth / Secret Manager
Recommended “v1 tool registry seed” (small, shippable)
If you want a tight first release, seed only these tools:
Code
cloudrun.deploy_service
cloudrun.get_service_status
logs.tail
Company Brain
brand.get_profile
brand.update_profile
Analytics
analytics.funnel_summary
analytics.top_drivers
analytics.write_insight
Marketing
marketing.generate_channel_posts
marketing.publish_missinglettr
That's enough to demonstrate the full Product OS loop:
deploy → events → insight → campaign → publish → measure.
If you want, I can also output this as an actual tool-registry.yaml starter file (matching the schema we drafted earlier) with concrete JSON Schemas for each of the v1 tools.
yes go for it
Here's a starter tool-registry.yaml you can drop into contracts/tool-registry.yaml. It matches the schema we defined earlier and includes concrete JSON Schemas for the v1 seed tools:
cloudrun.deploy_service
cloudrun.get_service_status
logs.tail
brand.get_profile
brand.update_profile
analytics.funnel_summary
analytics.top_drivers
analytics.write_insight
marketing.generate_channel_posts
marketing.publish_missinglettr
Replace the executor.url placeholders with your actual Cloud Run service URLs.
version: 1
tools:
# ----------------------------
# CODE / DEPLOYMENT
# ----------------------------
cloudrun.deploy_service:
description: Build and deploy a Cloud Run service using Cloud Build. Returns the service URL and deployed revision.
risk: medium
executor:
kind: http
url: https://deploy-executor-REPLACE.a.run.app
path: /execute/cloudrun/deploy
inputSchema:
type: object
additionalProperties: false
required: [service_name, repo, ref, env, region]
properties:
service_name:
type: string
minLength: 1
description: Cloud Run service name.
repo:
type: string
minLength: 1
description: Git repo URL (HTTPS).
ref:
type: string
minLength: 1
description: Git ref (branch/tag/SHA).
env:
type: string
enum: [dev, staging, prod]
region:
type: string
minLength: 1
description: GCP region for the Cloud Run service (e.g., us-central1).
build:
type: object
additionalProperties: false
properties:
dockerfile_path:
type: string
default: Dockerfile
build_context:
type: string
default: .
env_vars:
type: object
additionalProperties:
type: string
description: Environment variables to set during build/deploy (non-secret).
deploy:
type: object
additionalProperties: false
properties:
cpu:
type: string
description: Cloud Run CPU (e.g., "1", "2").
memory:
type: string
description: Cloud Run memory (e.g., "512Mi", "1Gi").
min_instances:
type: integer
minimum: 0
max_instances:
type: integer
minimum: 1
concurrency:
type: integer
minimum: 1
timeout_seconds:
type: integer
minimum: 1
maximum: 3600
service_account_email:
type: string
description: Runtime service account email for the Cloud Run service.
allow_unauthenticated:
type: boolean
default: false
outputSchema:
type: object
additionalProperties: false
required: [service_url, revision]
properties:
service_url:
type: string
revision:
type: string
build_id:
type: string
warnings:
type: array
items:
type: string
cloudrun.get_service_status:
description: Fetch Cloud Run service status including latest revision and URL.
risk: low
executor:
kind: http
url: https://deploy-executor-REPLACE.a.run.app
path: /execute/cloudrun/status
inputSchema:
type: object
additionalProperties: false
required: [service_name, region]
properties:
service_name:
type: string
minLength: 1
region:
type: string
minLength: 1
outputSchema:
type: object
additionalProperties: false
required: [service_name, region, service_url, latest_ready_revision, status]
properties:
service_name:
type: string
region:
type: string
service_url:
type: string
latest_ready_revision:
type: string
status:
type: string
enum: [ready, deploying, error, unknown]
last_deploy_time:
type: string
description: ISO timestamp if available.
logs.tail:
description: Tail recent logs for a Cloud Run service or for a specific run_id. Returns log lines (best-effort).
risk: low
executor:
kind: http
url: https://observability-executor-REPLACE.a.run.app
path: /execute/logs/tail
inputSchema:
type: object
additionalProperties: false
required: [scope, limit]
properties:
scope:
type: string
enum: [service, run]
description: Tail logs by service or by tool run.
service_name:
type: string
description: Required if scope=service.
region:
type: string
description: Optional when scope=service, depending on your log query strategy.
run_id:
type: string
description: Required if scope=run.
limit:
type: integer
minimum: 1
maximum: 2000
default: 200
since_seconds:
type: integer
minimum: 1
maximum: 86400
default: 900
outputSchema:
type: object
additionalProperties: false
required: [lines]
properties:
lines:
type: array
items:
type: object
additionalProperties: false
required: [timestamp, text]
properties:
timestamp:
type: string
severity:
type: string
text:
type: string
# ----------------------------
# COMPANY BRAIN (BRAND + STYLE)
# ----------------------------
brand.get_profile:
description: Retrieve the tenant's brand profile (voice, tone, positioning, compliance constraints).
risk: low
executor:
kind: http
url: https://firestore-executor-REPLACE.a.run.app
path: /execute/brand/get_profile
inputSchema:
type: object
additionalProperties: false
required: [profile_id]
properties:
profile_id:
type: string
minLength: 1
description: Brand profile identifier (e.g., "default").
outputSchema:
type: object
additionalProperties: false
required: [profile_id, brand]
properties:
profile_id:
type: string
brand:
type: object
additionalProperties: false
required: [name, voice, audience, claims_policy]
properties:
name:
type: string
voice:
type: object
additionalProperties: false
required: [tone, style_notes, do, dont]
properties:
tone:
type: array
items: { type: string }
style_notes:
type: array
items: { type: string }
do:
type: array
items: { type: string }
dont:
type: array
items: { type: string }
audience:
type: object
additionalProperties: false
properties:
primary:
type: string
secondary:
type: string
claims_policy:
type: object
additionalProperties: false
properties:
forbidden_claims:
type: array
items: { type: string }
required_disclaimers:
type: array
items: { type: string }
compliance_notes:
type: array
items: { type: string }
brand.update_profile:
description: Update the tenant's brand profile. Write operations should be validated and audited.
risk: medium
executor:
kind: http
url: https://firestore-executor-REPLACE.a.run.app
path: /execute/brand/update_profile
inputSchema:
type: object
additionalProperties: false
required: [profile_id, patch]
properties:
profile_id:
type: string
minLength: 1
patch:
type: object
description: Partial update object; executor must validate allowed fields.
outputSchema:
type: object
additionalProperties: false
required: [ok, updated_at]
properties:
ok:
type: boolean
updated_at:
type: string
# ----------------------------
# ANALYTICS / CAUSATION (V1 metrics + drivers)
# ----------------------------
analytics.funnel_summary:
description: Return funnel metrics for a time window. Uses curated events in BigQuery.
risk: low
executor:
kind: http
url: https://analytics-executor-REPLACE.a.run.app
path: /execute/analytics/funnel_summary
inputSchema:
type: object
additionalProperties: false
required: [range_days, funnel]
properties:
range_days:
type: integer
minimum: 1
maximum: 365
funnel:
type: object
additionalProperties: false
required: [name, steps]
properties:
name:
type: string
steps:
type: array
minItems: 2
items:
type: object
additionalProperties: false
required: [event_name]
properties:
event_name:
type: string
filter:
type: object
description: Optional event property filters (executor-defined).
segment:
type: object
description: Optional segment definition (executor-defined).
outputSchema:
type: object
additionalProperties: false
required: [funnel_name, range_days, steps]
properties:
funnel_name:
type: string
range_days:
type: integer
steps:
type: array
items:
type: object
additionalProperties: false
required: [event_name, users, conversion_from_prev]
properties:
event_name:
type: string
users:
type: integer
minimum: 0
conversion_from_prev:
type: number
minimum: 0
maximum: 1
analytics.top_drivers:
description: Identify top correlated drivers for a target metric/event (v1: correlation/feature importance; later: causality).
risk: low
executor:
kind: http
url: https://analytics-executor-REPLACE.a.run.app
path: /execute/analytics/top_drivers
inputSchema:
type: object
additionalProperties: false
required: [range_days, target]
properties:
range_days:
type: integer
minimum: 1
maximum: 365
target:
type: object
additionalProperties: false
required: [metric]
properties:
metric:
type: string
description: Named metric (e.g., "trial_to_paid", "activation_rate") or event-based metric.
event_name:
type: string
description: Optional: if metric is event-based, supply event_name.
candidate_features:
type: array
items:
type: string
description: Optional list of features/properties to consider.
segment:
type: object
description: Optional segmentation.
outputSchema:
type: object
additionalProperties: false
required: [target, range_days, drivers]
properties:
target:
type: object
range_days:
type: integer
drivers:
type: array
items:
type: object
additionalProperties: false
required: [name, score, direction, evidence]
properties:
name:
type: string
score:
type: number
direction:
type: string
enum: [positive, negative, mixed, unknown]
evidence:
type: string
description: Human-readable summary of why this driver matters.
confidence:
type: number
minimum: 0
maximum: 1
analytics.write_insight:
description: Persist an insight object (BigQuery table + Firestore pointer + GCS artifact). Returns an insight_id.
risk: medium
executor:
kind: http
url: https://analytics-executor-REPLACE.a.run.app
path: /execute/analytics/write_insight
inputSchema:
type: object
additionalProperties: false
required: [insight]
properties:
insight:
type: object
additionalProperties: false
required: [type, title, summary, severity, confidence, window, recommendations]
properties:
type:
type: string
enum: [funnel_drop, anomaly, driver, experiment_result, general]
title:
type: string
summary:
type: string
severity:
type: string
enum: [info, low, medium, high, critical]
confidence:
type: number
minimum: 0
maximum: 1
window:
type: object
additionalProperties: false
required: [range_days]
properties:
range_days:
type: integer
minimum: 1
maximum: 365
context:
type: object
description: Arbitrary structured context (metric names, segments, charts pointers).
recommendations:
type: array
minItems: 1
items:
type: object
additionalProperties: false
required: [action, rationale]
properties:
action:
type: string
rationale:
type: string
links:
type: array
items:
type: object
additionalProperties: false
required: [label, url]
properties:
label: { type: string }
url: { type: string }
outputSchema:
type: object
additionalProperties: false
required: [insight_id, stored]
properties:
insight_id:
type: string
stored:
type: object
additionalProperties: false
required: [bigquery, firestore, gcs]
properties:
bigquery:
type: object
additionalProperties: false
required: [dataset, table]
properties:
dataset: { type: string }
table: { type: string }
firestore:
type: object
additionalProperties: false
required: [collection, doc_id]
properties:
collection: { type: string }
doc_id: { type: string }
gcs:
type: object
additionalProperties: false
required: [bucket, prefix]
properties:
bucket: { type: string }
prefix: { type: string }
# ----------------------------
# MARKETING (GENERATION + PUBLISH)
# ----------------------------
marketing.generate_channel_posts:
description: Generate platform-specific social posts from a campaign brief + brand profile.
risk: low
executor:
kind: http
url: https://marketing-executor-REPLACE.a.run.app
path: /execute/marketing/generate_channel_posts
inputSchema:
type: object
additionalProperties: false
required: [brief, channels, brand_profile_id]
properties:
brand_profile_id:
type: string
description: Brand profile id to load (e.g., "default").
brief:
type: object
additionalProperties: false
required: [goal, product, audience, key_points]
properties:
goal:
type: string
description: What outcome are we driving? (e.g., "trial signups")
product:
type: string
audience:
type: string
key_points:
type: array
minItems: 1
items: { type: string }
offer:
type: string
call_to_action:
type: string
landing_page_url:
type: string
channels:
type: array
minItems: 1
items:
type: string
enum: [x, linkedin, facebook, instagram, tiktok, youtube, pinterest, reddit, google_business, mastodon, bluesky, threads]
variations_per_channel:
type: integer
minimum: 1
maximum: 10
default: 3
constraints:
type: object
additionalProperties: false
properties:
max_length:
type: integer
minimum: 50
maximum: 4000
emoji_level:
type: string
enum: [none, light, medium, heavy]
default: light
include_hashtags:
type: boolean
default: true
outputSchema:
type: object
additionalProperties: false
required: [channels]
properties:
channels:
type: array
items:
type: object
additionalProperties: false
required: [channel, posts]
properties:
channel:
type: string
posts:
type: array
items:
type: object
additionalProperties: false
required: [text]
properties:
text: { type: string }
title: { type: string }
alt_text: { type: string }
hashtags:
type: array
items: { type: string }
media_suggestions:
type: array
items: { type: string }
marketing.publish_missinglettr:
description: Publish or schedule a campaign via Missinglettr using stored OAuth/token secrets.
risk: medium
executor:
kind: http
url: https://marketing-executor-REPLACE.a.run.app
path: /execute/marketing/publish_missinglettr
inputSchema:
type: object
additionalProperties: false
required: [campaign, schedule]
properties:
campaign:
type: object
additionalProperties: false
required: [name, posts]
properties:
name:
type: string
posts:
type: array
minItems: 1
items:
type: object
additionalProperties: false
required: [channel, text]
properties:
channel:
type: string
enum: [x, linkedin, facebook, instagram, tiktok, youtube, pinterest, reddit, google_business, mastodon, bluesky, threads]
text:
type: string
media_urls:
type: array
items: { type: string }
link_url:
type: string
schedule:
type: object
additionalProperties: false
required: [mode]
properties:
mode:
type: string
enum: [now, scheduled]
start_time:
type: string
description: ISO timestamp required if mode=scheduled.
timezone:
type: string
default: UTC
idempotency_key:
type: string
description: Optional idempotency key to prevent duplicates.
outputSchema:
type: object
additionalProperties: false
required: [provider, campaign_id, status]
properties:
provider:
type: string
enum: [missinglettr]
campaign_id:
type: string
status:
type: string
enum: [queued, scheduled, published, failed]
provider_response:
type: object
description: Raw provider response (redacted as needed).

View File

@@ -0,0 +1,610 @@
Google Cloud Product OS
Technical Specification
Product-Centric IDE + SaaS Autopilot Platform
1. Purpose
This document defines the technical architecture, components, interfaces, and implementation plan for building a:
Google Cloud–native, Gemini-powered Product Operating System (Product OS)
The platform unifies:
Code development
Product launch
Marketing automation
Analytics and causality
Growth optimization
Support automation
Experimentation
Infrastructure management
into a single product-centric IDE and automation system.
This is not a general-purpose IDE.
It is a Product OS for launching and operating SaaS products on Google Cloud.
2. Core Design Principles
2.1 Product-Centric Orientation
The platform optimizes for:
Shipping products
Launching features
Running marketing
Optimizing growth
Operating infrastructure
Automating decisions
Not for:
Arbitrary coding workflows
Multi-cloud portability
Framework experimentation
2.2 Opinionated for Google Cloud
The platform is single-cloud and deeply integrated with:
Cloud Run
Cloud Build
Artifact Registry
Firestore
Cloud SQL
BigQuery
Pub/Sub
Vertex AI (Gemini)
No AWS or Azure abstraction layers are supported.
2.3 Backend Tool Execution (Security Model)
All automation executes on the backend.
The IDE:
Never runs gcloud
Never runs Terraform
Never holds GCP credentials
Never touches databases directly
Instead:
IDE / Supervisor AI
Control Plane API
Executors
GCP Services
2.4 AI as a Product Operator
The AI is not a coding assistant.
It is a:
Product Operator AI
Responsibilities:
Interpret product goals
Read analytics and insights
Decide actions
Dispatch tools
Enforce policies
Learn from outcomes
3. High-Level Architecture
┌─────────────────────────────┐
│ VSCodium IDE Client │
│ (Product-Centric UI Shell) │
└──────────────┬──────────────┘
┌──────────────────────────┐
│ Control Plane API │
│ (Tool Router + Policy) │
└──────────────┬───────────┘
┌──────────────┬───────────┼─────────────┬──────────────┐
▼ ▼ ▼ ▼ ▼
Deploy Executor Analytics Exec Firestore Exec SQL Exec Missinglettr Exec
Cloud Build+Run BigQuery Firestore Cloud SQL Social Posting
┌──────▼───────┐
│ GCS Store │
│ Artifacts │
└──────────────┘
4. IDE Client Architecture
4.1 Base Editor
VSCodium distribution
OpenVSX marketplace
Preinstalled extensions
Preconfigured settings
Custom UI panels
4.2 Product-Centric Navigation
The IDE must expose:
Product OS
├── Code
├── Marketing
├── Analytics
├── Growth
├── Support
├── Experiments
└── Infrastructure
Each section is:
First-class
AI-assisted
Connected to backend tools
4.3 IDE Responsibilities
The IDE handles:
File editing
Patch preview & application
Project context collection
Tool invocation UI
Artifact viewing
Logs & traces display
The IDE does NOT:
Execute cloud commands
Store secrets
Perform deployments
Perform database queries
5. Control Plane API
5.1 Purpose
The Control Plane is the central orchestration backend.
Responsibilities:
Auth
Tool registry
Tool invocation routing
Policy enforcement
Run tracking
Artifact storage (GCS)
Gemini proxy
5.2 Core Endpoints
POST /tools/invoke
GET /runs/{run_id}
GET /runs/{run_id}/logs
GET /tools
GET /artifacts/{run_id}
5.3 Tool Invocation Contract
Request
{
"tool": "cloudrun.deploy_service",
"tenant_id": "t_123",
"workspace_id": "w_456",
"input": {
"service_name": "marketing-gateway",
"repo": "github.com/org/repo",
"ref": "main",
"env": "prod"
},
"dry_run": false
}
Response
{
"run_id": "run_20260119_abc",
"status": "queued"
}
6. Tool Registry
All executable actions are declared as tools.
6.1 Tool Schema
tools:
cloudrun.deploy_service:
description: Deploy a Cloud Run service
input_schema:
service_name: string
repo: string
ref: string
env: string
output_schema:
service_url: string
risk: medium
executor: deploy-executor
6.2 Registry Responsibilities
Input validation
Output validation
Risk classification
Executor routing
Used by:
IDE
Supervisor AI
Web dashboard
7. Executors (Domain Services)
Each executor is a Cloud Run service with its own service account.
7.1 Deploy Executor
Purpose:
Build and deploy services
Tools:
cloudrun.deploy_service
cloudrun.tail_logs
cloudrun.rollback
GCP APIs:
Cloud Build
Cloud Run
Artifact Registry
IAM:
roles/cloudbuild.builds.editor
roles/run.admin (scoped)
roles/artifactregistry.writer
7.2 Analytics Executor (OpsOS)
Purpose:
Product intelligence and causality
Tools:
analytics.get_funnel_summary
analytics.get_top_drivers
analytics.get_anomalies
GCP APIs:
BigQuery
BigQuery ML
IAM:
roles/bigquery.dataViewer
roles/bigquery.jobUser
7.3 Firestore Executor
Purpose:
Company Brain + configs
Tools:
firestore.get_company_brain
firestore.update_company_brain
GCP APIs:
Firestore
IAM:
roles/datastore.user
7.4 SQL Executor
Purpose:
Transactional summaries
Tools:
sql.get_subscription_summary
sql.get_user_metrics
GCP APIs:
Cloud SQL
IAM:
roles/cloudsql.client
DB-level users
7.5 Missinglettr Executor
Purpose:
Social publishing
Tools:
missinglettr.publish_campaign
missinglettr.get_campaign_status
Secrets:
Missinglettr API tokens
IAM:
roles/secretmanager.secretAccessor
8. Data Storage
8.1 Firestore
Used for:
Company Brain
Tool registry
Policy configs
Style profiles
Run metadata
8.2 GCS
Used for:
Logs
AI outputs
Generated patches
Deployment artifacts
Prompt snapshots
8.3 BigQuery
Used for:
Event warehouse
Funnels
Causality models
Experiment results
9. AI Integration
9.1 Gemini Proxy
All AI calls go through Control Plane.
Responsibilities:
Auth
Rate limiting
Prompt registry
Logging
Cost controls
9.2 AI Patch Contract
Gemini must return:
{
"files": [
{
"path": "src/main.ts",
"diff": "@@ -1,3 +1,6 @@ ..."
}
],
"commands": [
"npm test"
],
"summary": "Add logging middleware"
}
10. IAM Strategy
10.1 Users
OAuth only
No GCP IAM
No key files
10.2 Backend
Workload identity
No long-lived keys
Least privilege
Per-executor roles
11. Supported Languages
TypeScript / Node
Python
No additional languages in v1.
12. SaaS Autopilot Layer
A Supervisor AI Agent runs in Vertex AI Agent Designer.
It calls the same tools as the IDE.
Supervisor AI → Control Plane → Executors
13. Non-Goals
The platform does NOT:
Replace VS Code generically
Support all frameworks
Support multi-cloud
Allow raw IAM editing
Execute cloud commands locally
14. Repository Structure
/platform
/client-ide
/vscodium
/extensions
/backend
/control-plane
/executors
/contracts
/infra
/docs
15. Implementation Phases
Phase 1 — Core
Control Plane API
Deploy Executor
Gemini Proxy
IDE Deploy UI
Phase 2 — Intelligence
Firestore Executor
Analytics Executor
Funnel + driver tools
Phase 3 — Automation
Missinglettr Executor
Growth + Experiments
Supervisor AI
16. Final Statement
This system is a:
Google Cloud–native Product Operating System
for launching, growing, and automating SaaS products
using Gemini and backend-controlled automation.
Optional Next Steps
Generate Control Plane API scaffold
Generate Tool Registry schema
Generate VSCodium extension skeleton
Generate Terraform base
If you want, I can next generate:
The Control Plane API OpenAPI spec
The Tool Registry schema file
The First Executor service skeleton
The VSCodium extension skeleton
Tell me which one you want first.

View File

@@ -0,0 +1,289 @@
Final Direction Summary: Replacing Cursor for Your Use Case
Core Goal
You want:
A Cursor-like chat experience
Integrated with:
your codebase
Google Cloud services
your product workflows
Without paying for Cursor or depending on OpenAI/Cursor infra.
We aligned on an approach that gives you this, while keeping costs, maintenance, and risk manageable.
The Chosen Architecture
1. Use VSCodium as your editor base
Instead of Cursor or VS Code:
Open-source
Redistributable
No telemetry/licensing issues
Compatible with VS Code extensions
Lets you ship your own IDE experience
You are not building a new editor, you are building a product cockpit on top of a proven editor shell.
2. Build your product experience as an Extension (not a fork)
We agreed:
Extension-first is the right V1 strategy.
Because with an extension you can:
Add your own Product OS UI
Build your own chat interface
Integrate Gemini + GCP + tools
Ship cross-platform quickly
Avoid the heavy maintenance cost of a fork
A fork only becomes justified later if you need:
Hard shell changes
Locked-down layouts
Enterprise kiosk behavior
3. Use an Open-Source Chat UI Instead of Cursor
To avoid building chat UI from scratch, we landed on:
✅ Best starting point: Open-source chat extensions
You can reuse or extend:
Option A (Recommended)
Copilot Chat UI (open-sourced by Microsoft)
Production-grade chat UI
MIT license
Can be repointed to:
your backend
Gemini / Vertex AI
Gives you:
streaming responses
history
context-aware UX
Option B (Fast prototyping)
Continue
Open-source
Already works in VSCodium
Can connect to:
local LLMs
remote APIs (your Gemini backend)
Great for validating UX quickly
This gives you:
A Cursor-like chat UX without Cursor.
4. Gemini + Control Plane replaces Cursor's backend
Instead of:
Cursor → OpenAI → Cursor tools
You will have:
VSCodium → Your Extension → Control Plane → Gemini (Vertex AI) + GCP Tools
Your backend becomes the intelligence layer:
/chat endpoint → Gemini
/tools/invoke → deploy, logs, analytics, campaigns, etc
policy enforcement
cost tracking
product-aware reasoning
This gives you:
full ownership
no vendor lock-in
better monetization control
5. Code Generation Does NOT require rebuilding everything
We clarified:
You do NOT need to rebuild a full editor or execution engine to generate code.
You only need:
Minimal tooling:
Model returns:
structured diffs
optional commands
Extension:
previews changes
applies patches
optionally runs tests
Everything else (editing, git, terminals) is already provided by VSCodium.
So you get:
Cursor-like “generate code and apply it” behavior
without building Cursor from scratch.
6. Direct Cloud Access: Use Signed URLs, Not Service Accounts
We aligned on:
Don't give the IDE persistent cloud credentials
Use:
Control Plane → signed URLs → GCS
This gives you:
better security
easier monetization
easy migration later
avoids long-term risk
You can still have:
Direct data transfer
without exposing cloud identities.
7. Product OS > Code Chat Only
You're not just building a "code helper chat".
You're building a Product OS, where chat can:
generate code
deploy services
analyze funnels
generate campaigns
summarize experiments
optimize onboarding
respond to support tickets
That's your differentiator over Cursor:
Cursor is a coding assistant
You're building a product automation cockpit
What This Means Practically
You will:
Run VSCodium
Install:
Your Product OS extension
An open-source chat UI (or embed it)
Connect it to:
Your Control Plane
Gemini on Vertex AI
Add:
Tool invocation
Product modules (marketing, analytics, growth, etc)
Ship:
A Cursor-free AI IDE focused on launching and running products
What You Avoid
By this approach, you avoid:
Paying Cursor per seat
Being locked into OpenAI routing
Forking VS Code prematurely
Owning an editor platform too early
Maintaining a custom compiler/distribution pipeline
Final Position
You do not need Cursor.
You can build:
A great chat interface
With code + GCP integration
On VSCodium
With open-source UI
Powered by Gemini
And fully controlled by you
If you'd like, next I can:
Lay out a concrete build roadmap (V1 → V3)
Or give you a minimal stack diagram + repo layout
Or produce a starter technical spec for your Product OS Chat + Tooling platform