+
+
+ {#if error}
+
{error}
+ {/if}
+
+ {#if loading}
+
Lade …
+ {:else}
+ {#each flatProviders() as provider (provider.id)}
+ {@const cfg = configFor(provider.id)}
+ {@const form = forms[provider.id]}
+
+ {/each}
+ {/if}
+
+
+
diff --git a/services/mana-research/src/db/schema/research.ts b/services/mana-research/src/db/schema/research.ts
index 3e9d210cf..1b537d2a9 100644
--- a/services/mana-research/src/db/schema/research.ts
+++ b/services/mana-research/src/db/schema/research.ts
@@ -129,3 +129,39 @@ export type EvalResult = typeof evalResults.$inferSelect;
export type NewEvalResult = typeof evalResults.$inferInsert;
export type ProviderConfig = typeof providerConfigs.$inferSelect;
export type ProviderStat = typeof providerStats.$inferSelect;
+
+export const asyncJobStatusEnum = pgEnum('research_async_status', [
+ 'queued',
+ 'running',
+ 'completed',
+ 'failed',
+ 'cancelled',
+]);
+
+/** Long-running research tasks (openai-deep-research). User submits, polls. */
+export const asyncJobs = researchSchema.table(
+ 'async_jobs',
+ {
+ id: uuid('id').primaryKey().defaultRandom(),
+ userId: text('user_id').notNull(),
+ providerId: text('provider_id').notNull(),
+ externalId: text('external_id'),
+ status: asyncJobStatusEnum('status').notNull().default('queued'),
+ query: text('query').notNull(),
+ options: jsonb('options'),
+ reservationId: text('reservation_id'),
+ costCredits: integer('cost_credits').notNull().default(0),
+ result: jsonb('result'),
+ errorMessage: text('error_message'),
+ runId: uuid('run_id').references(() => evalRuns.id, { onDelete: 'set null' }),
+ createdAt: timestamp('created_at', { withTimezone: true }).defaultNow().notNull(),
+ updatedAt: timestamp('updated_at', { withTimezone: true }).defaultNow().notNull(),
+ },
+ (t) => ({
+ userIdx: index('async_jobs_user_idx').on(t.userId, t.createdAt),
+ statusIdx: index('async_jobs_status_idx').on(t.status),
+ })
+);
+
+export type AsyncJob = typeof asyncJobs.$inferSelect;
+export type NewAsyncJob = typeof asyncJobs.$inferInsert;
diff --git a/services/mana-research/src/index.ts b/services/mana-research/src/index.ts
index cf4630dda..54e3cbb53 100644
--- a/services/mana-research/src/index.ts
+++ b/services/mana-research/src/index.ts
@@ -21,9 +21,11 @@ import { createExtractRoutes } from './routes/extract';
import { createResearchRoutes } from './routes/research';
import { createProvidersRoutes } from './routes/providers';
import { createRunsRoutes } from './routes/runs';
+import { createProviderConfigRoutes } from './routes/provider-configs';
import { buildRegistry } from './providers/registry';
import { RunStorage } from './storage/runs';
import { ConfigStorage } from './storage/configs';
+import { AsyncJobStorage } from './storage/async-jobs';
import { CreditsClient } from './clients/mana-credits';
import { ManaSearchClient } from './clients/mana-search';
import { ManaLlmClient } from './clients/mana-llm';
@@ -45,6 +47,7 @@ const credits = new CreditsClient({
const runStorage = new RunStorage(db);
const configStorage = new ConfigStorage(db);
+const asyncStorage = new AsyncJobStorage(db);
const registry = buildRegistry({ manaSearch });
const executorDeps = {
@@ -86,11 +89,17 @@ app.use('/api/v1/extract/*', jwtAuth(config.manaAuthUrl));
app.route('/api/v1/extract', createExtractRoutes(registry, runStorage, executorDeps, config));
app.use('/api/v1/research/*', jwtAuth(config.manaAuthUrl));
-app.route('/api/v1/research', createResearchRoutes(registry, runStorage, executorDeps, config));
+app.route(
+ '/api/v1/research',
+ createResearchRoutes(registry, runStorage, executorDeps, config, asyncStorage, credits)
+);
app.use('/api/v1/runs/*', jwtAuth(config.manaAuthUrl));
app.route('/api/v1/runs', createRunsRoutes(runStorage));
+app.use('/api/v1/provider-configs/*', jwtAuth(config.manaAuthUrl));
+app.route('/api/v1/provider-configs', createProviderConfigRoutes(db));
+
// Service-to-service (X-Service-Key auth) — wired up in Phase 3 when mana-ai migrates
app.use('/api/v1/internal/*', serviceAuth(config.serviceKey));
app.get('/api/v1/internal/health', (c) => c.json({ ok: true }));
diff --git a/services/mana-research/src/providers/agent/openai-deep-research.ts b/services/mana-research/src/providers/agent/openai-deep-research.ts
new file mode 100644
index 000000000..8df5c04f6
--- /dev/null
+++ b/services/mana-research/src/providers/agent/openai-deep-research.ts
@@ -0,0 +1,169 @@
+/**
+ * OpenAI Deep Research — async via the Responses API with `background: true`.
+ * Docs: https://platform.openai.com/docs/guides/deep-research
+ *
+ * Two-phase flow:
+ * submit() — POST /v1/responses → returns { id, status: 'queued' | 'in_progress' }
+ * poll(id) — GET /v1/responses/{id} → eventual { status: 'completed', output: [...] }
+ *
+ * Results typically arrive in 5–30 minutes. We persist the OpenAI response.id
+ * in research.async_jobs and expose POST /v1/research/async + GET /:taskId.
+ */
+
+import type { AgentAnswer, Citation } from '@mana/shared-research';
+import { ProviderError, ProviderNotConfiguredError } from '../../lib/errors';
+
+const DEFAULT_MODEL = 'o3-deep-research';
+
+export interface DeepResearchSubmitResult {
+ externalId: string;
+ status: 'queued' | 'running';
+}
+
+export interface DeepResearchPollResult {
+  /** Mirrors research_async_status; 'cancelled' covers server-side cancellation (see OpenAISubmitResponse.status). */
+  status: 'queued' | 'running' | 'completed' | 'failed' | 'cancelled';
+  answer?: AgentAnswer;
+  error?: string;
+}
+
+interface OpenAISubmitResponse {
+ id: string;
+ status?: 'queued' | 'in_progress' | 'completed' | 'failed' | 'cancelled' | 'incomplete';
+ error?: { message?: string };
+}
+
+interface OpenAIPollResponse extends OpenAISubmitResponse {
+ output?: Array<{
+ type: string;
+ role?: string;
+ content?: Array<{
+ type: string;
+ text?: string;
+ annotations?: Array<{
+ type: string;
+ url?: string;
+ title?: string;
+ }>;
+ }>;
+ }>;
+ output_text?: string;
+ usage?: {
+ input_tokens?: number;
+ output_tokens?: number;
+ };
+}
+
+export async function submitDeepResearch(
+ query: string,
+ options: { model?: string; maxTokens?: number; systemPrompt?: string } = {},
+ apiKey: string | null,
+ signal?: AbortSignal
+): Promise