diff --git a/.changeset/ai-sdk-chat-transport.md b/.changeset/ai-sdk-chat-transport.md new file mode 100644 index 00000000000..f5cdb9187d4 --- /dev/null +++ b/.changeset/ai-sdk-chat-transport.md @@ -0,0 +1,42 @@ +--- +"@trigger.dev/sdk": minor +--- + +Add AI SDK chat transport integration via two new subpath exports: + +**`@trigger.dev/sdk/chat`** (frontend, browser-safe): +- `TriggerChatTransport` — custom `ChatTransport` for the AI SDK's `useChat` hook that runs chat completions as durable Trigger.dev tasks +- `createChatTransport()` — factory function + +```tsx +import { useChat } from "@ai-sdk/react"; +import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; + +const { messages, sendMessage } = useChat({ + transport: new TriggerChatTransport({ + task: "my-chat-task", + accessToken, + }), +}); +``` + +**`@trigger.dev/sdk/ai`** (backend, extends existing `ai.tool`/`ai.currentToolOptions`): +- `chatTask()` — pre-typed task wrapper with auto-pipe support +- `pipeChat()` — pipe a `StreamTextResult` or stream to the frontend +- `CHAT_STREAM_KEY` — the default stream key constant +- `ChatTaskPayload` type + +```ts +import { chatTask } from "@trigger.dev/sdk/ai"; +import { streamText, convertToModelMessages } from "ai"; + +export const myChatTask = chatTask({ + id: "my-chat-task", + run: async ({ messages }) => { + return streamText({ + model: openai("gpt-4o"), + messages: convertToModelMessages(messages), + }); + }, +}); +``` diff --git a/.changeset/ai-tool-execute-helper.md b/.changeset/ai-tool-execute-helper.md new file mode 100644 index 00000000000..6f7b8914504 --- /dev/null +++ b/.changeset/ai-tool-execute-helper.md @@ -0,0 +1,5 @@ +--- +"@trigger.dev/sdk": patch +--- + +Add `ai.toolExecute(task)` so you can pass Trigger's subtask/metadata wiring as the `execute` handler to AI SDK `tool()` while defining `description` and `inputSchema` yourself. Refactors `ai.tool()` to share the same internal handler. 
diff --git a/.changeset/ai-tool-toolset-typing.md b/.changeset/ai-tool-toolset-typing.md new file mode 100644 index 00000000000..de67be637f3 --- /dev/null +++ b/.changeset/ai-tool-toolset-typing.md @@ -0,0 +1,6 @@ +--- +"@trigger.dev/sdk": patch +--- + +Align `ai.tool()` (`toolFromTask`) with the AI SDK `ToolSet` shape: Zod-backed tasks use static `tool()`; returns are asserted as `Tool & ToolSet[string]`. Raise the SDK's minimum `ai` devDependency to `^6.0.116` so emitted types resolve the same `ToolSet` as apps on AI SDK 6.0.x (avoids cross-version `ToolSet` mismatches in monorepos). + diff --git a/.changeset/chat-run-pat-renewal.md b/.changeset/chat-run-pat-renewal.md new file mode 100644 index 00000000000..8d4b6cb80ea --- /dev/null +++ b/.changeset/chat-run-pat-renewal.md @@ -0,0 +1,6 @@ +--- +"@trigger.dev/core": patch +"@trigger.dev/sdk": patch +--- + +Add run-scoped PAT renewal for chat transport (`renewRunAccessToken`), fail fast on 401/403 for SSE without retry backoff, and export `isTriggerRealtimeAuthError` for auth-error detection. diff --git a/.changeset/dry-sloths-divide.md b/.changeset/dry-sloths-divide.md new file mode 100644 index 00000000000..31e7ec9b941 --- /dev/null +++ b/.changeset/dry-sloths-divide.md @@ -0,0 +1,5 @@ +--- +"@trigger.dev/sdk": patch +--- + +Add `chat.withUIMessage()` for typed AI SDK `UIMessage` in chat task hooks, optional factory `streamOptions` merged with `uiMessageStreamOptions`, and `InferChatUIMessage` helper. Generic `ChatUIMessageStreamOptions`, compaction, and pending-message event types. `usePendingMessages` accepts a UI message type parameter; re-export `InferChatUIMessage` from `@trigger.dev/sdk/chat/react`. 
diff --git a/.claude/rules/package-installation.md b/.claude/rules/package-installation.md new file mode 100644 index 00000000000..310074823c5 --- /dev/null +++ b/.claude/rules/package-installation.md @@ -0,0 +1,22 @@ +--- +paths: + - "**/package.json" +--- + +# Installing Packages + +When adding a new dependency to any package.json in the monorepo: + +1. **Look up the latest version** on npm before adding: + ```bash + pnpm view <package-name> version + ``` + If unsure which version to use (e.g. major version compatibility), confirm with the user. + +2. **Edit the package.json directly** — do NOT use `pnpm add` as it can cause issues in the monorepo. Add the dependency with the correct version range (typically `^x.y.z`). + +3. **Run `pnpm i` from the repo root** after editing to install and update the lockfile: + ```bash + pnpm i + ``` + Always run from the repo root, not from the package directory. diff --git a/CLAUDE.md b/CLAUDE.md index 9e53955b092..dd7d27d350c 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -6,6 +6,8 @@ This file provides guidance to Claude Code when working with this repository. Su This is a pnpm 10.23.0 monorepo using Turborepo. Run commands from root with `pnpm run`. +**Adding dependencies:** Edit `package.json` directly instead of using `pnpm add`, then run `pnpm i` from the repo root. See `.claude/rules/package-installation.md` for the full process. + ```bash pnpm run docker # Start Docker services (PostgreSQL, Redis, Electric) pnpm run db:migrate # Run database migrations diff --git a/packages/core/src/v3/apiClient/errors.ts b/packages/core/src/v3/apiClient/errors.ts index 14f69b31302..5f38a4947b8 100644 --- a/packages/core/src/v3/apiClient/errors.ts +++ b/packages/core/src/v3/apiClient/errors.ts @@ -128,6 +128,18 @@ export class PermissionDeniedError extends ApiError { override readonly status: 403 = 403; } +/** + * True when `error` is a 401/403 from the Trigger API (e.g. expired run-scoped PAT on realtime streams). 
+ * Uses structural checks so it works even if multiple copies of `@trigger.dev/core` are bundled (subclass `instanceof` can fail). + */ +export function isTriggerRealtimeAuthError(error: unknown): boolean { + if (error === null || typeof error !== "object") { + return false; + } + const e = error as ApiError; + return e.name === "TriggerApiError" && (e.status === 401 || e.status === 403); +} + export class NotFoundError extends ApiError { override readonly status: 404 = 404; } diff --git a/packages/core/src/v3/apiClient/runStream.ts b/packages/core/src/v3/apiClient/runStream.ts index 520ecd8dc2b..61ca019368c 100644 --- a/packages/core/src/v3/apiClient/runStream.ts +++ b/packages/core/src/v3/apiClient/runStream.ts @@ -14,7 +14,7 @@ import { IOPacket, parsePacket, } from "../utils/ioSerialization.js"; -import { ApiError } from "./errors.js"; +import { ApiError, isTriggerRealtimeAuthError } from "./errors.js"; import { ApiClient } from "./index.js"; import { zodShapeStream } from "./stream.js"; @@ -344,6 +344,12 @@ export class SSEStreamSubscription implements StreamSubscription { return; } + if (isTriggerRealtimeAuthError(error)) { + this.options.onError?.(error as Error); + controller.error(error as Error); + return; + } + // Retry on error await this.retryConnection(controller, error as Error); } diff --git a/packages/core/src/v3/index.ts b/packages/core/src/v3/index.ts index 2757363f4be..883da288556 100644 --- a/packages/core/src/v3/index.ts +++ b/packages/core/src/v3/index.ts @@ -80,6 +80,7 @@ export { getSchemaParseFn, type AnySchemaParseFn, type SchemaParseFn, + type inferSchemaOut, isSchemaZodEsque, isSchemaValibotEsque, isSchemaArkTypeEsque, diff --git a/packages/core/src/v3/inputStreams/index.ts b/packages/core/src/v3/inputStreams/index.ts index 4a871d6bfcc..0b3c7af063f 100644 --- a/packages/core/src/v3/inputStreams/index.ts +++ b/packages/core/src/v3/inputStreams/index.ts @@ -51,6 +51,18 @@ export class InputStreamsAPI implements InputStreamManager { 
return this.#getManager().lastSeqNum(streamId); } + public setLastSeqNum(streamId: string, seqNum: number): void { + this.#getManager().setLastSeqNum(streamId, seqNum); + } + + public shiftBuffer(streamId: string): boolean { + return this.#getManager().shiftBuffer(streamId); + } + + public disconnectStream(streamId: string): void { + this.#getManager().disconnectStream(streamId); + } + public clearHandlers(): void { this.#getManager().clearHandlers(); } diff --git a/packages/core/src/v3/inputStreams/manager.ts b/packages/core/src/v3/inputStreams/manager.ts index f393f4a169a..09212fb6a84 100644 --- a/packages/core/src/v3/inputStreams/manager.ts +++ b/packages/core/src/v3/inputStreams/manager.ts @@ -40,6 +40,26 @@ export class StandardInputStreamManager implements InputStreamManager { return this.seqNums.get(streamId); } + setLastSeqNum(streamId: string, seqNum: number): void { + const current = this.seqNums.get(streamId); + // Only advance forward, never backward + if (current === undefined || seqNum > current) { + this.seqNums.set(streamId, seqNum); + } + } + + shiftBuffer(streamId: string): boolean { + const buffered = this.buffer.get(streamId); + if (buffered && buffered.length > 0) { + buffered.shift(); + if (buffered.length === 0) { + this.buffer.delete(streamId); + } + return true; + } + return false; + } + setRunId(runId: string, streamsVersion?: string): void { this.currentRunId = runId; this.streamsVersion = streamsVersion; @@ -158,6 +178,15 @@ export class StandardInputStreamManager implements InputStreamManager { } } + disconnectStream(streamId: string): void { + const tail = this.tails.get(streamId); + if (tail) { + tail.abortController.abort(); + this.tails.delete(streamId); + } + this.buffer.delete(streamId); + } + connectTail(runId: string, _fromSeq?: number): void { // No-op: tails are now created per-stream lazily } diff --git a/packages/core/src/v3/inputStreams/noopManager.ts b/packages/core/src/v3/inputStreams/noopManager.ts index 
6d72d9e2f76..612da832d7e 100644 --- a/packages/core/src/v3/inputStreams/noopManager.ts +++ b/packages/core/src/v3/inputStreams/noopManager.ts @@ -22,6 +22,12 @@ export class NoopInputStreamManager implements InputStreamManager { return undefined; } + setLastSeqNum(_streamId: string, _seqNum: number): void {} + + shiftBuffer(_streamId: string): boolean { return false; } + + disconnectStream(_streamId: string): void {} + clearHandlers(): void {} reset(): void {} disconnect(): void {} diff --git a/packages/core/src/v3/inputStreams/types.ts b/packages/core/src/v3/inputStreams/types.ts index 0816c06493f..c456bb61216 100644 --- a/packages/core/src/v3/inputStreams/types.ts +++ b/packages/core/src/v3/inputStreams/types.ts @@ -70,6 +70,28 @@ export interface InputStreamManager { */ lastSeqNum(streamId: string): number | undefined; + /** + * Advance the last-seen S2 sequence number for the given input stream. + * Used after `.wait()` resumes to prevent the SSE tail from replaying + * the record that was consumed via the waitpoint path. + */ + setLastSeqNum(streamId: string, seqNum: number): void; + + /** + * Remove and discard the first buffered item for the given input stream. + * Used after `.wait()` resumes to remove the duplicate that the SSE tail + * buffered while the waitpoint was being completed via a separate path. + * Returns true if an item was removed, false if the buffer was empty. + */ + shiftBuffer(streamId: string): boolean; + + /** + * Disconnect the SSE tail and clear the buffer for a specific input stream. + * Used before suspending via `.wait()` so the tail doesn't buffer duplicates + * of data that will be delivered through the waitpoint path. + */ + disconnectStream(streamId: string): void; + /** * Clear all persistent `.on()` handlers and abort tails that have no remaining once waiters. * Called automatically when a task run completes. 
diff --git a/packages/core/src/v3/realtimeStreams/manager.ts b/packages/core/src/v3/realtimeStreams/manager.ts index 323735df106..beda3535fb4 100644 --- a/packages/core/src/v3/realtimeStreams/manager.ts +++ b/packages/core/src/v3/realtimeStreams/manager.ts @@ -6,6 +6,7 @@ import { RealtimeStreamInstance, RealtimeStreamOperationOptions, RealtimeStreamsManager, + StreamWriteResult, } from "./types.js"; export class StandardRealtimeStreamsManager implements RealtimeStreamsManager { @@ -16,7 +17,7 @@ export class StandardRealtimeStreamsManager implements RealtimeStreamsManager { ) {} // Track active streams - using a Set allows multiple streams for the same key to coexist private activeStreams = new Set<{ - wait: () => Promise; + wait: () => Promise; abortController: AbortController; }>(); diff --git a/packages/core/src/v3/realtimeStreams/noopManager.ts b/packages/core/src/v3/realtimeStreams/noopManager.ts index 542e66fd53a..881a82294e2 100644 --- a/packages/core/src/v3/realtimeStreams/noopManager.ts +++ b/packages/core/src/v3/realtimeStreams/noopManager.ts @@ -15,7 +15,7 @@ export class NoopRealtimeStreamsManager implements RealtimeStreamsManager { options?: RealtimeStreamOperationOptions ): RealtimeStreamInstance { return { - wait: () => Promise.resolve(), + wait: () => Promise.resolve({}), get stream(): AsyncIterableStream { return createAsyncIterableStreamFromAsyncIterable(source); }, diff --git a/packages/core/src/v3/realtimeStreams/streamInstance.ts b/packages/core/src/v3/realtimeStreams/streamInstance.ts index 6d8106ffe6c..07ee0158bfb 100644 --- a/packages/core/src/v3/realtimeStreams/streamInstance.ts +++ b/packages/core/src/v3/realtimeStreams/streamInstance.ts @@ -3,7 +3,7 @@ import { AsyncIterableStream } from "../streams/asyncIterableStream.js"; import { AnyZodFetchOptions } from "../zodfetch.js"; import { StreamsWriterV1 } from "./streamsWriterV1.js"; import { StreamsWriterV2 } from "./streamsWriterV2.js"; -import { StreamsWriter } from "./types.js"; +import 
{ StreamsWriter, StreamWriteResult } from "./types.js"; export type StreamInstanceOptions = { apiClient: ApiClient; @@ -63,8 +63,9 @@ export class StreamInstance implements StreamsWriter { return streamWriter; } - public async wait(): Promise { - return this.streamPromise.then((writer) => writer.wait()); + public async wait(): Promise { + const writer = await this.streamPromise; + return writer.wait(); } public get stream(): AsyncIterableStream { diff --git a/packages/core/src/v3/realtimeStreams/streamsWriterV1.ts b/packages/core/src/v3/realtimeStreams/streamsWriterV1.ts index 2f2b4af1682..c19faf6c2f8 100644 --- a/packages/core/src/v3/realtimeStreams/streamsWriterV1.ts +++ b/packages/core/src/v3/realtimeStreams/streamsWriterV1.ts @@ -2,7 +2,7 @@ import { request as httpsRequest } from "node:https"; import { request as httpRequest } from "node:http"; import { URL } from "node:url"; import { randomBytes } from "node:crypto"; -import { StreamsWriter } from "./types.js"; +import { StreamsWriter, StreamWriteResult } from "./types.js"; export type StreamsWriterV1Options = { baseUrl: string; @@ -258,8 +258,9 @@ export class StreamsWriterV1 implements StreamsWriter { await this.makeRequest(0); } - public async wait(): Promise { - return this.streamPromise; + public async wait(): Promise { + await this.streamPromise; + return {}; } public [Symbol.asyncIterator]() { diff --git a/packages/core/src/v3/realtimeStreams/streamsWriterV2.ts b/packages/core/src/v3/realtimeStreams/streamsWriterV2.ts index 91713630dbe..4d01c8267d8 100644 --- a/packages/core/src/v3/realtimeStreams/streamsWriterV2.ts +++ b/packages/core/src/v3/realtimeStreams/streamsWriterV2.ts @@ -1,5 +1,5 @@ import { S2, AppendRecord, BatchTransform } from "@s2-dev/streamstore"; -import { StreamsWriter } from "./types.js"; +import { StreamsWriter, StreamWriteResult } from "./types.js"; import { nanoid } from "nanoid"; export type StreamsWriterV2Options = { @@ -54,6 +54,7 @@ export class StreamsWriterV2 implements 
StreamsWriter { private readonly maxInflightBytes: number; private aborted = false; private sessionWritable: WritableStream | null = null; + private lastSeqNum: number | undefined; constructor(private options: StreamsWriterV2Options) { this.debug = options.debug ?? false; @@ -169,9 +170,9 @@ export class StreamsWriterV2 implements StreamsWriter { const lastAcked = session.lastAckedPosition(); if (lastAcked?.end) { - const recordsWritten = lastAcked.end.seqNum; + this.lastSeqNum = lastAcked.end.seqNum; this.log( - `[S2MetadataStream] Written ${recordsWritten} records, ending at seqNum=${lastAcked.end.seqNum}` + `[S2MetadataStream] Written ${this.lastSeqNum} records, ending at seqNum=${this.lastSeqNum}` ); } } catch (error) { @@ -184,8 +185,9 @@ export class StreamsWriterV2 implements StreamsWriter { } } - public async wait(): Promise { + public async wait(): Promise { await this.streamPromise; + return { lastEventId: this.lastSeqNum?.toString() }; } public [Symbol.asyncIterator]() { diff --git a/packages/core/src/v3/realtimeStreams/types.ts b/packages/core/src/v3/realtimeStreams/types.ts index 174970c2830..a5c0b37fb1e 100644 --- a/packages/core/src/v3/realtimeStreams/types.ts +++ b/packages/core/src/v3/realtimeStreams/types.ts @@ -26,13 +26,17 @@ export interface RealtimeStreamsManager { ): Promise; } +export type StreamWriteResult = { + lastEventId?: string; +}; + export interface RealtimeStreamInstance { - wait(): Promise; + wait(): Promise; get stream(): AsyncIterableStream; } export interface StreamsWriter { - wait(): Promise; + wait(): Promise; } export type RealtimeDefinedStream = { @@ -71,6 +75,10 @@ export type PipeStreamOptions = { * Additional request options for the API call. */ requestOptions?: ApiRequestOptions; + /** Override the default span name for this operation. */ + spanName?: string; + /** When true, the span will be collapsed in the dashboard. 
*/ + collapsed?: boolean; }; /** @@ -89,7 +97,7 @@ export type PipeStreamResult = { * to the realtime stream. Use this to wait for the stream to complete before * finishing your task. */ - waitUntilComplete: () => Promise; + waitUntilComplete: () => Promise; }; /** @@ -185,6 +193,14 @@ export type RealtimeDefinedInputStream = { * Uses a waitpoint token internally. Can only be called inside a task.run(). */ wait: (options?: InputStreamWaitOptions) => ManualWaitpointPromise; + /** + * Wait for data with an idle phase before suspending. + * + * Keeps the task active (using compute) for `idleTimeoutInSeconds`, + * then suspends via `.wait()` if no data arrives. If data arrives during + * the idle phase the task responds instantly without suspending. + */ + waitWithIdleTimeout: (options: InputStreamWaitWithIdleTimeoutOptions) => Promise<{ ok: true; output: TData } | { ok: false; error?: any }>; /** * Send data to this input stream on a specific run. * This is used from outside the task (e.g., from your backend or another task). @@ -199,6 +215,8 @@ export type InputStreamSubscription = { export type InputStreamOnceOptions = { signal?: AbortSignal; timeoutMs?: number; + /** Override the default span name for this operation. */ + spanName?: string; }; export type SendInputStreamOptions = { @@ -234,6 +252,18 @@ export type InputStreamWaitOptions = { * and filtering waitpoints via `wait.listTokens()`. */ tags?: string[]; + + /** Override the default span name for this operation. */ + spanName?: string; +}; + +export type InputStreamWaitWithIdleTimeoutOptions = { + /** Seconds to keep the task idle (active, using compute) before suspending. */ + idleTimeoutInSeconds: number; + /** Maximum time to wait after suspending (duration string, e.g. "1h"). */ + timeout?: string; + /** Override the default span name for the outer operation. 
*/ + spanName?: string; }; export type InferInputStreamType = T extends RealtimeDefinedInputStream diff --git a/packages/core/src/v3/types/tasks.ts b/packages/core/src/v3/types/tasks.ts index 184aebe78a2..582ff0d941a 100644 --- a/packages/core/src/v3/types/tasks.ts +++ b/packages/core/src/v3/types/tasks.ts @@ -618,6 +618,30 @@ export interface Task requestOptions?: TriggerApiRequestOptions ) => TaskRunPromise; + /** + * Trigger a task and subscribe to its updates via realtime. Unlike `triggerAndWait`, + * this does NOT suspend the parent run — the parent stays alive and polls for updates. + * This enables parallel tool calls and proper abort signal handling. + * + * @param payload + * @param options - Options for the task run, including an optional `signal` to cancel the subscription and child run + * @returns TaskRunPromise + * @example + * ``` + * const result = await task.triggerAndSubscribe({ foo: "bar" }, { signal: abortSignal }); + * + * if (result.ok) { + * console.log(result.output); + * } else { + * console.error(result.error); + * } + * ``` + */ + triggerAndSubscribe: ( + payload: TInput, + options?: TriggerAndSubscribeOptions, + ) => TaskRunPromise; + /** * Batch trigger multiple task runs with the given payloads, and wait for the results. Returns the results of the task runs. * @param items - Array, AsyncIterable, or ReadableStream of batch items @@ -966,6 +990,16 @@ export type TriggerOptions = { }; export type TriggerAndWaitOptions = Omit; + +export type TriggerAndSubscribeOptions = Omit & { + /** An AbortSignal to cancel the subscription. When fired, the subscription closes and the promise rejects. */ + signal?: AbortSignal; + /** + * Whether to cancel the child run when the abort signal fires. + * @default true + */ + cancelOnAbort?: boolean; +}; export type BatchTriggerOptions = { /** * If no idempotencyKey is set on an individual item in the batch, it will use this key on each item + the array index. 
diff --git a/packages/core/test/runStream.test.ts b/packages/core/test/runStream.test.ts index 0bf7f17432c..a953b7b694b 100644 --- a/packages/core/test/runStream.test.ts +++ b/packages/core/test/runStream.test.ts @@ -1,7 +1,8 @@ -import { describe, expect, it } from "vitest"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { RunSubscription, SSEStreamPart, + SSEStreamSubscription, StreamSubscription, StreamSubscriptionFactory, } from "../src/v3/apiClient/runStream.js"; @@ -470,6 +471,47 @@ describe("RunSubscription", () => { }); }); +describe("SSEStreamSubscription", () => { + let originalFetch: typeof global.fetch; + + beforeEach(() => { + originalFetch = global.fetch; + }); + + afterEach(() => { + global.fetch = originalFetch; + vi.restoreAllMocks(); + }); + + it("does not retry the initial fetch on 401", async () => { + const fetchMock = vi.fn().mockResolvedValue(new Response(null, { status: 401 })); + global.fetch = fetchMock; + + const sub = new SSEStreamSubscription("https://api.test/realtime/v1/streams/run_x/chat", { + headers: { Authorization: "Bearer expired" }, + }); + + const stream = await sub.subscribe(); + const reader = stream.getReader(); + await expect(reader.read()).rejects.toMatchObject({ status: 401 }); + expect(fetchMock).toHaveBeenCalledTimes(1); + }); + + it("does not retry the initial fetch on 403", async () => { + const fetchMock = vi.fn().mockResolvedValue(new Response(null, { status: 403 })); + global.fetch = fetchMock; + + const sub = new SSEStreamSubscription("https://api.test/realtime/v1/streams/run_x/chat", { + headers: { Authorization: "Bearer denied" }, + }); + + const stream = await sub.subscribe(); + const reader = stream.getReader(); + await expect(reader.read()).rejects.toMatchObject({ status: 403 }); + expect(fetchMock).toHaveBeenCalledTimes(1); + }); +}); + export async function convertAsyncIterableToArray(iterable: AsyncIterable): Promise { const result: T[] = []; for await (const item of 
iterable) { diff --git a/packages/trigger-sdk/package.json b/packages/trigger-sdk/package.json index e1ff05c4de9..14084af77cd 100644 --- a/packages/trigger-sdk/package.json +++ b/packages/trigger-sdk/package.json @@ -24,7 +24,9 @@ "./package.json": "./package.json", ".": "./src/v3/index.ts", "./v3": "./src/v3/index.ts", - "./ai": "./src/v3/ai.ts" + "./ai": "./src/v3/ai.ts", + "./chat": "./src/v3/chat.ts", + "./chat/react": "./src/v3/chat-react.ts" }, "sourceDialects": [ "@triggerdotdev/source" @@ -37,6 +39,12 @@ ], "ai": [ "dist/commonjs/v3/ai.d.ts" + ], + "chat": [ + "dist/commonjs/v3/chat.d.ts" + ], + "chat/react": [ + "dist/commonjs/v3/chat-react.d.ts" ] } }, @@ -66,10 +74,11 @@ "devDependencies": { "@arethetypeswrong/cli": "^0.15.4", "@types/debug": "^4.1.7", + "@types/react": "^19.2.14", "@types/slug": "^5.0.3", "@types/uuid": "^9.0.0", "@types/ws": "^8.5.3", - "ai": "^6.0.0", + "ai": "^6.0.116", "encoding": "^0.1.13", "rimraf": "^3.0.2", "tshy": "^3.0.2", @@ -78,12 +87,16 @@ "zod": "3.25.76" }, "peerDependencies": { - "zod": "^3.0.0 || ^4.0.0", - "ai": "^4.2.0 || ^5.0.0 || ^6.0.0" + "ai": "^5.0.0 || ^6.0.0", + "react": "^18.0 || ^19.0", + "zod": "^3.0.0 || ^4.0.0" }, "peerDependenciesMeta": { "ai": { "optional": true + }, + "react": { + "optional": true } }, "engines": { @@ -123,6 +136,28 @@ "types": "./dist/commonjs/v3/ai.d.ts", "default": "./dist/commonjs/v3/ai.js" } + }, + "./chat": { + "import": { + "@triggerdotdev/source": "./src/v3/chat.ts", + "types": "./dist/esm/v3/chat.d.ts", + "default": "./dist/esm/v3/chat.js" + }, + "require": { + "types": "./dist/commonjs/v3/chat.d.ts", + "default": "./dist/commonjs/v3/chat.js" + } + }, + "./chat/react": { + "import": { + "@triggerdotdev/source": "./src/v3/chat-react.ts", + "types": "./dist/esm/v3/chat-react.d.ts", + "default": "./dist/esm/v3/chat-react.js" + }, + "require": { + "types": "./dist/commonjs/v3/chat-react.d.ts", + "default": "./dist/commonjs/v3/chat-react.js" + } } }, "main": 
"./dist/commonjs/v3/index.js", diff --git a/packages/trigger-sdk/src/v3/ai.ts b/packages/trigger-sdk/src/v3/ai.ts index 59afa2fe21a..15112a0cc6a 100644 --- a/packages/trigger-sdk/src/v3/ai.ts +++ b/packages/trigger-sdk/src/v3/ai.ts @@ -1,17 +1,86 @@ import { + accessoryAttributes, AnyTask, + getSchemaParseFn, isSchemaZodEsque, + logger, + SemanticInternalAttributes, Task, + taskContext, type inferSchemaIn, + type inferSchemaOut, + type PipeStreamOptions, + type TaskIdentifier, + type TaskOptions, type TaskSchema, type TaskWithSchema, } from "@trigger.dev/core/v3"; -import { dynamicTool, jsonSchema, JSONSchema7, Schema, Tool, ToolCallOptions, zodSchema } from "ai"; +import type { + ModelMessage, + ToolSet, + UIMessage, + UIMessageChunk, + UIMessageStreamOptions, + LanguageModelUsage, +} from "ai"; +import type { StreamWriteResult } from "@trigger.dev/core/v3"; +import { + convertToModelMessages, + dynamicTool, + generateId as generateMessageId, + jsonSchema, + JSONSchema7, + Schema, + tool as aiTool, + Tool, + ToolCallOptions, + zodSchema, +} from "ai"; +import { type Attributes, trace } from "@opentelemetry/api"; +import { auth } from "./auth.js"; +import { locals } from "./locals.js"; import { metadata } from "./metadata.js"; +import type { ResolvedPrompt } from "./prompt.js"; +import { streams } from "./streams.js"; +import { createTask } from "./shared.js"; +import { tracer } from "./tracer.js"; +import { + CHAT_STREAM_KEY as _CHAT_STREAM_KEY, + CHAT_MESSAGES_STREAM_ID, + CHAT_STOP_STREAM_ID, +} from "./chat-constants.js"; const METADATA_KEY = "tool.execute.options"; -export type ToolCallExecutionOptions = Omit; +/** + * Wrapper around `convertToModelMessages` that always passes + * `ignoreIncompleteToolCalls: true` to prevent failures from + * stopped/aborted conversations with partial tool parts. 
+ */ +function toModelMessages(messages: UIMessage[]): Promise { + return convertToModelMessages(messages, { ignoreIncompleteToolCalls: true }); +} + +export type ToolCallExecutionOptions = { + toolCallId: string; + experimental_context?: unknown; + /** Chat context — only present when the tool runs inside a chat.task turn. */ + chatId?: string; + turn?: number; + continuation?: boolean; + clientData?: unknown; + /** Serialized chat.local values from the parent run. @internal */ + chatLocals?: Record; +}; + +/** Chat context stored in locals during each chat.task turn for auto-detection. */ +type ChatTurnContext = { + chatId: string; + turn: number; + continuation: boolean; + clientData?: TClientData; +}; +const chatTurnContextKey = locals.create("chat.turnContext"); type ToolResultContent = Array< | { @@ -29,10 +98,134 @@ export type ToolOptions = { experimental_toToolResultContent?: (result: TResult) => ToolResultContent; }; +/** Satisfies AI SDK `ToolSet` index signature alongside concrete `Tool` input/output types. */ +type ToolSetCompatible> = T & NonNullable; + +function assertTaskUsableAsTool(task: AnyTask): void { + if (("schema" in task && !task.schema) || ("jsonSchema" in task && !task.jsonSchema)) { + throw new Error( + "Cannot convert this task to a tool because the task has no schema. Make sure to either use schemaTask or a task with an input jsonSchema." + ); + } +} + +/** + * Shared implementation: run a task as a tool invocation (`triggerAndSubscribe` + tool metadata). + * Used by {@link toolExecute} and the deprecated `ai.tool()` wrapper. 
+ */ +function createTaskToolExecuteHandler< + TIdentifier extends string, + TTaskSchema extends TaskSchema | undefined = undefined, + TInput = void, + TOutput = unknown, +>( + task: TaskWithSchema | Task +): (input: unknown, toolOpts: ToolCallOptions | undefined) => Promise { + assertTaskUsableAsTool(task); + + return async function taskToolExecuteHandler( + input: unknown, + toolOpts: ToolCallOptions | undefined + ): Promise { + const toolMeta: ToolCallExecutionOptions = { + toolCallId: toolOpts?.toolCallId ?? "", + }; + if (toolOpts?.experimental_context !== undefined) { + try { + toolMeta.experimental_context = JSON.parse(JSON.stringify(toolOpts.experimental_context)); + } catch { + /* non-serializable */ + } + } + + const chatCtx = locals.get(chatTurnContextKey); + if (chatCtx) { + toolMeta.chatId = chatCtx.chatId; + toolMeta.turn = chatCtx.turn; + toolMeta.continuation = chatCtx.continuation; + toolMeta.clientData = chatCtx.clientData; + } + + const chatLocals: Record = {}; + for (const entry of chatLocalRegistry) { + const value = locals.get(entry.key); + if (value !== undefined) { + chatLocals[entry.id] = value; + } + } + if (Object.keys(chatLocals).length > 0) { + toolMeta.chatLocals = chatLocals; + } + + return await task + .triggerAndSubscribe(input as inferSchemaIn, { + metadata: { + [METADATA_KEY]: toolMeta as any, + }, + tags: toolOpts?.toolCallId ? [`toolCallId:${toolOpts.toolCallId}`] : undefined, + signal: toolOpts?.abortSignal, + }) + .unwrap(); + }; +} + +/** + * Returns an `execute` function for the AI SDK `tool()` helper (or any compatible tool definition). + * Preferred API for task-backed tools: the same Trigger wiring as the deprecated `ai.tool()` + * (`triggerAndSubscribe`, tool-call metadata, chat context, `chat.local` serialization) without + * building the tool object. You supply `description`, `inputSchema`, and any AI-SDK-only options + * (e.g. `experimental_toToolResultContent`) on `tool()` yourself. 
+ * + * @example + * ```ts + * import { tool } from "ai"; + * import { z } from "zod"; + * import { ai } from "@trigger.dev/sdk/ai"; + * import { myTask } from "./trigger/myTask"; + * + * export const myTool = tool({ + * description: myTask.description ?? "", + * inputSchema: z.object({ id: z.string() }), + * execute: ai.toolExecute(myTask), + * }); + * ``` + */ +function toolExecute( + task: Task +): (input: TInput, toolOpts: ToolCallOptions) => Promise; +function toolExecute< + TIdentifier extends string, + TTaskSchema extends TaskSchema | undefined = undefined, + TOutput = unknown, +>( + task: TaskWithSchema +): (input: inferSchemaIn, toolOpts: ToolCallOptions) => Promise; +function toolExecute< + TIdentifier extends string, + TTaskSchema extends TaskSchema | undefined = undefined, + TInput = void, + TOutput = unknown, +>( + task: TaskWithSchema | Task +): ( + input: TTaskSchema extends TaskSchema ? inferSchemaIn : TInput, + toolOpts: ToolCallOptions +) => Promise { + return createTaskToolExecuteHandler(task) as ( + input: TTaskSchema extends TaskSchema ? inferSchemaIn : TInput, + toolOpts: ToolCallOptions + ) => Promise; +} + +/** + * @deprecated Use `tool()` from the `ai` package with `execute: ai.toolExecute(task)` instead. + * This helper may be removed in a future major release. + */ function toolFromTask( task: Task, options?: ToolOptions -): Tool; +): ToolSetCompatible>; +/** @deprecated Use `tool()` from `ai` with `execute: ai.toolExecute(task)`. */ function toolFromTask< TIdentifier extends string, TTaskSchema extends TaskSchema | undefined = undefined, @@ -40,7 +233,8 @@ function toolFromTask< >( task: TaskWithSchema, options?: ToolOptions -): Tool, TOutput>; +): ToolSetCompatible, TOutput>>; +/** @deprecated Use `tool()` from `ai` with `execute: ai.toolExecute(task)`. 
*/ function toolFromTask< TIdentifier extends string, TTaskSchema extends TaskSchema | undefined = undefined, @@ -49,35 +243,41 @@ function toolFromTask< >( task: TaskWithSchema | Task, options?: ToolOptions -): TTaskSchema extends TaskSchema - ? Tool, TOutput> - : Tool { - if (("schema" in task && !task.schema) || ("jsonSchema" in task && !task.jsonSchema)) { - throw new Error( - "Cannot convert this task to to a tool because the task has no schema. Make sure to either use schemaTask or a task with an input jsonSchema." - ); +): ToolSetCompatible< + TTaskSchema extends TaskSchema ? Tool, TOutput> : Tool +> { + const executeFromTaskInput = createTaskToolExecuteHandler(task); + + // Zod-backed tasks: use static `tool()` so runtime shape matches `ToolSet`. Generic task context + // prevents `tool()` overloads from inferring input; `as any` is localized to this call only. + if ("schema" in task && task.schema && isSchemaZodEsque(task.schema)) { + const staticTool = aiTool({ + description: task.description ?? "", + inputSchema: zodSchema(task.schema as any), + execute: async (input: unknown, toolOpts: ToolCallOptions) => + executeFromTaskInput(input, toolOpts), + ...(options?.experimental_toToolResultContent !== undefined + ? { experimental_toToolResultContent: options.experimental_toToolResultContent } + : {}), + } as any); + return staticTool as unknown as ToolSetCompatible< + TTaskSchema extends TaskSchema ? Tool, TOutput> : Tool + >; } const toolDefinition = dynamicTool({ description: task.description, inputSchema: convertTaskSchemaToToolParameters(task), - execute: async (input, options) => { - const serializedOptions = options ? JSON.parse(JSON.stringify(options)) : undefined; - - return await task - .triggerAndWait(input as inferSchemaIn, { - metadata: { - [METADATA_KEY]: serializedOptions, - }, - }) - .unwrap(); - }, - ...options, + ...(options?.experimental_toToolResultContent !== undefined + ? 
{ experimental_toToolResultContent: options.experimental_toToolResultContent } + : {}), + execute: async (input: unknown, toolOpts: ToolCallOptions) => + executeFromTaskInput(input, toolOpts), }); - return toolDefinition as TTaskSchema extends TaskSchema - ? Tool, TOutput> - : Tool; + return toolDefinition as unknown as ToolSetCompatible< + TTaskSchema extends TaskSchema ? Tool, TOutput> : Tool + >; } function getToolOptionsFromMetadata(): ToolCallExecutionOptions | undefined { @@ -88,6 +288,61 @@ function getToolOptionsFromMetadata(): ToolCallExecutionOptions | undefined { return tool as ToolCallExecutionOptions; } +/** + * Get the current tool call ID from inside a subtask invoked via `ai.toolExecute()` (or legacy `ai.tool()`). + * Returns `undefined` if not running as a tool subtask. + */ +function getToolCallId(): string | undefined { + return getToolOptionsFromMetadata()?.toolCallId; +} + +/** + * Get the chat context from inside a subtask invoked via `ai.toolExecute()` (or legacy `ai.tool()`) within a `chat.task`. + * Pass `typeof yourChatTask` as the type parameter to get typed `clientData`. + * Returns `undefined` if the parent is not a chat task. + * + * @example + * ```ts + * const ctx = ai.chatContext(); + * // ctx?.clientData is typed based on myChat's clientDataSchema + * ``` + */ +function getToolChatContext(): + | ChatTurnContext> + | undefined { + const opts = getToolOptionsFromMetadata(); + if (!opts?.chatId) return undefined; + return { + chatId: opts.chatId, + turn: opts.turn ?? 0, + continuation: opts.continuation ?? false, + clientData: opts.clientData as InferChatClientData, + }; +} + +/** + * Get the chat context from inside a subtask, throwing if not in a chat context. + * Pass `typeof yourChatTask` as the type parameter to get typed `clientData`. 
+ * + * @example + * ```ts + * const ctx = ai.chatContextOrThrow(); + * // ctx.chatId, ctx.clientData are guaranteed non-null + * ``` + */ +function getToolChatContextOrThrow(): ChatTurnContext< + InferChatClientData +> { + const ctx = getToolChatContext(); + if (!ctx) { + throw new Error( + "ai.chatContextOrThrow() called outside of a chat.task context. " + + "This helper can only be used inside a subtask invoked via ai.toolExecute() (or legacy ai.tool()) from a chat.task." + ); + } + return ctx; +} + function convertTaskSchemaToToolParameters( task: AnyTask | TaskWithSchema ): Schema { @@ -113,6 +368,4370 @@ function convertTaskSchemaToToolParameters( } export const ai = { + /** + * @deprecated Use `tool()` from the `ai` package with `execute: ai.toolExecute(task)` instead. + */ tool: toolFromTask, + /** + * Preferred: return value for the `execute` field of AI SDK `tool()`. Keeps Trigger subtask and + * metadata behavior without coupling to a specific `ai` version’s `Tool` / `ToolSet` types. + */ + toolExecute, currentToolOptions: getToolOptionsFromMetadata, + /** Get the tool call ID from inside a subtask invoked via `ai.toolExecute()` (or legacy `ai.tool()`). */ + toolCallId: getToolCallId, + /** Get chat context (chatId, turn, clientData, etc.) from inside a subtask of a `chat.task`. Returns undefined if not in a chat context. */ + chatContext: getToolChatContext, + /** Get chat context or throw if not in a chat context. Pass `typeof yourChatTask` for typed clientData. */ + chatContextOrThrow: getToolChatContextOrThrow, +}; + +/** + * Creates a public access token for a chat task. + * + * This is a convenience helper that creates a multi-use trigger public token + * scoped to the given task. Use it in a server action to provide the frontend + * `TriggerChatTransport` with an `accessToken`. 
+ * + * @example + * ```ts + * // actions.ts + * "use server"; + * import { chat } from "@trigger.dev/sdk/ai"; + * import type { myChat } from "@/trigger/chat"; + * + * export const getChatToken = () => chat.createAccessToken("my-chat"); + * ``` + */ +function createChatAccessToken( + taskId: TaskIdentifier +): Promise { + return auth.createTriggerPublicToken(taskId as string, { expirationTime: "24h" }); +} + +// --------------------------------------------------------------------------- +// Chat transport helpers — backend side +// --------------------------------------------------------------------------- + +/** + * The default stream key used for chat transport communication. + * Both `TriggerChatTransport` (frontend) and `pipeChat`/`chatTask` (backend) + * use this key by default. + */ +export const CHAT_STREAM_KEY = _CHAT_STREAM_KEY; + +// Re-export input stream IDs for advanced usage +export { CHAT_MESSAGES_STREAM_ID, CHAT_STOP_STREAM_ID }; + +/** + * Typed chat output stream. Provides `.writer()`, `.pipe()`, `.append()`, + * and `.read()` methods pre-bound to the chat stream key and typed to `UIMessageChunk`. + * + * Use from within a `chat.task` run to write custom chunks: + * ```ts + * const { waitUntilComplete } = chat.stream.writer({ + * execute: ({ write }) => { + * write({ type: "text-start", id: "status-1" }); + * write({ type: "text-delta", id: "status-1", delta: "Processing..." 
}); + * write({ type: "text-end", id: "status-1" }); + * }, + * }); + * await waitUntilComplete(); + * ``` + * + * Use from a subtask to stream back to the parent chat: + * ```ts + * chat.stream.pipe(myStream, { target: "root" }); + * ``` + */ +const chatStream = streams.define({ id: _CHAT_STREAM_KEY }); + +// --------------------------------------------------------------------------- +// ChatWriter — stream writer for callbacks +// --------------------------------------------------------------------------- + +/** + * A stream writer passed to chat lifecycle callbacks (`onPreload`, `onChatStart`, + * `onTurnStart`, `onTurnComplete`, `onCompacted`). + * + * Write custom `UIMessageChunk` parts (e.g. `data-*` parts) directly to the chat + * stream without the ceremony of `chat.stream.writer({ execute })`. + * + * The writer is lazy — no stream overhead if you don't call `write()` or `merge()`. + * + * @example + * ```ts + * onTurnStart: async ({ writer }) => { + * writer.write({ type: "data-status", data: { loading: true } }); + * }, + * onTurnComplete: async ({ writer, uiMessages }) => { + * writer.write({ type: "data-analytics", data: { messageCount: uiMessages.length } }); + * }, + * ``` + */ +export type ChatWriter = { + /** Write a single UIMessageChunk to the chat stream. */ + write(part: UIMessageChunk): void; + /** Merge another stream's chunks into the chat stream. */ + merge(stream: ReadableStream): void; +}; + +/** + * Creates a lazy ChatWriter that only opens a realtime stream on first use. + * Call `flush()` after the callback returns to await stream completion. 
+ * @internal
+ */
+function createLazyChatWriter(): { writer: ChatWriter; flush: () => Promise<void> } {
+  let writeImpl: ((part: UIMessageChunk) => void) | null = null;
+  let mergeImpl: ((stream: ReadableStream) => void) | null = null;
+  let waitPromise: (() => Promise<void>) | null = null;
+  let resolveExecute: (() => void) | null = null;
+
+  function ensureInitialized() {
+    if (writeImpl) return;
+
+    const executePromise = new Promise<void>((resolve) => {
+      resolveExecute = resolve;
+    });
+
+    const { waitUntilComplete } = chatStream.writer({
+      collapsed: true,
+      spanName: "callback writer",
+      execute: ({ write, merge }) => {
+        writeImpl = write;
+        mergeImpl = merge;
+        return executePromise; // Keep execute alive until flush()
+      },
+    });
+    waitPromise = waitUntilComplete;
+  }
+
+  return {
+    writer: {
+      write(part: UIMessageChunk) {
+        ensureInitialized();
+        writeImpl!(part);
+      },
+      merge(stream: ReadableStream) {
+        ensureInitialized();
+        mergeImpl!(stream);
+      },
+    },
+    async flush() {
+      if (resolveExecute) {
+        resolveExecute(); // Signal execute to complete
+        await waitPromise!(); // Wait for stream to finish piping
+      }
+    },
+  };
+}
+
+/**
+ * Runs a callback with a lazy ChatWriter, flushing the stream after completion.
+ * @internal
+ */
+async function withChatWriter<T>(fn: (writer: ChatWriter) => Promise<T> | T): Promise<T> {
+  const { writer, flush } = createLazyChatWriter();
+  const result = await fn(writer);
+  await flush();
+  return result;
+}
+
+/**
+ * The wire payload shape sent by `TriggerChatTransport`.
+ * Uses `metadata` to match the AI SDK's `ChatRequestOptions` field name.
+ */
+export type ChatTaskWirePayload<TMessage extends UIMessage = UIMessage, TMetadata = unknown> = {
+  messages: TMessage[];
+  chatId: string;
+  trigger: "submit-message" | "regenerate-message" | "preload";
+  messageId?: string;
+  metadata?: TMetadata;
+  /** Whether this run is continuing an existing chat whose previous run ended. */
+  continuation?: boolean;
+  /** The run ID of the previous run (only set when `continuation` is true).
 */
+  previousRunId?: string;
+  /** Override idle timeout for this run (seconds). Set by transport.preload(). */
+  idleTimeoutInSeconds?: number;
+};
+
+/**
+ * The payload shape passed to the `chatTask` run function.
+ *
+ * - `messages` contains model-ready messages (converted via `convertToModelMessages`) —
+ *   pass these directly to `streamText`.
+ * - `clientData` contains custom data from the frontend (the `metadata` field from `sendMessage()`).
+ *
+ * The backend accumulates the full conversation history across turns, so the frontend
+ * only needs to send new messages after the first turn.
+ */
+export type ChatTaskPayload<TClientData = unknown> = {
+  /** Model-ready messages — pass directly to `streamText({ messages })`. */
+  messages: ModelMessage[];
+
+  /** The unique identifier for the chat session */
+  chatId: string;
+
+  /**
+   * The trigger type:
+   * - `"submit-message"`: A new user message
+   * - `"regenerate-message"`: Regenerate the last assistant response
+   * - `"preload"`: Run was preloaded before the first message (only on turn 0)
+   */
+  trigger: "submit-message" | "regenerate-message" | "preload";
+
+  /** The ID of the message to regenerate (only for `"regenerate-message"`) */
+  messageId?: string;
+
+  /** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */
+  clientData?: TClientData;
+
+  /** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */
+  continuation: boolean;
+  /** The run ID of the previous run (only set when `continuation` is true). */
+  previousRunId?: string;
+  /** Whether this run was preloaded before the first message. */
+  preloaded: boolean;
+};
+
+/**
+ * Abort signals provided to the `chatTask` run function.
+ */
+export type ChatTaskSignals = {
+  /** Combined signal — fires on run cancel OR stop generation. Pass to `streamText`. */
+  signal: AbortSignal;
+  /** Fires only when the run is cancelled, expired, or exceeds maxDuration.
*/ + cancelSignal: AbortSignal; + /** Fires only when the frontend stops generation for this turn (per-turn, reset each turn). */ + stopSignal: AbortSignal; +}; + +/** + * The full payload passed to a `chatTask` run function. + * Extends `ChatTaskPayload` (the wire payload) with abort signals. + */ +export type ChatTaskRunPayload = ChatTaskPayload & + ChatTaskSignals & { + /** Token usage from the previous turn. Undefined on turn 0. */ + previousTurnUsage?: LanguageModelUsage; + /** Cumulative token usage across all completed turns so far. */ + totalUsage: LanguageModelUsage; + }; + +// Input streams for bidirectional chat communication +const messagesInput = streams.input({ id: CHAT_MESSAGES_STREAM_ID }); +const stopInput = streams.input<{ stop: true; message?: string }>({ id: CHAT_STOP_STREAM_ID }); + +/** + * Per-turn deferred promises. Registered via `chat.defer()`, awaited + * before `onTurnComplete` fires. Reset each turn. + * @internal + */ +const chatDeferKey = locals.create>>("chat.defer"); + +/** + * Per-turn background context queue. Messages added via `chat.backgroundWork.inject()` + * are drained at the next `prepareStep` boundary and appended to the model messages. + * @internal + */ +const chatBackgroundQueueKey = locals.create("chat.backgroundQueue"); + +/** + * Run-scoped pipe counter. Stored in locals so concurrent runs in the + * same worker don't share state. + * @internal + */ +const chatPipeCountKey = locals.create("chat.pipeCount"); +const chatStopControllerKey = locals.create("chat.stopController"); +/** Static (task-level) UIMessageStream options, set once during chatTask setup. @internal */ +const chatUIStreamStaticKey = locals.create>( + "chat.uiMessageStreamOptions.static" +); +/** Per-turn UIMessageStream options, set via chat.setUIMessageStreamOptions(). 
@internal */ +const chatUIStreamPerTurnKey = locals.create>( + "chat.uiMessageStreamOptions.perTurn" +); + +// --------------------------------------------------------------------------- +// Token usage helpers (internal) +// --------------------------------------------------------------------------- + +/** Convenience re-export of the AI SDK's `LanguageModelUsage` type. */ +export type ChatTurnUsage = LanguageModelUsage; + +function emptyUsage(): LanguageModelUsage { + return { + inputTokens: undefined, + outputTokens: undefined, + totalTokens: undefined, + inputTokenDetails: { + noCacheTokens: undefined, + cacheReadTokens: undefined, + cacheWriteTokens: undefined, + }, + outputTokenDetails: { textTokens: undefined, reasoningTokens: undefined }, + }; +} + +function addUsage(a: LanguageModelUsage, b: LanguageModelUsage): LanguageModelUsage { + const add = (x: number | undefined, y: number | undefined) => + x != null || y != null ? (x ?? 0) + (y ?? 0) : undefined; + return { + inputTokens: add(a.inputTokens, b.inputTokens), + outputTokens: add(a.outputTokens, b.outputTokens), + totalTokens: add(a.totalTokens, b.totalTokens), + inputTokenDetails: { + noCacheTokens: add(a.inputTokenDetails?.noCacheTokens, b.inputTokenDetails?.noCacheTokens), + cacheReadTokens: add( + a.inputTokenDetails?.cacheReadTokens, + b.inputTokenDetails?.cacheReadTokens + ), + cacheWriteTokens: add( + a.inputTokenDetails?.cacheWriteTokens, + b.inputTokenDetails?.cacheWriteTokens + ), + }, + outputTokenDetails: { + textTokens: add(a.outputTokenDetails?.textTokens, b.outputTokenDetails?.textTokens), + reasoningTokens: add( + a.outputTokenDetails?.reasoningTokens, + b.outputTokenDetails?.reasoningTokens + ), + }, + }; +} + +// --------------------------------------------------------------------------- +// chat.setMessages — replace accumulated messages for compaction +// --------------------------------------------------------------------------- + +/** @internal */ +const chatOverrideMessagesKey = 
locals.create("chat.overrideMessages"); + +/** + * Replace the accumulated conversation messages for the current run. + * + * Call from `onTurnStart` to compact before `run()` executes, or from + * `onTurnComplete` to compact before the next turn. Takes `UIMessage[]` + * and converts to `ModelMessage[]` internally. + */ +function setChatMessages(uiMessages: TUIM[]): void { + locals.set(chatOverrideMessagesKey, uiMessages); +} + +/** + * Model-only message override. Set by compaction to replace only the model + * messages (what goes to the LLM) without affecting UI messages (what gets + * persisted and displayed). This preserves full conversation history for the + * user while keeping LLM context compact. + * @internal + */ +const chatOverrideModelMessagesKey = locals.create("chat.overrideModelMessages"); + +// --------------------------------------------------------------------------- +// chat.compaction — prepareStep compaction API +// --------------------------------------------------------------------------- + +/** State stored in locals during prepareStep compaction. */ +interface CompactionState { + summary: string; + baseResponseMessageCount: number; +} + +/** @internal */ +const chatCompactionStateKey = locals.create("chat.compaction"); +const chatOnCompactedKey = + locals.create<(event: CompactedEvent) => Promise | void>("chat.onCompacted"); +const chatPrepareMessagesKey = + locals.create<(event: PrepareMessagesEvent) => ModelMessage[] | Promise>( + "chat.prepareMessages" + ); + +/** + * Event passed to `summarize` callbacks. + */ +export type SummarizeEvent = { + /** The current model messages to summarize. */ + messages: ModelMessage[]; + /** Full usage object from the triggering step/turn. */ + usage?: LanguageModelUsage; + /** Cumulative token usage across all completed turns. Present in chat.task contexts. */ + totalUsage?: LanguageModelUsage; + /** The chat session ID (if running inside a chat.task). 
*/ + chatId?: string; + /** The current turn number (0-indexed, if inside a chat.task). */ + turn?: number; + /** Custom data from the frontend (if inside a chat.task). */ + clientData?: unknown; + /** + * Where compaction is running: + * - `"inner"` — between tool-call steps (prepareStep) + * - `"outer"` — between turns + */ + source?: "inner" | "outer"; + /** The step number (0-indexed). Only present when `source` is `"inner"`. */ + stepNumber?: number; +}; + +/** + * Event passed to `compactUIMessages` and `compactModelMessages` callbacks. + */ +export type CompactMessagesEvent = { + /** The generated summary text. */ + summary: string; + /** The current UI messages (full conversation). */ + uiMessages: TUIM[]; + /** The current model messages (full conversation). */ + modelMessages: ModelMessage[]; + /** The chat session ID. */ + chatId: string; + /** The current turn number (0-indexed). */ + turn: number; + /** Custom data from the frontend. */ + clientData?: unknown; + /** + * Where compaction is running: + * - `"inner"` — between tool-call steps (prepareStep) + * - `"outer"` — between turns + */ + source: "inner" | "outer"; +}; + +/** + * Options for the `compaction` field on `chat.task()`. + * + * Handles compaction automatically in both the inner loop (prepareStep, between + * tool-call steps) and the outer loop (between turns, for single-step responses + * where prepareStep never fires). + */ +export type ChatTaskCompactionOptions = { + /** Decide whether to compact. Return true to trigger compaction. */ + shouldCompact: (event: ShouldCompactEvent) => boolean | Promise; + /** Generate a summary from the current messages. Return the summary text. */ + summarize: (event: SummarizeEvent) => Promise; + /** + * Transform UI messages after compaction (what gets persisted and displayed). + * Default: preserve all UI messages unchanged. 
+ * + * @example + * ```ts + * // Flatten to summary + * compactUIMessages: ({ summary }) => [{ + * id: generateId(), role: "assistant", + * parts: [{ type: "text", text: `[Summary]\n\n${summary}` }], + * }], + * + * // Summary + keep last 4 messages + * compactUIMessages: ({ uiMessages, summary }) => [ + * { id: generateId(), role: "assistant", + * parts: [{ type: "text", text: `[Summary]\n\n${summary}` }] }, + * ...uiMessages.slice(-4), + * ], + * ``` + */ + compactUIMessages?: (event: CompactMessagesEvent) => TUIM[] | Promise; + /** + * Transform model messages after compaction (what gets sent to the LLM). + * Default: replace all with a single summary message. + * + * @example + * ```ts + * // Summary + keep last 2 model messages + * compactModelMessages: ({ modelMessages, summary }) => [ + * { role: "user", content: summary }, + * ...modelMessages.slice(-2), + * ], + * ``` + */ + compactModelMessages?: ( + event: CompactMessagesEvent + ) => ModelMessage[] | Promise; +}; + +/** @internal */ +const chatTaskCompactionKey = + locals.create>("chat.taskCompaction"); + +// --------------------------------------------------------------------------- +// Pending messages — mid-execution message injection via prepareStep +// --------------------------------------------------------------------------- + +/** + * Event passed to `shouldInject` and `prepareMessages` callbacks. + */ +export type PendingMessagesBatchEvent = { + /** All pending UI messages that arrived during streaming (batch). */ + messages: TUIM[]; + /** Current model messages in the conversation. */ + modelMessages: ModelMessage[]; + /** Completed steps so far. */ + steps: CompactionStep[]; + /** Current step number (0-indexed). */ + stepNumber: number; + /** Chat session ID. */ + chatId: string; + /** Current turn number (0-indexed). */ + turn: number; + /** Custom data from the frontend. */ + clientData?: unknown; +}; + +/** + * Event passed to `onReceived` callback (per-message, as they arrive). 
+ */ +export type PendingMessageReceivedEvent = { + /** The UI message that arrived during streaming. */ + message: TUIM; + /** Chat session ID. */ + chatId: string; + /** Current turn number (0-indexed). */ + turn: number; +}; + +/** + * Event passed to `onInjected` callback (batch, after injection). + */ +export type PendingMessagesInjectedEvent = { + /** All UI messages that were injected. */ + messages: TUIM[]; + /** The model messages that were injected. */ + injectedModelMessages: ModelMessage[]; + /** Chat session ID. */ + chatId: string; + /** Current turn number (0-indexed). */ + turn: number; + /** Step number where injection occurred. */ + stepNumber: number; +}; + +/** + * Options for the `pendingMessages` field on `chat.task()`, `chat.createSession()`, + * or `ChatMessageAccumulator`. + * + * Configures how messages that arrive during streaming are handled. When + * `shouldInject` is provided and returns `true`, the full batch of pending + * messages is injected between tool-call steps via `prepareStep`. + * Otherwise, messages queue for the next turn. + */ +export type PendingMessagesOptions = { + /** + * Decide whether to inject pending messages between tool-call steps. + * Called once per step boundary with the full batch of pending messages. + * If absent, no injection happens — messages only queue for the next turn. + */ + shouldInject?: (event: PendingMessagesBatchEvent) => boolean | Promise; + /** + * Transform the batch of pending messages before injection. + * Return the model messages to inject. + * Default: convert each UI message via `convertToModelMessages`. + */ + prepare?: (event: PendingMessagesBatchEvent) => ModelMessage[] | Promise; + /** Called when a message arrives during streaming (per-message). */ + onReceived?: (event: PendingMessageReceivedEvent) => void | Promise; + /** Called after a batch of messages is injected via `prepareStep`. 
*/ + onInjected?: (event: PendingMessagesInjectedEvent) => void | Promise; +}; + +/** + * The data part type used to signal that pending messages were injected + * between tool-call steps. The frontend can match on this to render + * injection points inline in the assistant response. + */ +export const PENDING_MESSAGE_INJECTED_TYPE = "data-pending-message-injected" as const; + +/** @internal */ +type SteeringQueueEntry = { uiMessage: UIMessage; modelMessages: ModelMessage[] }; +/** @internal */ +const chatPendingMessagesKey = locals.create("chat.pendingMessages"); +/** @internal */ +const chatSteeringQueueKey = locals.create("chat.steeringQueue"); +/** @internal — IDs of messages that were successfully injected via prepareStep */ +const chatInjectedMessageIdsKey = locals.create>("chat.injectedMessageIds"); + +/** + * Event passed to the `prepareMessages` hook. + */ +export type PrepareMessagesEvent = { + /** The messages to transform. Return the transformed array. */ + messages: ModelMessage[]; + /** Why messages are being prepared. */ + reason: + | "run" // Messages being passed to run() for streamText + | "compaction-rebuild" // Rebuilding from a previous compaction summary + | "compaction-result"; // Fresh compaction just produced these messages + /** The chat session ID. */ + chatId: string; + /** The current turn number (0-indexed). */ + turn: number; + /** Custom data from the frontend. */ + clientData?: TClientData; +}; + +/** + * Data shape for `data-compaction` stream chunks emitted during compaction. + * Use to type the `data` field when rendering compaction parts in the frontend. + */ +export type CompactionChunkData = { + status: "compacting" | "complete"; + totalTokens: number | undefined; +}; + +/** + * Event passed to the `onCompacted` callback. + */ +export type CompactedEvent = { + /** The generated summary text. */ + summary: string; + /** The messages that were compacted (pre-compaction). 
*/ + messages: ModelMessage[]; + /** Number of messages before compaction. */ + messageCount: number; + /** Token usage from the step that triggered compaction. */ + usage: LanguageModelUsage; + /** Total token count that triggered compaction. */ + totalTokens: number | undefined; + /** Input token count from the triggering step. */ + inputTokens: number | undefined; + /** Output token count from the triggering step. */ + outputTokens: number | undefined; + /** The step number where compaction occurred (0-indexed). */ + stepNumber: number; + /** The chat session ID (if running inside a chat.task). */ + chatId?: string; + /** The current turn number (if running inside a chat.task). */ + turn?: number; + /** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */ + writer: ChatWriter; +}; + +/** + * Event passed to `shouldCompact` callbacks. + */ +export type ShouldCompactEvent = { + /** The current model messages (full conversation). */ + messages: ModelMessage[]; + /** Total token count from the triggering step/turn. */ + totalTokens: number | undefined; + /** Input token count from the triggering step/turn. */ + inputTokens: number | undefined; + /** Output token count from the triggering step/turn. */ + outputTokens: number | undefined; + /** Full usage object from the triggering step/turn. */ + usage?: LanguageModelUsage; + /** Cumulative token usage across all completed turns. Present in chat.task contexts. */ + totalUsage?: LanguageModelUsage; + /** The chat session ID (if running inside a chat.task). */ + chatId?: string; + /** The current turn number (0-indexed, if inside a chat.task). */ + turn?: number; + /** Custom data from the frontend (if inside a chat.task). 
*/ + clientData?: unknown; + /** + * Where this check is running: + * - `"inner"` — between tool-call steps (prepareStep) + * - `"outer"` — between turns (after response, before onBeforeTurnComplete) + */ + source?: "inner" | "outer"; + /** The step number (0-indexed). Only present when `source` is `"inner"`. */ + stepNumber?: number; + /** The steps array from prepareStep. Only present when `source` is `"inner"`. */ + steps?: CompactionStep[]; +}; + +/** + * Options for `chat.compaction()` — the high-level prepareStep factory. + */ +export type CompactionOptions = { + /** Generate a summary from the current messages. Return the summary text. */ + summarize: (messages: ModelMessage[]) => Promise; + /** Token threshold — compact when totalTokens exceeds this. Ignored if `shouldCompact` is provided. */ + threshold?: number; + /** Custom compaction trigger. When provided, used instead of `threshold`. */ + shouldCompact?: (event: ShouldCompactEvent) => boolean | Promise; +}; + +/** A step object as received in prepareStep's `steps` array. */ +export type CompactionStep = { + usage: LanguageModelUsage; + finishReason: string; + content: Array<{ type: string; toolCallId?: string }>; + response: { messages: Array }; +}; + +/** + * Result of `chat.compact()`. Discriminated union so you can inspect + * what happened, but also directly compatible with prepareStep's return type. + * + * - `"skipped"` — no compaction needed (first step, boundary unsafe, or under threshold). Return `undefined` to prepareStep. + * - `"rebuilt"` — previous compaction exists, messages rebuilt from summary + new response messages. + * - `"compacted"` — compaction just happened, includes the generated summary. + */ +export type CompactResult = + | { type: "skipped" } + | { type: "rebuilt"; messages: ModelMessage[] } + | { type: "compacted"; messages: ModelMessage[]; summary: string }; + +/** + * Options for `chat.compact()` — the low-level compaction function. 
+ */ +export type CompactOptions = { + /** Generate a summary from the current messages. Return the summary text. */ + summarize: (messages: ModelMessage[]) => Promise; + /** Token threshold — compact when totalTokens exceeds this. Ignored if `shouldCompact` is provided. */ + threshold?: number; + /** Custom compaction trigger. When provided, used instead of `threshold`. */ + shouldCompact?: (event: ShouldCompactEvent) => boolean | Promise; +}; + +/** + * Check that no tool calls are in-flight in a step's content. + * Used before compaction to avoid losing tool state mid-execution. + * @internal + */ +function isStepBoundarySafe(step: { + finishReason: string; + content: Array<{ type: string; toolCallId?: string }>; +}): boolean { + if (step.finishReason === "error") return false; + const callIds = new Set( + step.content.filter((p) => p.type === "tool-call").map((p) => p.toolCallId) + ); + const settledIds = new Set( + step.content + .filter((p) => p.type === "tool-result" || p.type === "tool-error") + .map((p) => p.toolCallId) + ); + return ![...callIds].some((id) => !settledIds.has(id)); +} + +/** + * Apply the prepareMessages hook if one is set in locals. + * @internal + */ +async function applyPrepareMessages( + messages: ModelMessage[], + reason: PrepareMessagesEvent["reason"] +): Promise { + const hook = locals.get(chatPrepareMessagesKey); + if (!hook) return messages; + + const turnCtx = locals.get(chatTurnContextKey); + + return tracer.startActiveSpan( + "prepareMessages()", + async () => { + return hook({ + messages, + reason, + chatId: turnCtx?.chatId ?? "", + turn: turnCtx?.turn ?? 0, + clientData: turnCtx?.clientData, + }); + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onStart", + [SemanticInternalAttributes.COLLAPSED]: true, + "chat.prepareMessages.reason": reason, + "chat.prepareMessages.messageCount": messages.length, + }, + } + ); +} + +/** + * Read the current compaction state. 
Returns the summary and base message count + * if compaction has occurred in this turn, or `undefined` if not. + * + * Use in a custom `prepareStep` to rebuild from a previous compaction: + * ```ts + * const state = chat.getCompactionState(); + * if (state) { + * return { messages: [{ role: "user", content: state.summary }, ...newMsgs] }; + * } + * ``` + */ +function getCompactionState(): CompactionState | undefined { + return locals.get(chatCompactionStateKey); +} + +/** + * Low-level compaction for use inside a custom `prepareStep`. + * + * Handles the full decision tree: first step, already-compacted rebuild, + * boundary safety, threshold check, summarization, stream chunks, state + * storage, and accumulator update. + * + * Returns a `CompactResult` — inspect `result.type` to see what happened, + * or convert to a prepareStep return with `result.type === "skipped" ? undefined : result`. + * + * @example + * ```ts + * prepareStep: async ({ messages, steps }) => { + * // your custom logic here... + * const result = await chat.compact(messages, steps, { + * threshold: 80_000, + * summarize: async (msgs) => generateText({ model, messages: msgs }).then(r => r.text), + * }); + * if (result.type === "compacted") { + * logger.info("Compacted!", { summary: result.summary }); + * } + * return result.type === "skipped" ? 
undefined : result; + * }, + * ``` + */ +async function chatCompact( + messages: ModelMessage[], + steps: CompactionStep[], + options: CompactOptions +): Promise { + const currentStep = steps.at(-1); + + // First step — nothing to check + if (!currentStep) { + return { type: "skipped" }; + } + + // Already compacted — rebuild from summary + new response messages + const state = locals.get(chatCompactionStateKey); + if (state && isStepBoundarySafe(currentStep)) { + return { + type: "rebuilt", + messages: await applyPrepareMessages( + [ + { role: "user" as const, content: state.summary }, + ...currentStep.response.messages.slice(state.baseResponseMessageCount), + ], + "compaction-rebuild" + ), + }; + } + + // Boundary unsafe — skip + if (!isStepBoundarySafe(currentStep)) { + return { type: "skipped" }; + } + + const totalTokens = currentStep.usage.totalTokens; + const inputTokens = currentStep.usage.inputTokens; + const outputTokens = currentStep.usage.outputTokens; + + const turnCtx = locals.get(chatTurnContextKey); + const stepNumber = steps.length - 1; + + const shouldTrigger = options.shouldCompact + ? 
await options.shouldCompact({ + messages, + totalTokens, + inputTokens, + outputTokens, + usage: currentStep.usage, + source: "inner", + stepNumber, + steps, + chatId: turnCtx?.chatId, + turn: turnCtx?.turn, + clientData: turnCtx?.clientData, + }) + : totalTokens != null && options.threshold != null && totalTokens > options.threshold; + + if (!shouldTrigger) { + return { type: "skipped" }; + } + + const result = await tracer.startActiveSpan( + "context compaction", + async (span) => { + const compactionId = generateMessageId(); + let summary!: string; + + const { waitUntilComplete } = streams.writer(CHAT_STREAM_KEY, { + spanName: "stream compaction chunks", + collapsed: true, + execute: async ({ write, merge }) => { + write({ type: "step-start" }); + write({ + type: "data-compaction", + id: compactionId, + data: { status: "compacting", totalTokens }, + }); + + // Generate summary + summary = await options.summarize(messages); + + // Store state in locals for subsequent steps + locals.set(chatCompactionStateKey, { + summary, + baseResponseMessageCount: currentStep.response.messages.length, + }); + + // Set model-only override — UI messages stay intact for persistence. + // The summary becomes the model message history for the next turn, + // while accumulatedUIMessages keeps the full conversation for display. + locals.set(chatOverrideModelMessagesKey, [ + { + role: "assistant" as const, + content: [{ type: "text" as const, text: `[Conversation summary]\n\n${summary}` }], + }, + ]); + + // Fire onCompacted hook — pass the existing writer so the callback + // can write custom chunks without creating a separate stream. 
+ const onCompactedHook = locals.get(chatOnCompactedKey); + if (onCompactedHook) { + await onCompactedHook({ + summary, + messages, + messageCount: messages.length, + usage: currentStep.usage, + totalTokens, + inputTokens, + outputTokens, + stepNumber, + chatId: turnCtx?.chatId, + turn: turnCtx?.turn, + writer: { write, merge }, + }); + } + + write({ + type: "data-compaction", + id: compactionId, + data: { status: "complete", totalTokens }, + }); + write({ type: "finish-step" }); + }, + }); + await waitUntilComplete(); + + // Set attributes after we have the summary + span.setAttribute("compaction.summary_length", summary.length); + + return { + type: "compacted" as const, + messages: await applyPrepareMessages( + [{ role: "user" as const, content: summary }], + "compaction-result" + ), + summary, + }; + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "tabler-scissors", + "compaction.threshold": options.threshold, + "compaction.total_tokens": totalTokens ?? 0, + "compaction.input_tokens": inputTokens ?? 0, + "compaction.message_count": messages.length, + "compaction.step_number": stepNumber, + ...(turnCtx?.chatId ? { "compaction.chat_id": turnCtx.chatId } : {}), + ...(turnCtx?.turn != null ? { "compaction.turn": turnCtx.turn } : {}), + ...accessoryAttributes({ + items: [ + { text: `${totalTokens ?? 0} tokens`, variant: "normal" }, + { text: `${messages.length} msgs`, variant: "normal" }, + ], + style: "codepath", + }), + }, + } + ); + + return result; +} + +/** + * Returns a `prepareStep` function that handles context compaction automatically. + * + * Monitors token usage between tool-call steps. When `totalTokens` exceeds + * the threshold, generates a summary via `summarize()`, replaces the message + * history, and emits `data-compaction` stream chunks for the frontend. 
+ * + * @example + * ```ts + * return streamText({ + * ...chat.toStreamTextOptions({ registry }), + * messages: chat.addCacheBreaks(messages), + * prepareStep: chat.compactionStep({ + * threshold: 80_000, + * summarize: async (messages) => { + * return generateText({ model, messages: [...messages, { role: "user", content: "Summarize." }] }) + * .then((r) => r.text); + * }, + * }), + * tools: { ... }, + * }); + * ``` + */ +function chatCompactionStep( + options: CompactionOptions +): (args: { + messages: ModelMessage[]; + steps: CompactionStep[]; +}) => Promise<{ messages: ModelMessage[] } | undefined> { + return async ({ messages, steps }) => { + const result = await chatCompact(messages, steps, options); + return result.type === "skipped" ? undefined : result; + }; +} + +// --------------------------------------------------------------------------- +// Steering queue drain — shared by toStreamTextOptions, session, accumulator +// --------------------------------------------------------------------------- + +/** + * Drain the steering queue as a batch. Calls `shouldInject` once with all + * pending messages. If it returns true, calls `prepareMessages` once to + * transform the batch, then clears the queue. + * Returns the model messages to inject (empty if none). + * @internal + */ +async function drainSteeringQueue( + config: PendingMessagesOptions, + messages: ModelMessage[], + steps: CompactionStep[], + queueOverride?: SteeringQueueEntry[] +): Promise { + const queue = queueOverride ?? locals.get(chatSteeringQueueKey); + if (!queue || queue.length === 0) return []; + + const ctx = locals.get(chatTurnContextKey); + const stepNumber = steps.length - 1; + const uiMessages = queue.map((e) => e.uiMessage); + + const batchEvent: PendingMessagesBatchEvent = { + messages: uiMessages, + modelMessages: messages, + steps, + stepNumber, + chatId: ctx?.chatId ?? "", + turn: ctx?.turn ?? 
0, + clientData: ctx?.clientData, + }; + + // Call shouldInject once for the whole batch + const shouldInject = config.shouldInject ? await config.shouldInject(batchEvent) : false; + + if (!shouldInject) return []; + + // Extract message texts for span attributes + const messageTexts = uiMessages.map( + (m) => + (m.parts ?? []) + .filter((p: any) => p.type === "text") + .map((p: any) => p.text) + .join("") || "" + ); + const previewText = + messageTexts.length === 1 ? messageTexts[0]!.slice(0, 80) : `${queue.length} messages`; + + return tracer.startActiveSpan( + "pending message injected", + async () => { + // Transform the batch — default: concatenate all pre-converted model messages + const injected = config.prepare + ? await config.prepare(batchEvent) + : queue.flatMap((e) => e.modelMessages); + + // Clear the queue and record injected IDs + queue.length = 0; + const injectedIds = locals.get(chatInjectedMessageIdsKey); + if (injectedIds) { + for (const m of uiMessages) injectedIds.add(m.id); + } + + // Write injection confirmation chunk to the stream so the frontend + // knows which messages were injected and where in the response. + if (injected.length > 0) { + try { + const { waitUntilComplete } = streams.writer(CHAT_STREAM_KEY, { + collapsed: true, + execute: ({ write }) => { + write({ + type: PENDING_MESSAGE_INJECTED_TYPE, + id: generateMessageId(), + data: { + messageIds: uiMessages.map((m) => m.id), + messages: uiMessages.map((m, idx) => ({ + id: m.id, + text: messageTexts[idx] ?? "", + })), + }, + }); + }, + }); + await waitUntilComplete(); + } catch { + /* non-fatal — stream write failed */ + } + } + + // Fire onInjected callback + if (config.onInjected && injected.length > 0) { + try { + await config.onInjected({ + messages: uiMessages, + injectedModelMessages: injected, + chatId: ctx?.chatId ?? "", + turn: ctx?.turn ?? 
0, + stepNumber, + }); + } catch { + /* non-fatal */ + } + } + + return injected; + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "tabler-message-forward", + "pending.message_count": uiMessages.length, + "pending.step_number": stepNumber, + "pending.messages": messageTexts, + ...(ctx?.chatId ? { "pending.chat_id": ctx.chatId } : {}), + ...(ctx?.turn != null ? { "pending.turn": ctx.turn } : {}), + ...accessoryAttributes({ + items: [ + { + text: `${uiMessages.length} message${uiMessages.length === 1 ? "" : "s"}`, + variant: "normal", + }, + { text: `between steps ${stepNumber} and ${stepNumber + 1}`, variant: "normal" }, + ], + style: "codepath", + }), + }, + } + ); +} + +// --------------------------------------------------------------------------- +// chat.isCompactionSafe — check if it's safe to compact messages +// --------------------------------------------------------------------------- + +/** + * Checks whether it's safe to compact the message history. Returns `false` + * if any tool calls are in-flight (incomplete tool invocations without results). + * + * Call before `chat.setMessages()` to avoid corrupting tool-call state. + */ +function isCompactionSafe(messages: UIMessage[]): boolean { + for (const msg of messages) { + if (msg.role !== "assistant") continue; + for (const part of msg.parts as any[]) { + if (part.type === "tool-invocation") { + const state = part.toolInvocation?.state ?? part.state; + if (state !== "result" && state !== "error") { + return false; + } + } + } + } + return true; +} + +// --------------------------------------------------------------------------- +// chat.prompt — store and retrieve a resolved prompt for the current run +// --------------------------------------------------------------------------- + +/** + * A resolved prompt stored via `chat.prompt.set()`. Either a full `ResolvedPrompt` + * from `prompts.define().resolve()`, or a lightweight wrapper around a plain string. 
+ */ +export type ChatPromptValue = + | ResolvedPrompt + | { + text: string; + model: undefined; + config: undefined; + promptId: string; + version: number; + labels: string[]; + toAISDKTelemetry: (additionalMetadata?: Record) => { + experimental_telemetry: { isEnabled: true; metadata: Record }; + }; + }; + +/** @internal */ +const chatPromptKey = locals.create("chat.prompt"); + +/** + * Store a resolved prompt (or plain string) for the current run. + * Call from any hook (`onPreload`, `onChatStart`, `onTurnStart`) or `run()`. + */ +function setChatPrompt(resolved: ResolvedPrompt | string): void { + if (typeof resolved === "string") { + locals.set(chatPromptKey, { + text: resolved, + model: undefined, + config: undefined, + promptId: "", + version: 0, + labels: [], + toAISDKTelemetry: () => ({ + experimental_telemetry: { isEnabled: true, metadata: {} }, + }), + }); + } else { + locals.set(chatPromptKey, resolved); + } +} + +/** + * Read the stored prompt. Throws if `chat.prompt.set()` has not been called. + */ +function getChatPrompt(): ChatPromptValue { + const prompt = locals.get(chatPromptKey); + if (!prompt) { + throw new Error( + "chat.prompt() called before chat.prompt.set(). Set a prompt in onPreload, onChatStart, onTurnStart, or run() first." + ); + } + return prompt; +} + +/** + * Options for {@link toStreamTextOptions}. + */ +export type ToStreamTextOptionsOptions = { + /** Additional telemetry metadata merged into `experimental_telemetry.metadata`. */ + telemetry?: Record; + /** + * An AI SDK provider registry (from `createProviderRegistry`) or any object + * with a `languageModel(id)` method. When provided and the stored prompt has + * a `model` string, the resolved `LanguageModel` is included in the returned + * options so `streamText` uses it directly. + * + * The model string should use the `"provider:model-id"` format + * (e.g. `"openai:gpt-4o"`, `"anthropic:claude-sonnet-4-6"`). 
+ */ + registry?: { languageModel(modelId: string): unknown }; }; + +/** + * Returns an options object ready to spread into `streamText()`. + * + * Includes `system`, `experimental_telemetry`, and any config fields + * (temperature, maxTokens, etc.) from the stored prompt. + * + * When a `registry` is provided and the prompt has a `model` string, + * the resolved `LanguageModel` is included as `model`. + * + * If no prompt has been set, returns `{}` (no-op spread). + */ +function toStreamTextOptions(options?: ToStreamTextOptionsOptions): Record { + const prompt = locals.get(chatPromptKey); + if (!prompt) return {}; + + const result: Record = { + system: prompt.text, + }; + + // Resolve model via registry if both are present + if (options?.registry && prompt.model) { + result.model = options.registry.languageModel(prompt.model); + } + + // Spread config (temperature, maxTokens, etc.) + if (prompt.config) { + Object.assign(result, prompt.config); + } + + // Add telemetry (forward additional metadata from caller) + const telemetry = prompt.toAISDKTelemetry(options?.telemetry); + Object.assign(result, telemetry); + + // Auto-inject prepareStep for compaction, pending messages, and background context injection. + const taskCompaction = locals.get(chatTaskCompactionKey); + const taskPendingMessages = locals.get(chatPendingMessagesKey); + + { + result.prepareStep = async ({ + messages, + steps, + }: { + messages: ModelMessage[]; + steps: CompactionStep[]; + }) => { + let resultMessages: ModelMessage[] | undefined; + + // 1. 
Compaction + if (taskCompaction) { + const compactResult = await chatCompact(messages, steps, { + shouldCompact: taskCompaction.shouldCompact, + summarize: (msgs) => { + const ctx = locals.get(chatTurnContextKey); + const lastStep = steps.at(-1); + return taskCompaction.summarize({ + messages: msgs, + usage: lastStep?.usage, + source: "inner", + stepNumber: steps.length - 1, + chatId: ctx?.chatId, + turn: ctx?.turn, + clientData: ctx?.clientData, + }); + }, + }); + if (compactResult.type !== "skipped") { + resultMessages = compactResult.messages; + } + } + + // 2. Pending message injection (steering) + if (taskPendingMessages) { + const injected = await drainSteeringQueue( + taskPendingMessages, + resultMessages ?? messages, + steps + ); + if (injected.length > 0) { + resultMessages = [...(resultMessages ?? messages), ...injected]; + } + } + + // 3. Background context injection + const bgQueue = locals.get(chatBackgroundQueueKey); + if (bgQueue && bgQueue.length > 0) { + const injected = bgQueue.splice(0); // drain + resultMessages = [...(resultMessages ?? messages), ...injected]; + } + + return resultMessages ? { messages: resultMessages } : undefined; + }; + } + + return result; +} + +/** + * Options for `pipeChat`. + */ +export type PipeChatOptions = { + /** + * Override the stream key. Must match the `streamKey` on `TriggerChatTransport`. + * @default "chat" + */ + streamKey?: string; + + /** An AbortSignal to cancel the stream. */ + signal?: AbortSignal; + + /** + * The target run ID to pipe to. + * @default "self" (current run) + */ + target?: string; + + /** Override the default span name for this operation. */ + spanName?: string; +}; + +/** + * Options for customizing the `toUIMessageStream()` call used when piping + * `streamText` results to the frontend. + * + * Set static defaults via `uiMessageStreamOptions` on `chat.task()`, or + * override per-turn via `chat.setUIMessageStreamOptions()`. 
+ * + * `onFinish`, `originalMessages`, and `generateMessageId` are omitted because + * they are managed internally for response capture and message accumulation. + * Use `streamText`'s `onFinish` for custom finish handling, or drop down to + * raw task mode with `chat.pipe()` for full control. + */ +export type ChatUIMessageStreamOptions = Omit< + UIMessageStreamOptions, + "onFinish" | "originalMessages" | "generateMessageId" +>; + +/** + * An object with a `toUIMessageStream()` method (e.g. `StreamTextResult` from `streamText()`). + */ +type UIMessageStreamable = { + toUIMessageStream: (...args: any[]) => AsyncIterable | ReadableStream; +}; + +function isUIMessageStreamable(value: unknown): value is UIMessageStreamable { + return ( + typeof value === "object" && + value !== null && + "toUIMessageStream" in value && + typeof (value as any).toUIMessageStream === "function" + ); +} + +function isAsyncIterable(value: unknown): value is AsyncIterable { + return typeof value === "object" && value !== null && Symbol.asyncIterator in value; +} + +function isReadableStream(value: unknown): value is ReadableStream { + return ( + typeof value === "object" && value !== null && typeof (value as any).getReader === "function" + ); +} + +/** + * Pipes a chat stream to the realtime stream, making it available to the + * `TriggerChatTransport` on the frontend. + * + * Accepts: + * - A `StreamTextResult` from `streamText()` (has `.toUIMessageStream()`) + * - An `AsyncIterable` of `UIMessageChunk`s + * - A `ReadableStream` of `UIMessageChunk`s + * + * Must be called from inside a Trigger.dev task's `run` function. 
+ * + * @example + * ```ts + * import { task } from "@trigger.dev/sdk"; + * import { chat, type ChatTaskPayload } from "@trigger.dev/sdk/ai"; + * import { streamText, convertToModelMessages } from "ai"; + * + * export const myChatTask = task({ + * id: "my-chat-task", + * run: async (payload: ChatTaskPayload) => { + * const result = streamText({ + * model: openai("gpt-4o"), + * messages: payload.messages, + * }); + * + * await chat.pipe(result); + * }, + * }); + * ``` + * + * @example + * ```ts + * // Works from anywhere inside a task — even deep in your agent code + * async function runAgentLoop(messages: CoreMessage[]) { + * const result = streamText({ model, messages }); + * await chat.pipe(result); + * } + * ``` + */ +async function pipeChat( + source: UIMessageStreamable | AsyncIterable | ReadableStream, + options?: PipeChatOptions +): Promise { + locals.set(chatPipeCountKey, (locals.get(chatPipeCountKey) ?? 0) + 1); + const streamKey = options?.streamKey ?? CHAT_STREAM_KEY; + + let stream: AsyncIterable | ReadableStream; + + if (isUIMessageStreamable(source)) { + stream = source.toUIMessageStream(); + } else if (isAsyncIterable(source) || isReadableStream(source)) { + stream = source; + } else { + throw new Error( + "pipeChat: source must be a StreamTextResult (with .toUIMessageStream()), " + + "an AsyncIterable, or a ReadableStream" + ); + } + + const pipeOptions: PipeStreamOptions = {}; + if (options?.signal) { + pipeOptions.signal = options.signal; + } + if (options?.target) { + pipeOptions.target = options.target; + } + if (options?.spanName) { + pipeOptions.spanName = options.spanName; + } + + const { waitUntilComplete } = streams.pipe(streamKey, stream, pipeOptions); + await waitUntilComplete(); +} + +/** + * Options for defining a chat task. + * + * Extends the standard `TaskOptions` but pre-types the payload as `ChatTaskPayload` + * and overrides `run` to accept `ChatTaskRunPayload` (with abort signals). 
+ * + * **Auto-piping:** If the `run` function returns a value with `.toUIMessageStream()` + * (like a `StreamTextResult`), the stream is automatically piped to the frontend. + * + * **Single-run mode:** By default, the task uses input streams so that the + * entire conversation lives inside one run. After each AI response, the task + * emits a control chunk and suspends via `messagesInput.wait()`. The frontend + * transport resumes the same run by sending the next message via input streams. + */ +/** + * Event passed to the `onPreload` callback. + */ +export type PreloadEvent = { + /** The unique identifier for the chat session. */ + chatId: string; + /** The Trigger.dev run ID for this conversation. */ + runId: string; + /** A scoped access token for this chat run. */ + chatAccessToken: string; + /** Custom data from the frontend. */ + clientData?: TClientData; + /** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */ + writer: ChatWriter; +}; + +/** + * Event passed to the `onChatStart` callback. + */ +export type ChatStartEvent = { + /** The unique identifier for the chat session. */ + chatId: string; + /** The initial model-ready messages for this conversation. */ + messages: ModelMessage[]; + /** Custom data from the frontend (passed via `metadata` on `sendMessage()` or the transport). */ + clientData: TClientData; + /** The Trigger.dev run ID for this conversation. */ + runId: string; + /** A scoped access token for this chat run. Persist this for frontend reconnection. */ + chatAccessToken: string; + /** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */ + continuation: boolean; + /** The run ID of the previous run (only set when `continuation` is true). */ + previousRunId?: string; + /** Whether this run was preloaded before the first message. 
*/ + preloaded: boolean; + /** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */ + writer: ChatWriter; +}; + +/** + * Event passed to the `onTurnStart` callback. + */ +export type TurnStartEvent = { + /** The unique identifier for the chat session. */ + chatId: string; + /** The accumulated model-ready messages (all turns so far, including new user message). */ + messages: ModelMessage[]; + /** The accumulated UI messages (all turns so far, including new user message). */ + uiMessages: TUIM[]; + /** The turn number (0-indexed). */ + turn: number; + /** The Trigger.dev run ID for this conversation. */ + runId: string; + /** A scoped access token for this chat run. */ + chatAccessToken: string; + /** Custom data from the frontend. */ + clientData?: TClientData; + /** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */ + continuation: boolean; + /** The run ID of the previous run (only set when `continuation` is true). */ + previousRunId?: string; + /** Whether this run was preloaded before the first message. */ + preloaded: boolean; + /** Token usage from the previous turn. Undefined on turn 0. */ + previousTurnUsage?: LanguageModelUsage; + /** Cumulative token usage across all completed turns so far. */ + totalUsage: LanguageModelUsage; + /** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */ + writer: ChatWriter; +}; + +/** + * Event passed to the `onTurnComplete` callback. + */ +export type TurnCompleteEvent = { + /** The unique identifier for the chat session. */ + chatId: string; + /** The full accumulated conversation in model format (all turns so far). */ + messages: ModelMessage[]; + /** + * The full accumulated conversation in UI format (all turns so far). + * This is the format expected by `useChat` — store this for persistence. 
+ */ + uiMessages: TUIM[]; + /** + * Only the new model messages from this turn (user message(s) + assistant response). + * Useful for appending to an existing conversation record. + */ + newMessages: ModelMessage[]; + /** + * Only the new UI messages from this turn (user message(s) + assistant response). + * Useful for inserting individual message records instead of overwriting the full history. + */ + newUIMessages: TUIM[]; + /** The assistant's response for this turn, with aborted parts cleaned up when `stopped` is true. Undefined if `pipeChat` was used manually. */ + responseMessage: TUIM | undefined; + /** + * The raw assistant response before abort cleanup. Includes incomplete tool parts + * (`input-available`, `partial-call`) and streaming reasoning/text parts. + * Use this if you need custom cleanup logic. Same as `responseMessage` when not stopped. + */ + rawResponseMessage: TUIM | undefined; + /** The turn number (0-indexed). */ + turn: number; + /** The Trigger.dev run ID for this conversation. */ + runId: string; + /** A fresh scoped access token for this chat run (renewed each turn). Persist this for frontend reconnection. */ + chatAccessToken: string; + /** The last event ID from the stream writer. Use this with `resume: true` to avoid replaying events after refresh. */ + lastEventId?: string; + /** Custom data from the frontend. */ + clientData?: TClientData; + /** Whether the user stopped generation during this turn. */ + stopped: boolean; + /** Whether this run is continuing an existing chat (previous run timed out or was cancelled). False for brand new chats. */ + continuation: boolean; + /** The run ID of the previous run (only set when `continuation` is true). */ + previousRunId?: string; + /** Whether this run was preloaded before the first message. */ + preloaded: boolean; + /** Token usage for this turn. Undefined if usage couldn't be captured (e.g. manual pipeChat). 
*/ + usage?: LanguageModelUsage; + /** Cumulative token usage across all turns in this run (including this turn). */ + totalUsage: LanguageModelUsage; +}; + +/** + * Event passed to the `onBeforeTurnComplete` callback. + * Same as `TurnCompleteEvent` but includes a `writer` since the stream is still open. + */ +export type BeforeTurnCompleteEvent< + TClientData = unknown, + TUIM extends UIMessage = UIMessage, +> = TurnCompleteEvent & { + /** Stream writer — write custom `UIMessageChunk` parts to the chat stream. Lazy: no overhead if unused. */ + writer: ChatWriter; +}; + +export type ChatTaskOptions< + TIdentifier extends string, + TClientDataSchema extends TaskSchema | undefined = undefined, + TUIMessage extends UIMessage = UIMessage, +> = Omit< + TaskOptions< + TIdentifier, + ChatTaskWirePayload>, + unknown + >, + "run" +> & { + /** + * Schema for validating `clientData` from the frontend. + * Accepts Zod, ArkType, Valibot, or any supported schema library. + * When provided, `clientData` is parsed and typed in all hooks and `run`. + * + * @example + * ```ts + * import { z } from "zod"; + * + * chat.task({ + * id: "my-chat", + * clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }), + * run: async ({ messages, clientData, signal }) => { + * // clientData is typed as { model?: string; userId: string } + * }, + * }); + * ``` + */ + clientDataSchema?: TClientDataSchema; + + /** + * The run function for the chat task. + * + * Receives a `ChatTaskRunPayload` with the conversation messages, chat session ID, + * trigger type, and abort signals (`signal`, `cancelSignal`, `stopSignal`). + * + * **Auto-piping:** If this function returns a value with `.toUIMessageStream()`, + * the stream is automatically piped to the frontend. + */ + run: (payload: ChatTaskRunPayload>) => Promise; + + /** + * Called when a preloaded run starts, before the first message arrives. 
+ * + * Use this to initialize state, create DB records, and load context early — + * so everything is ready when the user's first message comes through. + * + * @example + * ```ts + * onPreload: async ({ chatId, clientData }) => { + * await db.chat.create({ data: { id: chatId } }); + * userContext.init(await loadUser(clientData.userId)); + * } + * ``` + */ + onPreload?: (event: PreloadEvent>) => Promise | void; + + /** + * Called on the first turn (turn 0) of a new run, before the `run` function executes. + * + * Use this to create the chat record in your database when a new conversation starts. + * + * @example + * ```ts + * onChatStart: async ({ chatId, messages, clientData }) => { + * await db.chat.create({ data: { id: chatId, userId: clientData.userId } }); + * } + * ``` + */ + onChatStart?: (event: ChatStartEvent>) => Promise | void; + + /** + * Called at the start of every turn, after message accumulation and `onChatStart` (turn 0), + * but before the `run` function executes. + * + * Use this to persist messages before streaming begins, so a mid-stream page refresh + * still shows the user's message. + * + * @example + * ```ts + * onTurnStart: async ({ chatId, uiMessages }) => { + * await db.chat.update({ where: { id: chatId }, data: { messages: uiMessages } }); + * } + * ``` + */ + onTurnStart?: ( + event: TurnStartEvent, TUIMessage> + ) => Promise | void; + + /** + * Called after the response is captured but before the stream closes. + * The stream is still open, so you can write custom chunks to the frontend + * (e.g. compaction progress). Use this for compaction, post-processing, + * or any work where the user should see real-time status updates. + * + * @example + * ```ts + * onBeforeTurnComplete: async ({ writer, usage }) => { + * if (usage?.inputTokens && usage.inputTokens > 5000) { + * writer.write({ type: "data-compaction", id: generateId(), data: { status: "compacting" } }); + * // ... compact messages ... 
+ * chat.setMessages(compactedMessages); + * writer.write({ type: "data-compaction", id: generateId(), data: { status: "complete" } }); + * } + * } + * ``` + */ + onBeforeTurnComplete?: ( + event: BeforeTurnCompleteEvent, TUIMessage> + ) => Promise | void; + + /** + * Called when conversation compaction occurs (via `chat.compact()` or + * `chat.compactionStep()`). Use for logging, billing, or persisting the summary. + * + * @example + * ```ts + * onCompacted: async ({ summary, totalTokens, chatId }) => { + * logger.info("Compacted", { totalTokens, chatId }); + * await db.compactionLog.create({ data: { chatId, summary } }); + * } + * ``` + */ + onCompacted?: (event: CompactedEvent) => Promise | void; + + /** + * Automatic context compaction. When provided, compaction runs automatically + * in both the inner loop (prepareStep, between tool-call steps) and the + * outer loop (between turns, for single-step responses where prepareStep + * never fires). + * + * The `shouldCompact` callback decides when to compact, and `summarize` + * generates the summary. The prepareStep is auto-injected into + * `chat.toStreamTextOptions()` — if you provide your own `prepareStep` + * after spreading, it overrides the auto-injected one. + * + * @example + * ```ts + * chat.task({ + * id: "my-chat", + * compaction: { + * shouldCompact: ({ totalTokens }) => (totalTokens ?? 0) > 80_000, + * summarize: async (messages) => + * generateText({ model, messages: [...messages, { role: "user", content: "Summarize." }] }) + * .then((r) => r.text), + * }, + * run: async ({ messages, signal }) => { + * return streamText({ ...chat.toStreamTextOptions({ registry }), messages }); + * }, + * }); + * ``` + */ + compaction?: ChatTaskCompactionOptions; + + /** + * Configure how messages that arrive during streaming are handled. + * + * By default, messages queue for the next turn. 
When `shouldInject` is provided + * and returns `true`, messages are injected between tool-call steps via + * `prepareStep` — allowing users to steer the agent mid-execution. + * + * @example + * ```ts + * pendingMessages: { + * shouldInject: ({ steps }) => steps.length > 0, + * onReceived: ({ message }) => logger.info("Steering message received"), + * }, + * ``` + */ + pendingMessages?: PendingMessagesOptions; + + /** + * Called after each assistant response completes. Use to persist the + * conversation to your database after each assistant response. + * + * @example + * ```ts + * onTurnComplete: async ({ chatId, messages }) => { + * await db.chat.update({ where: { id: chatId }, data: { messages } }); + * } + * ``` + */ + onTurnComplete?: ( + event: TurnCompleteEvent, TUIMessage> + ) => Promise | void; + + /** + * Maximum number of conversational turns (message round-trips) a single run + * will handle before ending. After this many turns the run completes + * normally and the next message will start a fresh run. + * + * @default 100 + */ + maxTurns?: number; + + /** + * How long to wait for the next message before timing out and ending the run. + * Accepts any duration string (e.g. `"1h"`, `"30m"`). + * + * @default "1h" + */ + turnTimeout?: string; + + /** + * How long (in seconds) the run stays idle (active, using compute) after each + * turn, waiting for the next message. During this window responses are instant. + * After this timeout the run suspends (frees compute) and waits via + * `inputStream.wait()`. + * + * Set to `0` to suspend immediately after each turn. + * + * @default 30 + */ + idleTimeoutInSeconds?: number; + + /** + * How long the `chatAccessToken` (scoped to this run) remains valid. + * A fresh token is minted after each turn, so this only needs to cover + * the gap between turns. + * + * Accepts a duration string (e.g. `"1h"`, `"30m"`, `"2h"`). 
+ * + * @default "1h" + */ + chatAccessTokenTTL?: string; + + /** + * How long (in seconds) the run stays idle after `onPreload` fires, + * waiting for the first message before suspending. + * + * Only applies to preloaded runs (triggered via `transport.preload()`). + * + * @default Same as `idleTimeoutInSeconds` + */ + preloadIdleTimeoutInSeconds?: number; + + /** + * How long to wait (suspended) for the first message after a preloaded run starts. + * If no message arrives within this time, the run ends. + * + * Only applies to preloaded runs. + * + * @default Same as `turnTimeout` + */ + preloadTimeout?: string; + + /** + * Transform model messages before they're used anywhere — in `run()`, + * in compaction rebuilds, and in compaction results. + * + * Define once, applied everywhere. Use for Anthropic cache breaks, + * injecting system context, stripping PII, etc. + * + * @example + * ```ts + * prepareMessages: async ({ messages, reason }) => { + * // Add Anthropic cache breaks to the last message + * if (messages.length === 0) return messages; + * const last = messages[messages.length - 1]; + * return [...messages.slice(0, -1), { + * ...last, + * providerOptions: { ...last.providerOptions, anthropic: { cacheControl: { type: "ephemeral" } } }, + * }]; + * } + * ``` + */ + prepareMessages?: ( + event: PrepareMessagesEvent> + ) => ModelMessage[] | Promise; + + /** + * Default options for `toUIMessageStream()` when auto-piping or using + * `turn.complete()` / `chat.pipeAndCapture()`. + * + * Controls how the `StreamTextResult` is converted to a `UIMessageChunk` + * stream — error handling, reasoning/source visibility, metadata, etc. + * + * Can be overridden per-turn by calling `chat.setUIMessageStreamOptions()` + * inside `run()` or lifecycle hooks. Per-turn values are merged on top + * of these defaults (per-turn wins on conflicts). + * + * `onFinish`, `originalMessages`, and `generateMessageId` are managed + * internally and cannot be overridden here. 
Use `streamText`'s `onFinish` + * for custom finish handling, or drop to raw task mode for full control. + * + * @example + * ```ts + * chat.task({ + * id: "my-chat", + * uiMessageStreamOptions: { + * sendReasoning: true, + * onError: (error) => error instanceof Error ? error.message : "An error occurred.", + * }, + * run: async ({ messages, signal }) => { ... }, + * }); + * ``` + */ + uiMessageStreamOptions?: ChatUIMessageStreamOptions; +}; + +/** + * Creates a Trigger.dev task pre-configured for AI SDK chat. + * + * - **Pre-types the payload** as `ChatTaskRunPayload` — includes abort signals + * - **Auto-pipes the stream** if `run` returns a `StreamTextResult` + * - **Multi-turn**: keeps the conversation in a single run using input streams + * - **Stop support**: frontend can stop generation mid-stream via the stop input stream + * - For complex flows, use `pipeChat()` from anywhere inside your task code + * + * @example + * ```ts + * import { chat } from "@trigger.dev/sdk/ai"; + * import { streamText, convertToModelMessages } from "ai"; + * import { openai } from "@ai-sdk/openai"; + * + * export const myChat = chat.task({ + * id: "my-chat", + * run: async ({ messages, signal }) => { + * return streamText({ + * model: openai("gpt-4o"), + * messages, // already converted via convertToModelMessages + * abortSignal: signal, + * }); + * }, + * }); + * ``` + */ +function chatTask< + TIdentifier extends string, + TClientDataSchema extends TaskSchema | undefined = undefined, + TUIMessage extends UIMessage = UIMessage, +>( + options: ChatTaskOptions +): Task>, unknown> { + const { + run: userRun, + clientDataSchema, + onPreload, + onChatStart, + onTurnStart, + onBeforeTurnComplete, + onCompacted, + compaction, + pendingMessages: pendingMessagesConfig, + prepareMessages, + onTurnComplete, + maxTurns = 100, + turnTimeout = "1h", + idleTimeoutInSeconds = 30, + chatAccessTokenTTL = "1h", + preloadIdleTimeoutInSeconds, + preloadTimeout, + uiMessageStreamOptions, + 
...restOptions + } = options; + + const parseClientData = clientDataSchema ? getSchemaParseFn(clientDataSchema) : undefined; + + return createTask< + TIdentifier, + ChatTaskWirePayload>, + unknown + >({ + ...restOptions, + run: async ( + payload: ChatTaskWirePayload>, + { signal: runSignal } + ) => { + // Set gen_ai.conversation.id on the run-level span for dashboard context + const activeSpan = trace.getActiveSpan(); + if (activeSpan) { + activeSpan.setAttribute("gen_ai.conversation.id", payload.chatId); + } + + // Store static UIMessageStream options in locals so resolveUIMessageStreamOptions() can read them + if (uiMessageStreamOptions) { + locals.set(chatUIStreamStaticKey, uiMessageStreamOptions); + } + + // Store onCompacted hook in locals so chat.compact() can call it + if (onCompacted) { + locals.set(chatOnCompactedKey, onCompacted); + } + + if (prepareMessages) { + locals.set(chatPrepareMessagesKey, prepareMessages); + } + + if (compaction) { + locals.set( + chatTaskCompactionKey, + compaction as unknown as ChatTaskCompactionOptions + ); + } + + if (pendingMessagesConfig) { + locals.set(chatPendingMessagesKey, pendingMessagesConfig); + } + + let currentWirePayload = payload; + const continuation = payload.continuation ?? false; + const previousRunId = payload.previousRunId; + const preloaded = payload.trigger === "preload"; + + // Accumulated model messages across turns. Turn 1 initialises from the + // full history the frontend sends; subsequent turns append only the new + // user message(s) and the captured assistant response. + let accumulatedMessages: ModelMessage[] = []; + + // Accumulated UI messages for persistence. Mirrors the model accumulator + // but in frontend-friendly UIMessage format (with parts, id, etc.). 
+ let accumulatedUIMessages: TUIMessage[] = []; + + // Token usage tracking across turns + let previousTurnUsage: LanguageModelUsage | undefined; + let cumulativeUsage: LanguageModelUsage = emptyUsage(); + + // Mutable reference to the current turn's stop controller so the + // stop input stream listener (registered once) can abort the right turn. + let currentStopController: AbortController | undefined; + + // Listen for stop signals for the lifetime of the run + const stopSub = stopInput.on((data) => { + currentStopController?.abort(data?.message || "stopped"); + }); + + try { + // Handle preloaded runs — fire onPreload, then wait for the first real message + if (preloaded) { + if (activeSpan) { + activeSpan.setAttribute("chat.preloaded", true); + } + + const currentRunId = taskContext.ctx?.run.id ?? ""; + let preloadAccessToken = ""; + if (currentRunId) { + try { + preloadAccessToken = await auth.createPublicToken({ + scopes: { + read: { runs: currentRunId }, + write: { inputStreams: currentRunId }, + }, + expirationTime: chatAccessTokenTTL, + }); + } catch { + // Token creation failed + } + } + + // Parse client data for the preload hook + const preloadClientData = ( + parseClientData ? await parseClientData(payload.metadata) : payload.metadata + ) as inferSchemaOut; + + // Fire onPreload hook + if (onPreload) { + await tracer.startActiveSpan( + "onPreload()", + async () => { + await withChatWriter(async (writer) => { + await onPreload({ + chatId: payload.chatId, + runId: currentRunId, + chatAccessToken: preloadAccessToken, + clientData: preloadClientData, + writer, + }); + }); + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onStart", + [SemanticInternalAttributes.COLLAPSED]: true, + "chat.id": payload.chatId, + "chat.preloaded": true, + }, + } + ); + } + + // Wait for the first real message — use preload-specific timeouts if configured + const effectivePreloadIdleTimeout = + payload.idleTimeoutInSeconds ?? 
preloadIdleTimeoutInSeconds ?? idleTimeoutInSeconds; + + const effectivePreloadTimeout = + (metadata.get(TURN_TIMEOUT_METADATA_KEY) as string | undefined) ?? + preloadTimeout ?? + turnTimeout; + + const preloadResult = await messagesInput.waitWithIdleTimeout({ + idleTimeoutInSeconds: effectivePreloadIdleTimeout, + timeout: effectivePreloadTimeout, + spanName: "waiting for first message", + }); + + if (!preloadResult.ok) { + return; // Timed out waiting for first message — end run + } + + let firstMessage = preloadResult.output; + + currentWirePayload = firstMessage as ChatTaskWirePayload< + TUIMessage, + inferSchemaIn + >; + } + + for (let turn = 0; turn < maxTurns; turn++) { + // Extract turn-level context before entering the span + const { metadata: wireMetadata, messages: uiMessages, ...restWire } = currentWirePayload; + const clientData = ( + parseClientData ? await parseClientData(wireMetadata) : wireMetadata + ) as inferSchemaOut; + const lastUserMessage = extractLastUserMessageText(uiMessages); + + const turnAttributes: Attributes = { + "turn.number": turn + 1, + "gen_ai.conversation.id": currentWirePayload.chatId, + "gen_ai.operation.name": "chat", + "chat.trigger": currentWirePayload.trigger, + [SemanticInternalAttributes.STYLE_ICON]: "tabler-message-chatbot", + [SemanticInternalAttributes.ENTITY_TYPE]: "chat-turn", + }; + + if (lastUserMessage) { + turnAttributes["chat.user_message"] = lastUserMessage; + + // Show a truncated preview of the user message as an accessory + const preview = + lastUserMessage.length > 80 ? lastUserMessage.slice(0, 80) + "..." : lastUserMessage; + Object.assign( + turnAttributes, + accessoryAttributes({ + items: [{ text: preview, variant: "normal" }], + style: "codepath", + }) + ); + } + + if (wireMetadata !== undefined) { + turnAttributes["chat.client_data"] = + typeof wireMetadata === "string" ? 
wireMetadata : JSON.stringify(wireMetadata); + } + + const turnResult = await tracer.startActiveSpan( + `chat turn ${turn + 1}`, + async (turnSpan) => { + locals.set(chatPipeCountKey, 0); + locals.set(chatDeferKey, new Set()); + locals.set(chatCompactionStateKey, undefined); + locals.set(chatSteeringQueueKey, []); + // NOTE: chatBackgroundQueueKey is NOT reset here — messages injected + // by deferred work from the previous turn's onTurnComplete need to + // survive into the next turn. The queue is drained before run(). + locals.set(chatInjectedMessageIdsKey, new Set()); + + // Store chat context for auto-detection by task-tool subtasks (ai.toolExecute / legacy ai.tool) + locals.set(chatTurnContextKey, { + chatId: currentWirePayload.chatId, + turn, + continuation, + clientData, + }); + + // Per-turn stop controller (reset each turn) + const stopController = new AbortController(); + currentStopController = stopController; + locals.set(chatStopControllerKey, stopController); + + // Three signals for the user's run function + const stopSignal = stopController.signal; + const cancelSignal = runSignal; + const combinedSignal = AbortSignal.any([runSignal, stopController.signal]); + + // Buffer messages that arrive during streaming + const pendingMessages: ChatTaskWirePayload< + TUIMessage, + inferSchemaIn + >[] = []; + const pmConfig = locals.get(chatPendingMessagesKey); + const msgSub = messagesInput.on(async (msg) => { + // If pendingMessages is configured, route to the steering queue + // instead of the wire buffer. The frontend handles re-sending + // non-injected messages via sendMessage on turn complete. + if (pmConfig) { + const lastUIMessage = msg.messages?.[msg.messages.length - 1]; + if (lastUIMessage) { + if (pmConfig.onReceived) { + try { + await pmConfig.onReceived({ + message: lastUIMessage as TUIMessage, + chatId: currentWirePayload.chatId, + turn, + }); + } catch { + /* non-fatal */ + } + } + + try { + const queue = locals.get(chatSteeringQueueKey) ?? 
[]; + // Deduplicate by message ID — guards against double-sends + if ( + lastUIMessage.id && + queue.some((e) => e.uiMessage.id === lastUIMessage.id) + ) { + return; + } + const modelMsgs = await toModelMessages([lastUIMessage]); + queue.push({ + uiMessage: lastUIMessage as UIMessage, + modelMessages: modelMsgs, + }); + locals.set(chatSteeringQueueKey, queue); + } catch { + /* conversion failed — skip steering queue */ + } + } + return; // Don't add to wire buffer — frontend handles non-injected case + } + + // No pendingMessages config — standard wire buffer for next turn + pendingMessages.push( + msg as ChatTaskWirePayload> + ); + }); + + // Clean up any incomplete tool parts in the incoming history. + // When a previous run was stopped mid-tool-call, the frontend's + // useChat state may still contain assistant messages with tool parts + // in partial/input-available state. These cause API errors (e.g. + // Anthropic requires every tool_use to have a matching tool_result). + const cleanedUIMessages = uiMessages.map((msg) => + msg.role === "assistant" ? cleanupAbortedParts(msg) : msg + ); + + // Convert the incoming UIMessages to model messages and update the accumulator. + // Turn 1: full history from the frontend → replaces the accumulator. + // Turn 2+: only the new message(s) → appended to the accumulator. + const incomingModelMessages = await toModelMessages(cleanedUIMessages); + + // Track new messages for this turn (user input + assistant response). + const turnNewModelMessages: ModelMessage[] = []; + const turnNewUIMessages: TUIMessage[] = []; + + if (turn === 0) { + accumulatedMessages = incomingModelMessages; + accumulatedUIMessages = [...cleanedUIMessages]; + // On first turn, the "new" messages are just the last user message + // (the rest is history). We'll add the response after streaming. 
+ if (cleanedUIMessages.length > 0) { + turnNewUIMessages.push(cleanedUIMessages[cleanedUIMessages.length - 1]!); + const lastModel = incomingModelMessages[incomingModelMessages.length - 1]; + if (lastModel) turnNewModelMessages.push(lastModel); + } + } else if (currentWirePayload.trigger === "regenerate-message") { + // Regenerate: frontend sent full history with last assistant message + // removed. Reset the accumulator to match. + accumulatedMessages = incomingModelMessages; + accumulatedUIMessages = [...cleanedUIMessages]; + // No new user messages for regenerate — just the response (added below) + } else { + // Submit: frontend sent only the new user message(s). Append to accumulator. + accumulatedMessages.push(...incomingModelMessages); + accumulatedUIMessages.push(...cleanedUIMessages); + turnNewModelMessages.push(...incomingModelMessages); + turnNewUIMessages.push(...cleanedUIMessages); + } + + // Mint a scoped public access token once per turn, reused for + // onChatStart, onTurnStart, onTurnComplete, and the turn-complete chunk. + const currentRunId = taskContext.ctx?.run.id ?? 
""; + let turnAccessToken = ""; + if (currentRunId) { + try { + turnAccessToken = await auth.createPublicToken({ + scopes: { + read: { runs: currentRunId }, + write: { inputStreams: currentRunId }, + }, + expirationTime: chatAccessTokenTTL, + }); + } catch { + // Token creation failed + } + } + + // Fire onChatStart on the first turn + if (turn === 0 && onChatStart) { + await tracer.startActiveSpan( + "onChatStart()", + async () => { + await withChatWriter(async (writer) => { + await onChatStart({ + chatId: currentWirePayload.chatId, + messages: accumulatedMessages, + clientData, + runId: currentRunId, + chatAccessToken: turnAccessToken, + continuation, + previousRunId, + preloaded, + writer, + }); + }); + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onStart", + [SemanticInternalAttributes.COLLAPSED]: true, + "chat.id": currentWirePayload.chatId, + "chat.messages.count": accumulatedMessages.length, + "chat.continuation": continuation, + "chat.preloaded": preloaded, + ...(previousRunId ? { "chat.previous_run_id": previousRunId } : {}), + }, + } + ); + } + + // Fire onTurnStart before running user code — persist messages + // so a mid-stream page refresh still shows the user's message. 
+ if (onTurnStart) { + await tracer.startActiveSpan( + "onTurnStart()", + async () => { + await withChatWriter(async (writer) => { + await onTurnStart({ + chatId: currentWirePayload.chatId, + messages: accumulatedMessages, + uiMessages: accumulatedUIMessages, + turn, + runId: currentRunId, + chatAccessToken: turnAccessToken, + clientData, + continuation, + previousRunId, + preloaded, + previousTurnUsage, + totalUsage: cumulativeUsage, + writer, + }); + }); + + // Check if onTurnStart replaced messages (compaction) + const turnStartOverride = locals.get(chatOverrideMessagesKey); + if (turnStartOverride) { + locals.set(chatOverrideMessagesKey, undefined); + accumulatedUIMessages = [...turnStartOverride] as TUIMessage[]; + accumulatedMessages = await toModelMessages(turnStartOverride); + } + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onStart", + [SemanticInternalAttributes.COLLAPSED]: true, + "chat.id": currentWirePayload.chatId, + "chat.turn": turn + 1, + "chat.messages.count": accumulatedMessages.length, + "chat.trigger": currentWirePayload.trigger, + "chat.continuation": continuation, + "chat.preloaded": preloaded, + ...(previousRunId ? { "chat.previous_run_id": previousRunId } : {}), + }, + } + ); + } + + // Captured by the onFinish callback below — works even on abort/stop. + let capturedResponseMessage: TUIMessage | undefined; + + // Promise that resolves when the AI SDK's onFinish fires. + // On abort, the stream's cancel() handler calls onFinish + // asynchronously AFTER pipeChat resolves, so we must await + // this to avoid a race where we check capturedResponseMessage + // before it's been set. + let resolveOnFinish: () => void; + const onFinishPromise = new Promise((r) => { + resolveOnFinish = r; + }); + let onFinishAttached = false; + let runResult: unknown; + + try { + // Drain any messages injected by background work (e.g. 
self-review from previous turn) + const bgQueue = locals.get(chatBackgroundQueueKey); + if (bgQueue && bgQueue.length > 0) { + accumulatedMessages.push(...bgQueue.splice(0)); + } + + runResult = await userRun({ + ...restWire, + messages: await applyPrepareMessages(accumulatedMessages, "run"), + clientData, + continuation, + previousRunId, + preloaded, + previousTurnUsage, + totalUsage: cumulativeUsage, + signal: combinedSignal, + cancelSignal, + stopSignal, + } as any); + + // Auto-pipe if the run function returned a StreamTextResult or similar, + // but only if pipeChat() wasn't already called manually during this turn. + // We call toUIMessageStream ourselves to attach onFinish for response capture. + if ((locals.get(chatPipeCountKey) ?? 0) === 0 && isUIMessageStreamable(runResult)) { + onFinishAttached = true; + const uiStream = runResult.toUIMessageStream({ + ...resolveUIMessageStreamOptions(), + onFinish: ({ responseMessage }: { responseMessage: UIMessage }) => { + capturedResponseMessage = responseMessage as TUIMessage; + resolveOnFinish!(); + }, + }); + await pipeChat(uiStream, { signal: combinedSignal, spanName: "stream response" }); + } + } catch (error) { + // Handle AbortError from streamText gracefully + if (error instanceof Error && error.name === "AbortError") { + if (runSignal.aborted) { + return "exit"; // Full run cancellation — exit + } + // Stop generation — fall through to continue the loop + } else { + throw error; + } + } finally { + msgSub.off(); + } + + // Wait for onFinish to fire — on abort this may resolve slightly + // after pipeChat, since the stream's cancel() handler is async. + if (onFinishAttached) { + await onFinishPromise; + } + + // Capture token usage from the streamText result (if available). + // totalUsage is a PromiseLike that resolves after the stream is consumed. 
+ let turnUsage: LanguageModelUsage | undefined; + if (runResult != null && typeof (runResult as any).totalUsage?.then === "function") { + try { + turnUsage = await (runResult as any).totalUsage; + } catch { + /* non-fatal — usage capture failed */ + } + } + if (turnUsage) { + cumulativeUsage = addUsage(cumulativeUsage, turnUsage); + previousTurnUsage = turnUsage; + + // Add usage attributes to the turn span + if (turnUsage.inputTokens != null) { + turnSpan.setAttribute("gen_ai.usage.input_tokens", turnUsage.inputTokens); + } + if (turnUsage.outputTokens != null) { + turnSpan.setAttribute("gen_ai.usage.output_tokens", turnUsage.outputTokens); + } + if (turnUsage.totalTokens != null) { + turnSpan.setAttribute("gen_ai.usage.total_tokens", turnUsage.totalTokens); + } + if (cumulativeUsage.totalTokens != null) { + turnSpan.setAttribute( + "gen_ai.usage.cumulative_total_tokens", + cumulativeUsage.totalTokens + ); + } + if (cumulativeUsage.inputTokens != null) { + turnSpan.setAttribute( + "gen_ai.usage.cumulative_input_tokens", + cumulativeUsage.inputTokens + ); + } + if (cumulativeUsage.outputTokens != null) { + turnSpan.setAttribute( + "gen_ai.usage.cumulative_output_tokens", + cumulativeUsage.outputTokens + ); + } + } + + // Check if run() (e.g. via prepareStep) replaced messages during this turn. + // This supports intra-turn compaction — the compacted messages become the + // new base, and the response gets appended on top. + const runOverride = locals.get(chatOverrideMessagesKey); + if (runOverride) { + locals.set(chatOverrideMessagesKey, undefined); + accumulatedUIMessages = [...runOverride] as TUIMessage[]; + accumulatedMessages = await toModelMessages(runOverride); + } + + // Check if compaction set a model-only override (preserves UI messages). + // Apply compactUIMessages/compactModelMessages callbacks if configured. 
+ const modelOnlyOverride = locals.get(chatOverrideModelMessagesKey); + if (modelOnlyOverride) { + const compactionSummary = locals.get(chatCompactionStateKey)?.summary ?? ""; + const taskCompactionConfig = locals.get(chatTaskCompactionKey); + locals.set(chatOverrideModelMessagesKey, undefined); + + const compactEvent: CompactMessagesEvent = { + summary: compactionSummary, + uiMessages: accumulatedUIMessages, + modelMessages: accumulatedMessages, + chatId: currentWirePayload.chatId, + turn, + clientData, + source: "inner", + }; + + // Apply model messages: callback or default (use override) + accumulatedMessages = taskCompactionConfig?.compactModelMessages + ? await taskCompactionConfig.compactModelMessages(compactEvent) + : modelOnlyOverride; + + // Apply UI messages: callback or default (preserve all) + if (taskCompactionConfig?.compactUIMessages) { + accumulatedUIMessages = (await taskCompactionConfig.compactUIMessages( + compactEvent + )) as TUIMessage[]; + } + } + + // Determine if the user stopped generation this turn (not a full run cancel). + const wasStopped = stopController.signal.aborted && !runSignal.aborted; + + // Append the assistant's response (partial or complete) to the accumulator. + // The onFinish callback fires even on abort/stop, so partial responses + // from stopped generation are captured correctly. + let rawResponseMessage: TUIMessage | undefined; + if (capturedResponseMessage) { + // Keep the raw message before cleanup for users who want custom handling + rawResponseMessage = capturedResponseMessage; + // Clean up aborted parts (streaming tool calls, reasoning) when stopped + if (wasStopped) { + capturedResponseMessage = cleanupAbortedParts(capturedResponseMessage); + } + // Ensure the response message has an ID (the stream's onFinish + // may produce a message with an empty ID since IDs are normally + // assigned by the frontend's useChat). 
+ if (!capturedResponseMessage.id) { + capturedResponseMessage = { ...capturedResponseMessage, id: generateMessageId() }; + } + accumulatedUIMessages.push(capturedResponseMessage); + turnNewUIMessages.push(capturedResponseMessage); + try { + const responseModelMessages = await toModelMessages([ + stripProviderMetadata(capturedResponseMessage), + ]); + accumulatedMessages.push(...responseModelMessages); + turnNewModelMessages.push(...responseModelMessages); + } catch { + // Conversion failed — skip accumulation for this turn + } + } + // TODO: When the user calls `pipeChat` manually instead of returning a + // StreamTextResult, we don't have access to onFinish. A future iteration + // should let manual-mode users report back response messages for + // accumulation (e.g. via a `chat.addMessages()` helper). + + if (runSignal.aborted) return "exit"; + + // Await deferred background work (e.g. DB writes from onTurnStart) + // before firing hooks so they can rely on the work being done. + const deferredWork = locals.get(chatDeferKey); + if (deferredWork && deferredWork.size > 0) { + await Promise.race([ + Promise.allSettled(deferredWork), + new Promise((r) => setTimeout(r, 5_000)), + ]); + } + + // Outer-loop compaction: runs between turns for single-step responses + // where prepareStep never fires (no tool calls = no step boundaries). + // Only triggers when: task has compaction configured, prepareStep didn't + // already compact this turn, and shouldCompact returns true. 
+ const outerCompaction = locals.get(chatTaskCompactionKey); + const innerCompactionState = locals.get(chatCompactionStateKey); + + if (outerCompaction && !innerCompactionState && turnUsage && !wasStopped) { + const shouldTrigger = await outerCompaction.shouldCompact({ + messages: accumulatedMessages, + totalTokens: turnUsage.totalTokens, + inputTokens: turnUsage.inputTokens, + outputTokens: turnUsage.outputTokens, + usage: turnUsage, + totalUsage: cumulativeUsage, + chatId: currentWirePayload.chatId, + turn, + clientData, + source: "outer", + }); + + if (shouldTrigger) { + await tracer.startActiveSpan( + "context compaction (outer loop)", + async (compactionSpan) => { + const compactionId = generateMessageId(); + + const { waitUntilComplete } = streams.writer(CHAT_STREAM_KEY, { + spanName: "stream compaction chunks", + collapsed: true, + execute: async ({ write, merge }) => { + write({ + type: "data-compaction", + id: compactionId, + data: { status: "compacting", totalTokens: turnUsage.totalTokens }, + }); + + const summary = await outerCompaction.summarize({ + messages: accumulatedMessages, + usage: turnUsage, + totalUsage: cumulativeUsage, + chatId: currentWirePayload.chatId, + turn, + clientData, + source: "outer", + }); + + // Apply compactModelMessages/compactUIMessages callbacks, or defaults. + + const outerCompactEvent: CompactMessagesEvent = { + summary, + uiMessages: accumulatedUIMessages, + modelMessages: accumulatedMessages, + chatId: currentWirePayload.chatId, + turn, + clientData, + source: "outer", + }; + + // Model messages: callback or default (replace with summary) + accumulatedMessages = outerCompaction.compactModelMessages + ? 
await outerCompaction.compactModelMessages(outerCompactEvent) + : [ + { + role: "assistant" as const, + content: [ + { + type: "text" as const, + text: `[Conversation summary]\n\n${summary}`, + }, + ], + }, + ]; + + // UI messages: callback or default (preserve all) + if (outerCompaction.compactUIMessages) { + accumulatedUIMessages = (await outerCompaction.compactUIMessages( + outerCompactEvent + )) as TUIMessage[]; + } + + // Fire onCompacted hook + const onCompactedHook = locals.get(chatOnCompactedKey); + if (onCompactedHook) { + await onCompactedHook({ + summary, + messages: accumulatedMessages, + messageCount: accumulatedMessages.length, + usage: turnUsage, + totalTokens: turnUsage.totalTokens, + inputTokens: turnUsage.inputTokens, + outputTokens: turnUsage.outputTokens, + stepNumber: -1, // outer loop, not a step + chatId: currentWirePayload.chatId, + turn, + writer: { write, merge }, + }); + } + + compactionSpan.setAttribute("compaction.summary_length", summary.length); + + write({ + type: "data-compaction", + id: compactionId, + data: { status: "complete", totalTokens: turnUsage.totalTokens }, + }); + }, + }); + await waitUntilComplete(); + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "tabler-scissors", + "compaction.total_tokens": turnUsage.totalTokens ?? 0, + "compaction.input_tokens": turnUsage.inputTokens ?? 0, + "compaction.message_count": accumulatedMessages.length, + "compaction.outer_loop": true, + "compaction.turn": turn, + ...(currentWirePayload.chatId + ? { "compaction.chat_id": currentWirePayload.chatId } + : {}), + ...accessoryAttributes({ + items: [ + { text: `${turnUsage.totalTokens ?? 
0} tokens`, variant: "normal" }, + { text: `${accumulatedMessages.length} msgs`, variant: "normal" }, + { text: "outer loop", variant: "normal" }, + ], + style: "codepath", + }), + }, + } + ); + } + } + + const turnCompleteEvent = { + chatId: currentWirePayload.chatId, + messages: accumulatedMessages, + uiMessages: accumulatedUIMessages, + newMessages: turnNewModelMessages, + newUIMessages: turnNewUIMessages, + responseMessage: capturedResponseMessage, + rawResponseMessage, + turn, + runId: currentRunId, + chatAccessToken: turnAccessToken, + clientData, + stopped: wasStopped, + continuation, + previousRunId, + preloaded, + usage: turnUsage, + totalUsage: cumulativeUsage, + }; + + // Fire onBeforeTurnComplete — stream is still open so the hook + // can write custom chunks to the frontend (e.g. compaction progress). + if (onBeforeTurnComplete) { + await tracer.startActiveSpan( + "onBeforeTurnComplete()", + async () => { + await withChatWriter(async (writer) => { + await onBeforeTurnComplete({ ...turnCompleteEvent, writer }); + }); + + // Check if the hook replaced messages (compaction) + const override = locals.get(chatOverrideMessagesKey); + if (override) { + locals.set(chatOverrideMessagesKey, undefined); + accumulatedUIMessages = [...override] as TUIMessage[]; + accumulatedMessages = await toModelMessages(override); + // Update event so onTurnComplete sees compacted messages + turnCompleteEvent.messages = accumulatedMessages; + turnCompleteEvent.uiMessages = accumulatedUIMessages; + } + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onComplete", + [SemanticInternalAttributes.COLLAPSED]: true, + "chat.id": currentWirePayload.chatId, + "chat.turn": turn + 1, + }, + } + ); + } + + // Write turn-complete control chunk — closes the frontend stream. + const turnCompleteResult = await writeTurnCompleteChunk( + currentWirePayload.chatId, + turnAccessToken + ); + + // Fire onTurnComplete — stream is closed, use for persistence. 
+ if (onTurnComplete) { + await tracer.startActiveSpan( + "onTurnComplete()", + async () => { + await onTurnComplete({ + ...turnCompleteEvent, + lastEventId: turnCompleteResult.lastEventId, + }); + + // Check if onTurnComplete replaced messages (compaction) + const turnCompleteOverride = locals.get(chatOverrideMessagesKey); + if (turnCompleteOverride) { + locals.set(chatOverrideMessagesKey, undefined); + accumulatedUIMessages = [...turnCompleteOverride] as TUIMessage[]; + accumulatedMessages = await toModelMessages(turnCompleteOverride); + } + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "task-hook-onComplete", + [SemanticInternalAttributes.COLLAPSED]: true, + "chat.id": currentWirePayload.chatId, + "chat.turn": turn + 1, + "chat.stopped": wasStopped, + "chat.continuation": continuation, + "chat.preloaded": preloaded, + ...(previousRunId ? { "chat.previous_run_id": previousRunId } : {}), + "chat.messages.count": accumulatedMessages.length, + "chat.response.parts.count": capturedResponseMessage?.parts?.length ?? 0, + "chat.new_messages.count": turnNewUIMessages.length, + ...(turnUsage?.inputTokens != null + ? { "gen_ai.usage.input_tokens": turnUsage.inputTokens } + : {}), + ...(turnUsage?.outputTokens != null + ? { "gen_ai.usage.output_tokens": turnUsage.outputTokens } + : {}), + ...(turnUsage?.totalTokens != null + ? { "gen_ai.usage.total_tokens": turnUsage.totalTokens } + : {}), + ...(cumulativeUsage.totalTokens != null + ? { "gen_ai.usage.cumulative_total_tokens": cumulativeUsage.totalTokens } + : {}), + }, + } + ); + } + + // NOTE: We intentionally do NOT await deferred work from onTurnComplete here. + // Promises deferred in onTurnComplete (e.g. background self-review via + // chat.defer + chat.inject) run during the idle wait. If they complete + // before the next message, their injected context is picked up in prepareStep. + // The pre-onBeforeTurnComplete drain handles promises from onTurnStart/run(). 
+ + // If messages arrived during streaming (without pendingMessages config), + // use the first one immediately as the next turn. + if (pendingMessages.length > 0) { + currentWirePayload = pendingMessages[0]!; + return "continue"; + } + + // Wait for the next message — stay idle briefly, then suspend + const effectiveIdleTimeout = + (metadata.get(IDLE_TIMEOUT_METADATA_KEY) as number | undefined) ?? + idleTimeoutInSeconds; + const effectiveTurnTimeout = + (metadata.get(TURN_TIMEOUT_METADATA_KEY) as string | undefined) ?? turnTimeout; + + const next = await messagesInput.waitWithIdleTimeout({ + idleTimeoutInSeconds: effectiveIdleTimeout, + timeout: effectiveTurnTimeout, + spanName: "waiting for next message", + }); + + if (!next.ok) { + return "exit"; + } + + currentWirePayload = next.output as ChatTaskWirePayload< + TUIMessage, + inferSchemaIn + >; + return "continue"; + }, + { + attributes: turnAttributes, + } + ); + + if (turnResult === "exit") return; + // "continue" means proceed to next iteration + } + } finally { + stopSub.off(); + } + }, + }); +} + +/** + * Optional config for {@link chat.withUIMessage}. `streamOptions` become default + * static `toUIMessageStream()` settings; inner `chat.task({ uiMessageStreamOptions })` + * shallow-merges on top (task wins on conflicts). + */ +export type ChatWithUIMessageConfig = { + streamOptions?: ChatUIMessageStreamOptions; +}; + +/** + * Fix the UI message type for a chat task (AI SDK `UIMessage` generics) while + * keeping `id` and `clientDataSchema` inference on the inner {@link chat.task} call. + * + * @example + * ```ts + * type AgentUiMessage = UIMessage; + * + * export const myChat = chat.withUIMessage({ + * streamOptions: { sendReasoning: true }, + * }).task({ + * id: "my-chat", + * run: async ({ messages, signal }) => { ... 
}, + * }); + * ``` + */ +function withUIMessage( + config?: ChatWithUIMessageConfig +): { + task: ( + options: ChatTaskOptions + ) => Task>, unknown>; +} { + function taskWithUiMessage< + TIdentifier extends string, + TClientDataSchema extends TaskSchema | undefined = undefined, + >(options: ChatTaskOptions) { + const mergedUiStream = + config?.streamOptions && options.uiMessageStreamOptions + ? { ...config.streamOptions, ...options.uiMessageStreamOptions } + : options.uiMessageStreamOptions ?? config?.streamOptions; + return chatTask({ + ...options, + uiMessageStreamOptions: mergedUiStream, + }); + } + + return { task: taskWithUiMessage }; +} + +/** + * Namespace for AI SDK chat integration. + * + * @example + * ```ts + * import { chat } from "@trigger.dev/sdk/ai"; + * + * // Define a chat task + * export const myChat = chat.task({ + * id: "my-chat", + * run: async ({ messages, signal }) => { + * return streamText({ model, messages, abortSignal: signal }); + * }, + * }); + * + * // Pipe a stream manually (from inside a task) + * await chat.pipe(streamTextResult); + * + * // Create an access token (from a server action) + * const token = await chat.createAccessToken("my-chat"); + * ``` + */ +// --------------------------------------------------------------------------- +// Runtime configuration helpers +// --------------------------------------------------------------------------- + +const TURN_TIMEOUT_METADATA_KEY = "chat.turnTimeout"; +const IDLE_TIMEOUT_METADATA_KEY = "chat.idleTimeout"; + +/** + * Override the turn timeout for subsequent turns in the current run. + * + * The turn timeout controls how long the run stays suspended (freeing compute) + * waiting for the next user message. When it expires, the run completes + * gracefully and the next message starts a fresh run. + * + * Call from inside a `chatTask` run function to adjust based on context. + * + * @param duration - A duration string (e.g. 
`"5m"`, `"1h"`, `"30s"`) + * + * @example + * ```ts + * run: async ({ messages, signal }) => { + * chat.setTurnTimeout("2h"); + * return streamText({ model, messages, abortSignal: signal }); + * } + * ``` + */ +function setTurnTimeout(duration: string): void { + metadata.set(TURN_TIMEOUT_METADATA_KEY, duration); +} + +/** + * Override the turn timeout in seconds for subsequent turns in the current run. + * + * @param seconds - Number of seconds to wait for the next message before ending the run + * + * @example + * ```ts + * run: async ({ messages, signal }) => { + * chat.setTurnTimeoutInSeconds(3600); // 1 hour + * return streamText({ model, messages, abortSignal: signal }); + * } + * ``` + */ +function setTurnTimeoutInSeconds(seconds: number): void { + metadata.set(TURN_TIMEOUT_METADATA_KEY, `${seconds}s`); +} + +/** + * Override the idle timeout for subsequent turns in the current run. + * + * The idle timeout controls how long the run stays active (using compute) + * after each turn, waiting for the next message. During this window, + * responses are instant. After it expires, the run suspends. + * + * @param seconds - Number of seconds to stay idle (0 to suspend immediately) + * + * @example + * ```ts + * run: async ({ messages, signal }) => { + * chat.setIdleTimeoutInSeconds(60); + * return streamText({ model, messages, abortSignal: signal }); + * } + * ``` + */ +function setIdleTimeoutInSeconds(seconds: number): void { + metadata.set(IDLE_TIMEOUT_METADATA_KEY, seconds); +} + +/** + * Override the `toUIMessageStream()` options for the current turn. + * + * These options control how the `StreamTextResult` is converted to a + * `UIMessageChunk` stream — error handling, reasoning/source visibility, + * message metadata, etc. + * + * Per-turn options are merged on top of the static `uiMessageStreamOptions` + * set on `chat.task()`. Per-turn values win on conflicts. 
+ * + * @example + * ```ts + * run: async ({ messages, signal }) => { + * chat.setUIMessageStreamOptions({ + * sendReasoning: true, + * onError: (error) => error instanceof Error ? error.message : "An error occurred.", + * }); + * return streamText({ model, messages, abortSignal: signal }); + * } + * ``` + */ +function setUIMessageStreamOptions(options: ChatUIMessageStreamOptions): void { + locals.set(chatUIStreamPerTurnKey, options); +} + +/** + * Resolve the effective UIMessageStream options by merging: + * 1. Static task-level options (from `chat.task({ uiMessageStreamOptions })`) + * 2. Per-turn overrides (from `chat.setUIMessageStreamOptions()`) + * + * Per-turn values win on conflicts. Clears the per-turn override after reading + * so it doesn't leak into subsequent turns. + * @internal + */ +function resolveUIMessageStreamOptions(): ChatUIMessageStreamOptions { + const staticOptions = locals.get(chatUIStreamStaticKey) ?? {}; + const perTurnOptions = locals.get(chatUIStreamPerTurnKey) ?? {}; + // Clear per-turn override so it doesn't leak into subsequent turns + locals.set(chatUIStreamPerTurnKey, undefined); + return { ...staticOptions, ...perTurnOptions }; +} + +// --------------------------------------------------------------------------- +// Stop detection +// --------------------------------------------------------------------------- + +/** + * Check whether the user stopped generation during the current turn. + * + * Works from **anywhere** inside a `chat.task` run — including inside + * `streamText`'s `onFinish` callback — without needing to thread the + * `stopSignal` through closures. + * + * This is especially useful when the AI SDK's `isAborted` flag is unreliable + * (e.g. when using `createUIMessageStream` + `writer.merge()`). 
+ * + * @example + * ```ts + * onFinish: ({ isAborted }) => { + * const wasStopped = isAborted || chat.isStopped(); + * if (wasStopped) { + * // handle stop + * } + * } + * ``` + */ +function isStopped(): boolean { + const controller = locals.get(chatStopControllerKey); + return controller?.signal.aborted ?? false; +} + +// --------------------------------------------------------------------------- +// Per-turn deferred work +// --------------------------------------------------------------------------- + +/** + * Register a promise that runs in the background during the current turn. + * + * Use this to move non-blocking work (DB writes, analytics, etc.) out of + * the critical path. The promise runs in parallel with streaming and is + * awaited (with a 5 s timeout) before `onTurnComplete` fires. + * + * @example + * ```ts + * onTurnStart: async ({ chatId, uiMessages }) => { + * // Persist messages without blocking the LLM call + * chat.defer(db.chat.update({ where: { id: chatId }, data: { messages: uiMessages } })); + * }, + * ``` + */ +function chatDefer(promise: Promise): void { + const promises = locals.get(chatDeferKey); + if (promises) { + promises.add(promise); + } +} + +// --------------------------------------------------------------------------- +// Background context injection +// --------------------------------------------------------------------------- + +/** + * Queue model messages for injection at the next `prepareStep` boundary. + * + * Use this to inject context from background work into the agent's conversation. + * Messages are appended to the model messages before the next LLM inference call. + * + * Combine with `chat.defer()` to run background analysis and inject results: + * + * @example + * ```ts + * onTurnComplete: async ({ messages }) => { + * chat.defer((async () => { + * const review = await generateObject({ + * model: openai("gpt-4o-mini"), + * messages: [...messages, { role: "user", content: "Review the last response." 
}], + * schema: z.object({ suggestions: z.array(z.string()) }), + * }); + * if (review.object.suggestions.length > 0) { + * chat.inject([{ + * role: "system", + * content: `Improvements for next response:\n${review.object.suggestions.join("\n")}`, + * }]); + * } + * })()); + * }, + * ``` + */ +function injectBackgroundContext(messages: ModelMessage[]): void { + const queue = locals.get(chatBackgroundQueueKey) ?? []; + queue.push(...messages); + locals.set(chatBackgroundQueueKey, queue); +} + +// --------------------------------------------------------------------------- +// Aborted message cleanup +// --------------------------------------------------------------------------- + +/** + * Clean up a UIMessage that was captured during an aborted/stopped turn. + * + * When generation is stopped mid-stream, the captured message may contain: + * - Tool parts stuck in incomplete states (`partial-call`, `input-available`, + * `input-streaming`) that cause permanent UI spinners + * - Reasoning parts with `state: "streaming"` instead of `"done"` + * - Text parts with `state: "streaming"` instead of `"done"` + * + * This function returns a cleaned copy with: + * - Incomplete tool parts removed entirely + * - Reasoning and text parts marked as `"done"` + * + * `chat.task` calls this automatically when stop is detected before passing + * the response to `onTurnComplete`. Use this manually when calling `pipeChat` + * directly and capturing response messages yourself. 
+ * + * @example + * ```ts + * onTurnComplete: async ({ responseMessage, stopped }) => { + * // Already cleaned automatically by chat.task — but if you captured + * // your own message via pipeChat, clean it manually: + * const cleaned = chat.cleanupAbortedParts(myMessage); + * await db.messages.save(cleaned); + * } + * ``` + */ +function cleanupAbortedParts(message: TUIM): TUIM { + if (!message.parts) return message; + + const isToolPart = (part: any) => + part.type === "tool-invocation" || + part.type?.startsWith("tool-") || + part.type === "dynamic-tool"; + + return { + ...message, + parts: message.parts + .filter((part: any) => { + if (!isToolPart(part)) return true; + // Remove tool parts that never completed execution. + // partial-call: input was still streaming when aborted. + // input-available: input was complete but tool never ran. + // input-streaming: input was mid-stream. + const state = part.toolInvocation?.state ?? part.state; + return ( + state !== "partial-call" && state !== "input-available" && state !== "input-streaming" + ); + }) + .map((part: any) => { + // Mark streaming reasoning as done + if (part.type === "reasoning" && part.state === "streaming") { + return { ...part, state: "done" }; + } + // Mark streaming text as done + if (part.type === "text" && part.state === "streaming") { + return { ...part, state: "done" }; + } + return part; + }), + } as TUIM; +} + +// --------------------------------------------------------------------------- +// Composable primitives for raw task chat +// --------------------------------------------------------------------------- + +/** + * Create a managed stop signal wired to the chat stop input stream. + * + * Call once at the start of your run. Use `signal` as the abort signal for + * `streamText`. Call `reset()` at the start of each turn to get a fresh + * per-turn signal. Call `cleanup()` when the run ends. 
+ * + * @example + * ```ts + * const stop = chat.createStopSignal(); + * for (let turn = 0; turn < 100; turn++) { + * stop.reset(); + * const result = streamText({ model, messages, abortSignal: stop.signal }); + * await chat.pipe(result); + * // ... + * } + * stop.cleanup(); + * ``` + */ +function createStopSignal(): { + readonly signal: AbortSignal; + reset: () => void; + cleanup: () => void; +} { + let controller = new AbortController(); + const sub = stopInput.on((data) => { + controller.abort(data?.message || "stopped"); + }); + return { + get signal() { + return controller.signal; + }, + reset() { + controller = new AbortController(); + }, + cleanup() { + sub.off(); + }, + }; +} + +/** + * Signal the frontend that the current turn is complete. + * + * The `TriggerChatTransport` intercepts this to close the ReadableStream + * for the current turn. Call after piping the response stream. + * + * @example + * ```ts + * await chat.pipe(result); + * await chat.writeTurnComplete(); + * ``` + */ +async function chatWriteTurnComplete(options?: { publicAccessToken?: string }): Promise { + await writeTurnCompleteChunk(undefined, options?.publicAccessToken); +} + +/** + * Pipe a `StreamTextResult` (or similar) to the chat stream and capture + * the assistant's response message via `onFinish`. + * + * Combines `toUIMessageStream()` + `onFinish` callback + `chat.pipe()`. + * Returns the captured `UIMessage`, or `undefined` if capture failed. 
+ * + * @example + * ```ts + * const result = streamText({ model, messages, abortSignal: signal }); + * const response = await chat.pipeAndCapture(result, { signal }); + * if (response) conversation.addResponse(response); + * ``` + */ +async function pipeChatAndCapture( + source: UIMessageStreamable, + options?: { signal?: AbortSignal; spanName?: string } +): Promise { + let captured: UIMessage | undefined; + let resolveOnFinish: () => void; + const onFinishPromise = new Promise((r) => { + resolveOnFinish = r; + }); + + const uiStream = source.toUIMessageStream({ + ...resolveUIMessageStreamOptions(), + onFinish: ({ responseMessage }: { responseMessage: UIMessage }) => { + captured = responseMessage; + resolveOnFinish!(); + }, + }); + + await pipeChat(uiStream, { + signal: options?.signal, + spanName: options?.spanName ?? "stream response", + }); + await onFinishPromise; + + return captured; +} + +/** + * Accumulates conversation messages across turns. + * + * Handles the transport protocol: turn 0 sends full history (replace), + * subsequent turns send only new messages (append), regenerate sends + * full history minus last assistant message (replace). 
+ * + * @example + * ```ts + * const conversation = new chat.MessageAccumulator(); + * for (let turn = 0; turn < 100; turn++) { + * const messages = await conversation.addIncoming(payload.messages, payload.trigger, turn); + * const result = streamText({ model, messages }); + * const response = await chat.pipeAndCapture(result); + * if (response) await conversation.addResponse(response); + * } + * ``` + */ +class ChatMessageAccumulator { + modelMessages: ModelMessage[] = []; + uiMessages: UIMessage[] = []; + private _compaction?: ChatTaskCompactionOptions; + private _pendingMessages?: PendingMessagesOptions; + private _steeringQueue: SteeringQueueEntry[] = []; + + constructor(options?: { + compaction?: ChatTaskCompactionOptions; + pendingMessages?: PendingMessagesOptions; + }) { + this._compaction = options?.compaction; + this._pendingMessages = options?.pendingMessages; + } + + /** + * Add incoming messages from the transport payload. + * Returns the full accumulated model messages for `streamText`. + */ + async addIncoming(messages: UIMessage[], trigger: string, turn: number): Promise { + const cleaned = messages.map((m) => (m.role === "assistant" ? cleanupAbortedParts(m) : m)); + const model = await toModelMessages(cleaned); + + if (turn === 0 || trigger === "regenerate-message") { + this.modelMessages = model; + this.uiMessages = [...cleaned]; + } else { + this.modelMessages.push(...model); + this.uiMessages.push(...cleaned); + } + return this.modelMessages; + } + + /** + * Add the assistant's response to the accumulator. + * Call after `pipeAndCapture` with the captured response. + */ + /** + * Replace all accumulated messages (for compaction). + * Converts UIMessages to ModelMessages internally. 
+ */ + async setMessages(uiMessages: UIMessage[]): Promise { + this.uiMessages = [...uiMessages]; + this.modelMessages = await toModelMessages(uiMessages); + } + + async addResponse(response: UIMessage): Promise { + if (!response.id) { + response = { ...response, id: generateMessageId() }; + } + this.uiMessages.push(response); + try { + const msgs = await toModelMessages([stripProviderMetadata(response)]); + this.modelMessages.push(...msgs); + } catch { + // Conversion failed — skip model message accumulation for this response + } + } + + /** + * Queue a message for injection via `prepareStep`. Call from a + * `messagesInput.on()` listener when a message arrives during streaming. + */ + steer(message: UIMessage, modelMessages?: ModelMessage[]): void { + if (modelMessages) { + this._steeringQueue.push({ uiMessage: message, modelMessages }); + } else { + // Defer conversion — will be done in prepareStep if needed + this._steeringQueue.push({ uiMessage: message, modelMessages: [] }); + } + } + + /** + * Queue a message for injection, converting to model messages automatically. + */ + async steerAsync(message: UIMessage): Promise { + const modelMsgs = await toModelMessages([message]); + this._steeringQueue.push({ uiMessage: message, modelMessages: modelMsgs }); + } + + /** + * Get and clear unconsumed steering messages. + */ + drainSteering(): UIMessage[] { + const result = this._steeringQueue.map((e) => e.uiMessage); + this._steeringQueue = []; + return result; + } + + /** + * Returns a `prepareStep` function that handles both compaction and + * pending message injection. Pass to `streamText({ prepareStep: conversation.prepareStep() })`. 
+ */ + prepareStep(): + | ((args: { + messages: ModelMessage[]; + steps: CompactionStep[]; + }) => Promise<{ messages: ModelMessage[] } | undefined>) + | undefined { + if (!this._compaction && !this._pendingMessages) return undefined; + const comp = this._compaction; + const pm = this._pendingMessages; + const queue = this._steeringQueue; + + return async ({ messages, steps }) => { + let resultMessages: ModelMessage[] | undefined; + + // 1. Compaction + if (comp) { + const result = await chatCompact(messages, steps, { + shouldCompact: comp.shouldCompact, + summarize: (msgs) => comp.summarize({ messages: msgs, source: "inner" }), + }); + if (result.type !== "skipped") { + resultMessages = result.messages; + } + } + + // 2. Pending message injection + if (pm && queue.length > 0) { + const injected = await drainSteeringQueue(pm, resultMessages ?? messages, steps, queue); + if (injected.length > 0) { + resultMessages = [...(resultMessages ?? messages), ...injected]; + } + } + + return resultMessages ? { messages: resultMessages } : undefined; + }; + } + + /** + * Run outer-loop compaction if needed. Call after adding the response + * and capturing usage. Applies `compactModelMessages` and `compactUIMessages` + * callbacks if configured. + * + * @returns `true` if compaction was performed, `false` otherwise. 
+ */ + async compactIfNeeded( + usage: LanguageModelUsage | undefined, + context?: { + chatId?: string; + turn?: number; + clientData?: unknown; + totalUsage?: LanguageModelUsage; + } + ): Promise { + if (!this._compaction || !usage) return false; + + const shouldTrigger = await this._compaction.shouldCompact({ + messages: this.modelMessages, + totalTokens: usage.totalTokens, + inputTokens: usage.inputTokens, + outputTokens: usage.outputTokens, + usage, + totalUsage: context?.totalUsage, + chatId: context?.chatId, + turn: context?.turn, + clientData: context?.clientData, + source: "outer", + }); + + if (!shouldTrigger) return false; + + const summary = await this._compaction.summarize({ + messages: this.modelMessages, + usage, + totalUsage: context?.totalUsage, + chatId: context?.chatId, + turn: context?.turn, + clientData: context?.clientData, + source: "outer", + }); + + const compactEvent: CompactMessagesEvent = { + summary, + uiMessages: this.uiMessages, + modelMessages: this.modelMessages, + chatId: context?.chatId ?? "", + turn: context?.turn ?? 0, + clientData: context?.clientData, + source: "outer", + }; + + this.modelMessages = this._compaction.compactModelMessages + ? await this._compaction.compactModelMessages(compactEvent) + : [ + { + role: "assistant" as const, + content: [{ type: "text" as const, text: `[Conversation summary]\n\n${summary}` }], + }, + ]; + + if (this._compaction.compactUIMessages) { + this.uiMessages = await this._compaction.compactUIMessages(compactEvent); + } + + return true; + } +} + +// --------------------------------------------------------------------------- +// chat.createSession — async iterator for chat turns +// --------------------------------------------------------------------------- + +export type ChatSessionOptions = { + /** Run-level cancel signal (from task context). */ + signal: AbortSignal; + /** Seconds to stay idle between turns before suspending. 
@default 30 */ + idleTimeoutInSeconds?: number; + /** Duration string for suspend timeout. @default "1h" */ + timeout?: string; + /** Max turns before ending. @default 100 */ + maxTurns?: number; + /** Automatic context compaction — same options as `chat.task({ compaction })`. */ + compaction?: ChatTaskCompactionOptions; + /** Configure mid-execution message injection — same options as `chat.task({ pendingMessages })`. */ + pendingMessages?: PendingMessagesOptions; +}; + +export type ChatTurn = { + /** Turn number (0-indexed). */ + number: number; + /** Chat session ID. */ + chatId: string; + /** What triggered this turn. */ + trigger: string; + /** Client data from the transport (`metadata` field on the wire payload). */ + clientData: unknown; + /** Full accumulated model messages — pass directly to `streamText`. */ + readonly messages: ModelMessage[]; + /** Full accumulated UI messages — use for persistence. */ + readonly uiMessages: UIMessage[]; + /** Combined stop+cancel AbortSignal (fresh each turn). */ + signal: AbortSignal; + /** Whether the user stopped generation this turn. */ + readonly stopped: boolean; + /** Whether this is a continuation run. */ + continuation: boolean; + /** Token usage from the previous turn. Undefined on turn 0. */ + previousTurnUsage?: LanguageModelUsage; + /** Cumulative token usage across all completed turns so far. */ + totalUsage: LanguageModelUsage; + + /** + * Replace accumulated messages (for compaction). Takes UIMessages and + * converts to ModelMessages internally. After calling this, `turn.messages` + * reflects the compacted history. + */ + setMessages(uiMessages: UIMessage[]): Promise; + + /** + * Easy path: pipe stream, capture response, accumulate it, + * clean up aborted parts if stopped, and write turn-complete chunk. + */ + complete(source: UIMessageStreamable): Promise; + + /** + * Manual path: just write turn-complete chunk. + * Use when you've already piped and accumulated manually. 
+ */ + done(): Promise; + + /** + * Add the response to the accumulator manually. + * Use with `chat.pipeAndCapture` when you need control between pipe and done. + */ + addResponse(response: UIMessage): Promise; + + /** + * Returns a `prepareStep` function that handles both compaction and + * pending message injection. Pass to `streamText({ prepareStep: turn.prepareStep() })`. + * Only needed when not using `chat.toStreamTextOptions()` (which auto-injects it). + */ + prepareStep(): + | ((args: { + messages: ModelMessage[]; + steps: CompactionStep[]; + }) => Promise<{ messages: ModelMessage[] } | undefined>) + | undefined; +}; + +/** + * Create a chat session that yields turns as an async iterator. + * + * Handles: preload wait, stop signals, message accumulation, turn-complete + * signaling, and idle/suspend between turns. You control: initialization, + * model/tool selection, persistence, and any custom per-turn logic. + * + * @example + * ```ts + * import { task } from "@trigger.dev/sdk"; + * import { chat, type ChatTaskWirePayload } from "@trigger.dev/sdk/ai"; + * import { streamText } from "ai"; + * import { openai } from "@ai-sdk/openai"; + * + * export const myChat = task({ + * id: "my-chat", + * run: async (payload: ChatTaskWirePayload, { signal }) => { + * const session = chat.createSession(payload, { signal }); + * + * for await (const turn of session) { + * const result = streamText({ + * model: openai("gpt-4o"), + * messages: turn.messages, + * abortSignal: turn.signal, + * }); + * await turn.complete(result); + * } + * }, + * }); + * ``` + */ +function createChatSession( + payload: ChatTaskWirePayload, + options: ChatSessionOptions +): AsyncIterable { + const { + signal: runSignal, + idleTimeoutInSeconds = 30, + timeout = "1h", + maxTurns = 100, + compaction: sessionCompaction, + pendingMessages: sessionPendingMessages, + } = options; + + return { + [Symbol.asyncIterator]() { + let currentPayload = payload; + let turn = -1; + const stop = 
createStopSignal(); + const accumulator = new ChatMessageAccumulator(); + let previousTurnUsage: LanguageModelUsage | undefined; + let cumulativeUsage: LanguageModelUsage = emptyUsage(); + + return { + async next(): Promise> { + turn++; + + // First turn: handle preload — wait for the first real message + if (turn === 0 && currentPayload.trigger === "preload") { + const result = await messagesInput.waitWithIdleTimeout({ + idleTimeoutInSeconds: currentPayload.idleTimeoutInSeconds ?? idleTimeoutInSeconds, + timeout, + spanName: "waiting for first message", + }); + if (!result.ok || runSignal.aborted) { + stop.cleanup(); + return { done: true, value: undefined }; + } + currentPayload = result.output; + } + + // Subsequent turns: wait for the next message + if (turn > 0) { + const next = await messagesInput.waitWithIdleTimeout({ + idleTimeoutInSeconds, + timeout, + spanName: "waiting for next message", + }); + if (!next.ok || runSignal.aborted) { + stop.cleanup(); + return { done: true, value: undefined }; + } + currentPayload = next.output; + } + + // Check limits + if (turn >= maxTurns || runSignal.aborted) { + stop.cleanup(); + return { done: true, value: undefined }; + } + + // Reset stop signal for this turn + stop.reset(); + + // Set up steering queue and pending messages config in locals + // so toStreamTextOptions() auto-injects prepareStep for steering + const turnSteeringQueue: SteeringQueueEntry[] = []; + locals.set(chatSteeringQueueKey, turnSteeringQueue); + if (sessionPendingMessages) { + locals.set(chatPendingMessagesKey, sessionPendingMessages); + } + locals.set(chatTurnContextKey, { + chatId: currentPayload.chatId, + turn, + continuation: currentPayload.continuation ?? 
false, + clientData: currentPayload.metadata, + }); + + // Listen for messages during streaming (steering + next-turn buffer) + const sessionPendingWire: ChatTaskWirePayload[] = []; + const sessionMsgSub = messagesInput.on(async (msg) => { + sessionPendingWire.push(msg); + + if (sessionPendingMessages) { + const lastUIMessage = msg.messages?.[msg.messages.length - 1]; + if (lastUIMessage) { + if (sessionPendingMessages.onReceived) { + try { + await sessionPendingMessages.onReceived({ + message: lastUIMessage, + chatId: currentPayload.chatId, + turn, + }); + } catch { + /* non-fatal */ + } + } + try { + const modelMsgs = await toModelMessages([lastUIMessage]); + turnSteeringQueue.push({ uiMessage: lastUIMessage, modelMessages: modelMsgs }); + } catch { + /* non-fatal */ + } + } + } + }); + + // Accumulate messages + const messages = await accumulator.addIncoming( + currentPayload.messages, + currentPayload.trigger, + turn + ); + + const combinedSignal = AbortSignal.any([runSignal, stop.signal]); + + const turnObj: ChatTurn = { + number: turn, + chatId: currentPayload.chatId, + trigger: currentPayload.trigger, + clientData: currentPayload.metadata, + get messages() { + return accumulator.modelMessages; + }, + get uiMessages() { + return accumulator.uiMessages; + }, + signal: combinedSignal, + get stopped() { + return stop.signal.aborted && !runSignal.aborted; + }, + continuation: currentPayload.continuation ?? 
false, + previousTurnUsage, + totalUsage: cumulativeUsage, + + async setMessages(uiMessages: UIMessage[]) { + await accumulator.setMessages(uiMessages); + }, + + async complete(source: UIMessageStreamable) { + let response: UIMessage | undefined; + try { + response = await pipeChatAndCapture(source, { signal: combinedSignal }); + } catch (error) { + if (error instanceof Error && error.name === "AbortError") { + if (runSignal.aborted) { + // Full cancel — don't accumulate + sessionMsgSub.off(); + await chatWriteTurnComplete(); + return undefined; + } + // Stop — fall through to accumulate partial response + } else { + throw error; + } + } + + if (response) { + const cleaned = + stop.signal.aborted && !runSignal.aborted + ? cleanupAbortedParts(response) + : response; + await accumulator.addResponse(cleaned); + } + + // Capture token usage from the streamText result + let turnUsage: LanguageModelUsage | undefined; + if (typeof (source as any).totalUsage?.then === "function") { + try { + const usage: LanguageModelUsage = await (source as any).totalUsage; + turnUsage = usage; + previousTurnUsage = usage; + cumulativeUsage = addUsage(cumulativeUsage, usage); + } catch { + /* non-fatal */ + } + } + + // Outer-loop compaction (same logic as chat.task) + if (sessionCompaction && turnUsage && !turnObj.stopped) { + const shouldTrigger = await sessionCompaction.shouldCompact({ + messages: accumulator.modelMessages, + totalTokens: turnUsage.totalTokens, + inputTokens: turnUsage.inputTokens, + outputTokens: turnUsage.outputTokens, + usage: turnUsage, + totalUsage: cumulativeUsage, + chatId: currentPayload.chatId, + turn, + clientData: currentPayload.metadata, + source: "outer", + }); + + if (shouldTrigger) { + const summary = await sessionCompaction.summarize({ + messages: accumulator.modelMessages, + usage: turnUsage, + totalUsage: cumulativeUsage, + chatId: currentPayload.chatId, + turn, + clientData: currentPayload.metadata, + source: "outer", + }); + + const compactEvent: 
CompactMessagesEvent = { + summary, + uiMessages: accumulator.uiMessages, + modelMessages: accumulator.modelMessages, + chatId: currentPayload.chatId, + turn, + clientData: currentPayload.metadata, + source: "outer", + }; + + accumulator.modelMessages = sessionCompaction.compactModelMessages + ? await sessionCompaction.compactModelMessages(compactEvent) + : [ + { + role: "assistant" as const, + content: [ + { type: "text" as const, text: `[Conversation summary]\n\n${summary}` }, + ], + }, + ]; + + if (sessionCompaction.compactUIMessages) { + accumulator.uiMessages = await sessionCompaction.compactUIMessages( + compactEvent + ); + } + } + } + + sessionMsgSub.off(); + await chatWriteTurnComplete(); + return response; + }, + + async addResponse(response: UIMessage) { + await accumulator.addResponse(response); + }, + + async done() { + sessionMsgSub.off(); + await chatWriteTurnComplete(); + }, + + prepareStep() { + const hasCompaction = !!sessionCompaction; + const hasPending = !!sessionPendingMessages; + if (!hasCompaction && !hasPending) return undefined; + + return async ({ + messages: stepMsgs, + steps, + }: { + messages: ModelMessage[]; + steps: CompactionStep[]; + }) => { + let resultMessages: ModelMessage[] | undefined; + + if (sessionCompaction) { + const compactResult = await chatCompact(stepMsgs, steps, { + shouldCompact: sessionCompaction.shouldCompact, + summarize: (msgs) => + sessionCompaction.summarize({ messages: msgs, source: "inner" }), + }); + if (compactResult.type !== "skipped") { + resultMessages = compactResult.messages; + } + } + + if (sessionPendingMessages) { + const injected = await drainSteeringQueue( + sessionPendingMessages, + resultMessages ?? stepMsgs, + steps, + turnSteeringQueue + ); + if (injected.length > 0) { + resultMessages = [...(resultMessages ?? stepMsgs), ...injected]; + } + } + + return resultMessages ? 
{ messages: resultMessages } : undefined; + }; + }, + }; + + return { done: false, value: turnObj }; + }, + + async return() { + stop.cleanup(); + return { done: true, value: undefined }; + }, + }; + }, + }; +} + +// --------------------------------------------------------------------------- +// chat.local — per-run typed data with Proxy access +// --------------------------------------------------------------------------- + +/** @internal Symbol for storing the locals key on the proxy target. */ +const CHAT_LOCAL_KEY: unique symbol = Symbol("chatLocalKey"); +/** @internal Symbol for storing the dirty-tracking locals key. */ +const CHAT_LOCAL_DIRTY_KEY: unique symbol = Symbol("chatLocalDirtyKey"); + +// --------------------------------------------------------------------------- +// chat.local registry — tracks all declared locals for serialization +// --------------------------------------------------------------------------- + +type ChatLocalEntry = { key: ReturnType; id: string }; +const chatLocalRegistry = new Set(); + +/** @internal Run-scoped flag to ensure hydration happens at most once per run. */ +const chatLocalsHydratedKey = locals.create("chat.locals.hydrated"); + +/** + * Hydrate chat.local values from subtask metadata (set by `ai.toolExecute()` or legacy `ai.tool()`). + * Runs once per run — subsequent calls are no-ops. + * @internal + */ +function hydrateLocalsFromMetadata(): void { + if (locals.get(chatLocalsHydratedKey)) return; + locals.set(chatLocalsHydratedKey, true); + const opts = metadata.get(METADATA_KEY) as ToolCallExecutionOptions | undefined; + if (!opts?.chatLocals) return; + for (const [id, value] of Object.entries(opts.chatLocals)) { + locals.set(locals.create(id), value); + } +} + +/** + * A Proxy-backed, run-scoped data object that appears as `T` to users. + * Includes helper methods for initialization, dirty tracking, and serialization. 
+ * Internal metadata is stored behind Symbols and invisible to + * `Object.keys()`, `JSON.stringify()`, and spread. + */ +export type ChatLocal> = T & { + /** Initialize the local with a value. Call in `onChatStart` or `run()`. */ + init(value: T): void; + /** Returns `true` if any property was set since the last check. Resets the dirty flag. */ + hasChanged(): boolean; + /** Returns a plain object copy of the current value. Useful for persistence. */ + get(): T; + readonly [CHAT_LOCAL_KEY]: ReturnType>; + readonly [CHAT_LOCAL_DIRTY_KEY]: ReturnType>; +}; + +/** + * Creates a per-run typed data object accessible from anywhere during task execution. + * + * Declare at module level, then initialize inside a lifecycle hook (e.g. `onChatStart`) + * using `chat.initLocal()`. Properties are accessible directly via the Proxy. + * + * Multiple locals can coexist — each gets its own isolated run-scoped storage. + * + * The `id` is required and must be unique across all `chat.local()` calls in + * your project. It's used to serialize values into subtask metadata so that + * `ai.toolExecute()` (or legacy `ai.tool()`) subtasks can auto-hydrate parent locals (read-only). + * + * @example + * ```ts + * import { chat } from "@trigger.dev/sdk/ai"; + * + * const userPrefs = chat.local<{ theme: string; language: string }>({ id: "userPrefs" }); + * const gameState = chat.local<{ score: number; streak: number }>({ id: "gameState" }); + * + * export const myChat = chat.task({ + * id: "my-chat", + * onChatStart: async ({ clientData }) => { + * const prefs = await db.prefs.findUnique({ where: { userId: clientData.userId } }); + * userPrefs.init(prefs ?? 
{ theme: "dark", language: "en" }); + * gameState.init({ score: 0, streak: 0 }); + * }, + * onTurnComplete: async ({ chatId }) => { + * if (gameState.hasChanged()) { + * await db.save({ where: { chatId }, data: gameState.get() }); + * } + * }, + * run: async ({ messages }) => { + * gameState.score++; + * return streamText({ + * system: `User prefers ${userPrefs.theme} theme. Score: ${gameState.score}`, + * messages, + * }); + * }, + * }); + * ``` + */ +function chatLocal>(options: { id: string }): ChatLocal { + const id = `chat.local.${options.id}`; + const localKey = locals.create(id); + const dirtyKey = locals.create(`${id}.dirty`); + + chatLocalRegistry.add({ key: localKey, id }); + + const target = {} as any; + target[CHAT_LOCAL_KEY] = localKey; + target[CHAT_LOCAL_DIRTY_KEY] = dirtyKey; + + return new Proxy(target, { + get(_target, prop, _receiver) { + // Internal Symbol properties + if (prop === CHAT_LOCAL_KEY) return _target[CHAT_LOCAL_KEY]; + if (prop === CHAT_LOCAL_DIRTY_KEY) return _target[CHAT_LOCAL_DIRTY_KEY]; + + // Instance methods + if (prop === "init") { + return (value: T) => { + locals.set(localKey, value); + locals.set(dirtyKey, false); + }; + } + if (prop === "hasChanged") { + return () => { + const dirty = locals.get(dirtyKey) ?? false; + locals.set(dirtyKey, false); + return dirty; + }; + } + if (prop === "get") { + return () => { + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } + if (current === undefined) { + throw new Error("local.get() called before initialization. Call local.init() first."); + } + return { ...current }; + }; + } + // toJSON for serialization (JSON.stringify(local)) + if (prop === "toJSON") { + return () => { + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } + return current ? 
{ ...current } : undefined; + }; + } + + let current = locals.get(localKey); + if (current === undefined) { + // Auto-hydrate from parent metadata in subtask context + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } + if (current === undefined) return undefined; + return (current as any)[prop]; + }, + + set(_target, prop, value) { + // Don't allow setting internal Symbols + if (typeof prop === "symbol") return false; + + const current = locals.get(localKey); + if (current === undefined) { + throw new Error( + "chat.local can only be modified after initialization. " + + "Call local.init() in onChatStart or run() first." + ); + } + locals.set(localKey, { ...current, [prop]: value }); + locals.set(dirtyKey, true); + return true; + }, + + has(_target, prop) { + if (typeof prop === "symbol") return prop in _target; + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } + return current !== undefined && prop in current; + }, + + ownKeys() { + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } + return current ? Reflect.ownKeys(current) : []; + }, + + getOwnPropertyDescriptor(_target, prop) { + if (typeof prop === "symbol") return undefined; + let current = locals.get(localKey); + if (current === undefined) { + hydrateLocalsFromMetadata(); + current = locals.get(localKey); + } + if (current === undefined || !(prop in current)) return undefined; + return { + configurable: true, + enumerable: true, + writable: true, + value: (current as any)[prop], + }; + }, + }) as ChatLocal; +} + +/** + * Extracts the client data (metadata) type from a chat task. + * Use this to type the `metadata` option on the transport. 
+ * + * @example + * ```ts + * import type { InferChatClientData } from "@trigger.dev/sdk/ai"; + * import type { myChat } from "@/trigger/chat"; + * + * type MyClientData = InferChatClientData; + * // { model?: string; userId: string } + * ``` + */ +export type InferChatClientData = TTask extends Task< + string, + ChatTaskWirePayload, + any +> + ? TMetadata + : unknown; + +/** + * Extracts the UI message type from a chat task (wire payload `messages` items). + * + * @example + * ```ts + * import type { InferChatUIMessage } from "@trigger.dev/sdk/ai"; + * import type { myChat } from "@/trigger/chat"; + * + * type Msg = InferChatUIMessage; + * ``` + */ +export type InferChatUIMessage = TTask extends Task< + string, + ChatTaskWirePayload, + any +> + ? TUIM + : UIMessage; + +export const chat = { + /** Create a chat task. See {@link chatTask}. */ + task: chatTask, + /** Create a chat task with a fixed {@link UIMessage} subtype and optional default stream options. See {@link withUIMessage}. */ + withUIMessage, + /** Pipe a stream to the chat transport. See {@link pipeChat}. */ + pipe: pipeChat, + /** Create a per-run typed local. See {@link chatLocal}. */ + local: chatLocal, + /** Create a public access token for a chat task. See {@link createChatAccessToken}. */ + createAccessToken: createChatAccessToken, + /** Override the turn timeout at runtime (duration string). See {@link setTurnTimeout}. */ + setTurnTimeout, + /** Override the turn timeout at runtime (seconds). See {@link setTurnTimeoutInSeconds}. */ + setTurnTimeoutInSeconds, + /** Override the idle timeout at runtime. See {@link setIdleTimeoutInSeconds}. */ + setIdleTimeoutInSeconds, + /** Override toUIMessageStream() options for the current turn. See {@link setUIMessageStreamOptions}. */ + setUIMessageStreamOptions, + /** Check if the current turn was stopped by the user. See {@link isStopped}. */ + isStopped, + /** Clean up aborted parts from a UIMessage. See {@link cleanupAbortedParts}. 
*/ + cleanupAbortedParts, + /** Register background work that runs in parallel with streaming. See {@link chatDefer}. */ + defer: chatDefer, + /** Queue model messages for injection at the next `prepareStep` boundary. See {@link injectBackgroundContext}. */ + inject: injectBackgroundContext, + /** Typed chat output stream for writing custom chunks or piping from subtasks. */ + stream: chatStream, + /** Pre-built input stream for receiving messages from the transport. */ + messages: messagesInput, + /** Create a managed stop signal wired to the stop input stream. See {@link createStopSignal}. */ + createStopSignal, + /** Signal the frontend that the current turn is complete. See {@link chatWriteTurnComplete}. */ + writeTurnComplete: chatWriteTurnComplete, + /** Pipe a stream and capture the response message. See {@link pipeChatAndCapture}. */ + pipeAndCapture: pipeChatAndCapture, + /** Message accumulator class for raw task chat. See {@link ChatMessageAccumulator}. */ + MessageAccumulator: ChatMessageAccumulator, + /** Create a chat session (async iterator). See {@link createChatSession}. */ + createSession: createChatSession, + /** + * Store and retrieve a resolved prompt for the current run. + * + * - `chat.prompt.set(resolved)` — store a `ResolvedPrompt` or plain string + * - `chat.prompt()` — read the stored prompt (throws if not set) + */ + prompt: Object.assign(getChatPrompt, { set: setChatPrompt }), + /** + * Returns an options object ready to spread into `streamText()`. + * Reads the stored prompt and returns `{ system, experimental_telemetry, ...config }`. + * Returns `{}` if no prompt has been set. + */ + toStreamTextOptions, + /** + * Replace the accumulated conversation messages for compaction. + * Call from `onTurnStart` or `onTurnComplete`. Takes `UIMessage[]` and + * converts to `ModelMessage[]` internally. + */ + setMessages: setChatMessages, + /** Check if it's safe to compact messages (no in-flight tool calls). 
*/ + isCompactionSafe, + /** Returns a `prepareStep` function that handles context compaction automatically. */ + compactionStep: chatCompactionStep, + /** Low-level compaction for use inside a custom `prepareStep`. */ + compact: chatCompact, + /** Read the current compaction state (summary + base message count). */ + getCompactionState, +}; + +/** + * Writes a turn-complete control chunk to the chat output stream. + * The frontend transport intercepts this to close the ReadableStream for the current turn. + * @internal + */ +async function writeTurnCompleteChunk( + chatId?: string, + publicAccessToken?: string +): Promise { + const { waitUntilComplete } = streams.writer(CHAT_STREAM_KEY, { + spanName: "turn complete", + collapsed: true, + execute: ({ write }) => { + write({ + type: "__trigger_turn_complete", + ...(publicAccessToken ? { publicAccessToken } : {}), + }); + }, + }); + return await waitUntilComplete(); +} + +/** + * Extracts the text content of the last user message from a UIMessage array. + * Returns undefined if no user message is found. + * @internal + */ +function extractLastUserMessageText(messages: UIMessage[]): string | undefined { + for (let i = messages.length - 1; i >= 0; i--) { + const msg = messages[i]!; + if (msg.role !== "user") continue; + + // UIMessage uses parts array + if (msg.parts) { + const textParts = msg.parts + .filter((p: any) => p.type === "text" && p.text) + .map((p: any) => p.text as string); + if (textParts.length > 0) { + return textParts.join("\n"); + } + } + + break; + } + + return undefined; +} + +/** + * Strips ephemeral OpenAI Responses API `itemId` from a UIMessage's parts. + * + * The OpenAI Responses provider attaches `itemId` to message parts via + * `providerMetadata.openai.itemId`. These IDs are ephemeral — sending them + * back in a subsequent `streamText` call causes 404s because the provider + * can't find the referenced item (especially for stopped/partial responses). 
+ * + * @internal + */ +function stripProviderMetadata(message: UIMessage): UIMessage { + if (!message.parts) return message; + return { + ...message, + parts: message.parts.map((part: any) => { + const openai = part.providerMetadata?.openai; + if (!openai?.itemId) return part; + + const { itemId, ...restOpenai } = openai; + const { openai: _, ...restProviders } = part.providerMetadata; + return { + ...part, + providerMetadata: { + ...restProviders, + ...(Object.keys(restOpenai).length > 0 ? { openai: restOpenai } : {}), + }, + }; + }), + }; +} diff --git a/packages/trigger-sdk/src/v3/chat-constants.ts b/packages/trigger-sdk/src/v3/chat-constants.ts new file mode 100644 index 00000000000..dcd170f02d1 --- /dev/null +++ b/packages/trigger-sdk/src/v3/chat-constants.ts @@ -0,0 +1,13 @@ +/** + * Stream IDs used for bidirectional chat communication. + * Shared between backend (ai.ts) and frontend (chat.ts). + */ + +/** The output stream key where UIMessageChunks are written. */ +export const CHAT_STREAM_KEY = "chat"; + +/** Input stream ID for sending chat messages to the running task. */ +export const CHAT_MESSAGES_STREAM_ID = "chat-messages"; + +/** Input stream ID for sending stop signals to abort the current generation. */ +export const CHAT_STOP_STREAM_ID = "chat-stop"; diff --git a/packages/trigger-sdk/src/v3/chat-react.ts b/packages/trigger-sdk/src/v3/chat-react.ts new file mode 100644 index 00000000000..59b66d91c65 --- /dev/null +++ b/packages/trigger-sdk/src/v3/chat-react.ts @@ -0,0 +1,362 @@ +"use client"; + +/** + * @module @trigger.dev/sdk/chat/react + * + * React hooks for AI SDK chat transport integration. + * Use alongside `@trigger.dev/sdk/chat` for a type-safe, ergonomic DX. 
+ * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; + * import type { chat } from "@/trigger/chat"; + * + * function Chat() { + * const transport = useTriggerChatTransport({ + * task: "ai-chat", + * accessToken: ({ chatId }) => fetchToken(chatId), + * }); + * + * const { messages, sendMessage } = useChat({ transport }); + * } + * ``` + */ + +import { useCallback, useEffect, useRef, useState } from "react"; +import { TriggerChatTransport, type TriggerChatTransportOptions } from "./chat.js"; +import type { AnyTask, TaskIdentifier } from "@trigger.dev/core/v3"; +import { + PENDING_MESSAGE_INJECTED_TYPE, + type InferChatClientData, + type InferChatUIMessage, +} from "./ai.js"; +import type { UIMessage, ChatRequestOptions } from "ai"; + +/** + * Options for `useTriggerChatTransport`, with a type-safe `task` field. + * + * Pass a task type parameter to get compile-time validation of the task ID: + * ```ts + * useTriggerChatTransport({ task: "my-task", ... }) + * ``` + */ +export type UseTriggerChatTransportOptions = Omit< + TriggerChatTransportOptions>, + "task" +> & { + /** The task ID. Strongly typed when a task type parameter is provided. */ + task: TaskIdentifier; +}; + +export type { InferChatUIMessage }; + +/** + * React hook that creates and memoizes a `TriggerChatTransport` instance. + * + * The transport is created once on first render and reused for the lifetime + * of the component. This avoids the need for `useMemo` and ensures the + * transport's internal session state (run IDs, lastEventId, etc.) + * is preserved across re-renders. + * + * For dynamic access tokens, pass a function — it will be called on each + * request without needing to recreate the transport. + * + * The `onSessionChange` callback is kept in a ref so the transport always + * calls the latest version without needing to be recreated. 
+ * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; + * import type { chat } from "@/trigger/chat"; + * + * function Chat() { + * const transport = useTriggerChatTransport({ + * task: "ai-chat", + * accessToken: ({ chatId }) => fetchToken(chatId), + * }); + * + * const { messages, sendMessage } = useChat({ transport }); + * } + * ``` + */ +export function useTriggerChatTransport( + options: UseTriggerChatTransportOptions +): TriggerChatTransport { + const ref = useRef(null); + if (ref.current === null) { + ref.current = new TriggerChatTransport(options); + } + + // Keep onSessionChange up to date without recreating the transport + const { onSessionChange, renewRunAccessToken } = options; + useEffect(() => { + ref.current?.setOnSessionChange(onSessionChange); + }, [onSessionChange]); + + useEffect(() => { + ref.current?.setRenewRunAccessToken(renewRunAccessToken); + }, [renewRunAccessToken]); + + return ref.current; +} + +// --------------------------------------------------------------------------- +// usePendingMessages — manage steering messages during streaming +// --------------------------------------------------------------------------- + +/** A pending message tracked by `usePendingMessages`. */ +export type PendingMessage = { + id: string; + text: string; + /** How this message is being handled. */ + mode: "steering" | "queued"; + /** Whether the backend confirmed this message was injected mid-response. */ + injected: boolean; +}; + +/** Options for `usePendingMessages`. */ +export type UsePendingMessagesOptions = { + /** The chat transport instance. */ + transport: TriggerChatTransport; + /** The chat session ID. */ + chatId: string; + /** The current useChat status. */ + status: string; + /** The current messages from useChat. */ + messages: TUIMessage[]; + /** The setMessages function from useChat. 
*/ + setMessages: (fn: TUIMessage[] | ((prev: TUIMessage[]) => TUIMessage[])) => void; + /** The sendMessage function from useChat. */ + sendMessage: (message: { text: string }, options?: ChatRequestOptions) => void; + /** Metadata to include when sending (e.g. `{ model }` for model selection). */ + metadata?: Record; +}; + +/** A message embedded in an injection point data part. */ +export type InjectedMessage = { + id: string; + text: string; +}; + +/** Return value of `usePendingMessages`. */ +export type UsePendingMessagesReturn = { + /** Current pending messages with their mode and injection status. */ + pending: PendingMessage[]; + /** Send a steering message during streaming, or a normal message when ready. */ + steer: (text: string) => void; + /** Queue a message for the next turn (sent after current response finishes). */ + queue: (text: string) => void; + /** Promote a queued message to a steering message (sends via input stream immediately). */ + promoteToSteering: (id: string) => void; + /** Check if an assistant message part is an injection point. */ + isInjectionPoint: (part: unknown) => boolean; + /** Get the injected message IDs from an injection point part. */ + getInjectedMessageIds: (part: unknown) => string[]; + /** Get the injected messages (id + text) from an injection point part. Self-contained — works after turn complete. */ + getInjectedMessages: (part: unknown) => InjectedMessage[]; +}; + +/** + * React hook for managing pending messages (steering) during streaming. 
+ * + * Handles: + * - Sending messages via input stream during streaming (bypassing useChat) + * - Tracking which messages were injected mid-response vs queued for next turn + * - Inserting injected messages into the conversation on turn complete + * - Auto-sending non-injected messages as the next turn + * + * @example + * ```tsx + * const pending = usePendingMessages({ + * transport, chatId, status, messages, setMessages, sendMessage, + * metadata: { model }, + * }); + * + * // In the form: + *
<form onSubmit={(e) => {
+ *     e.preventDefault();
+ *     pending.steer(input);
+ *     setInput("");
+ *   }}>
+ *     <input value={input} onChange={(e) => setInput(e.target.value)} />
+ *   </form>
+ *
+ *   // Render pending messages:
+ *   {pending.pending.map(msg => (
+ *     <div key={msg.id}>{msg.text} — {msg.injected ? "Injected" : "Pending"}</div>
+ * ))} + * + * // Render injection points inline in assistant messages: + * {msg.parts.map((part, i) => + * pending.isInjectionPoint(part) + * ? + * : + * )} + * ``` + */ +export function usePendingMessages( + options: UsePendingMessagesOptions +): UsePendingMessagesReturn { + const { transport, chatId, status, messages, setMessages, sendMessage, metadata } = options; + + // Internal state: track messages with their mode + type InternalMessage = TUIMessage & { _mode: "steering" | "queued" }; + const [pendingMsgs, setPendingMsgs] = useState([]); + const injectedIdsRef = useRef>(new Set()); + const prevStatusRef = useRef(status); + + // Watch for injection confirmation chunks in streaming messages + useEffect(() => { + if (status !== "streaming") return; + let newlyInjected = false; + for (const msg of messages) { + if (msg.role !== "assistant") continue; + for (const part of msg.parts ?? []) { + if ((part as any).type === PENDING_MESSAGE_INJECTED_TYPE) { + const messageIds = (part as any).data?.messageIds; + if (Array.isArray(messageIds)) { + for (const id of messageIds) { + if (!injectedIdsRef.current.has(id)) { + injectedIdsRef.current.add(id); + newlyInjected = true; + } + } + } + } + } + } + // Remove injected steering messages from the pending overlay immediately + if (newlyInjected) { + setPendingMsgs((prev) => prev.filter((m) => !injectedIdsRef.current.has(m.id))); + } + }, [status, messages]); + + // Handle turn completion + useEffect(() => { + const turnCompleted = prevStatusRef.current === "streaming" && status === "ready"; + prevStatusRef.current = status; + if (!turnCompleted) return; + + // Auto-send non-injected messages as the next turn. + // This includes queued messages AND steering messages that weren't + // injected (arrived too late, no prepareStep boundary, etc.). + // Note: steering messages were also sent via sendPendingMessage to + // the backend's wire buffer, so the backend may already have them. 
+ // Calling sendMessage here ensures useChat subscribes to the response. + const toSend = pendingMsgs.filter((m) => !injectedIdsRef.current.has(m.id)); + + // Clean up + setPendingMsgs([]); + injectedIdsRef.current.clear(); + promotedIdsRef.current.clear(); + + // Auto-send as next turn + if (toSend.length > 0) { + const text = toSend.map((m) => (m.parts?.[0] as any)?.text ?? "").join("\n"); + sendMessage({ text }, metadata ? { metadata } : undefined); + } + }, [status, pendingMsgs, sendMessage, metadata, messages]); + + // Send a steering message (injected mid-response via prepareStep) + const steer = useCallback( + (text: string) => { + if (status === "streaming") { + const msg = { + id: crypto.randomUUID(), + role: "user" as const, + parts: [{ type: "text" as const, text }], + _mode: "steering" as const, + } as InternalMessage; + transport.sendPendingMessage(chatId, msg, metadata); + setPendingMsgs((prev) => [...prev, msg]); + } else { + // Not streaming — just send normally + sendMessage({ text }, metadata ? { metadata } : undefined); + } + }, + [status, transport, chatId, sendMessage, metadata] + ); + + // Queue a message for the next turn (no injection attempt) + const queue = useCallback( + (text: string) => { + if (status === "streaming") { + const msg = { + id: crypto.randomUUID(), + role: "user" as const, + parts: [{ type: "text" as const, text }], + _mode: "queued" as const, + } as InternalMessage; + setPendingMsgs((prev) => [...prev, msg]); + } else { + sendMessage({ text }, metadata ? 
{ metadata } : undefined); + } + }, + [status, sendMessage, metadata] + ); + + // Promote a queued message to steering (send via input stream immediately) + const promotedIdsRef = useRef>(new Set()); + const promoteToSteering = useCallback( + (id: string) => { + // Guard against double-click — ref check is synchronous + if (promotedIdsRef.current.has(id)) { + console.log("[usePendingMessages] promote blocked — already promoted:", id); + return; + } + console.log("[usePendingMessages] promoting:", id); + promotedIdsRef.current.add(id); + + setPendingMsgs((prev) => { + const msg = prev.find((m) => m.id === id); + if (!msg || msg._mode !== "queued") return prev; + transport.sendPendingMessage(chatId, msg, metadata); + return prev.map((m) => (m.id === id ? { ...m, _mode: "steering" as const } : m)); + }); + }, + [transport, chatId, metadata] + ); + + const isInjectionPoint = useCallback( + (part: unknown): boolean => + typeof part === "object" && + part !== null && + (part as any).type === PENDING_MESSAGE_INJECTED_TYPE, + [] + ); + + const getInjectedMessageIds = useCallback( + (part: unknown): string[] => { + if (!isInjectionPoint(part)) return []; + const ids = (part as any).data?.messageIds; + return Array.isArray(ids) ? ids : []; + }, + [isInjectionPoint] + ); + + const getInjectedMessages = useCallback( + (part: unknown): InjectedMessage[] => { + if (!isInjectionPoint(part)) return []; + const msgs = (part as any).data?.messages; + return Array.isArray(msgs) ? msgs : []; + }, + [isInjectionPoint] + ); + + const pending: PendingMessage[] = pendingMsgs.map((m) => ({ + id: m.id, + text: (m.parts?.[0] as any)?.text ?? 
"", + mode: m._mode, + injected: injectedIdsRef.current.has(m.id), + })); + + return { + pending, + steer, + queue, + promoteToSteering, + isInjectionPoint, + getInjectedMessageIds, + getInjectedMessages, + }; +} diff --git a/packages/trigger-sdk/src/v3/chat.test.ts b/packages/trigger-sdk/src/v3/chat.test.ts new file mode 100644 index 00000000000..3e610957501 --- /dev/null +++ b/packages/trigger-sdk/src/v3/chat.test.ts @@ -0,0 +1,2190 @@ +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest"; +import type { UIMessage, UIMessageChunk } from "ai"; +import { TriggerChatTransport, createChatTransport } from "./chat.js"; + +// Helper: encode text as SSE format +function sseEncode(chunks: (UIMessageChunk | Record)[]): string { + return chunks.map((chunk, i) => `id: ${i}\ndata: ${JSON.stringify(chunk)}\n\n`).join(""); +} + +// Helper: create a ReadableStream from SSE text +function createSSEStream(sseText: string): ReadableStream { + const encoder = new TextEncoder(); + return new ReadableStream({ + start(controller) { + controller.enqueue(encoder.encode(sseText)); + controller.close(); + }, + }); +} + +// Helper: create test UIMessages with unique IDs +let messageIdCounter = 0; + +function createUserMessage(text: string): UIMessage { + return { + id: `msg-user-${++messageIdCounter}`, + role: "user", + parts: [{ type: "text", text }], + }; +} + +function createAssistantMessage(text: string): UIMessage { + return { + id: `msg-assistant-${++messageIdCounter}`, + role: "assistant", + parts: [{ type: "text", text }], + }; +} + +// Sample UIMessageChunks as the AI SDK would produce +const sampleChunks: UIMessageChunk[] = [ + { type: "text-start", id: "part-1" }, + { type: "text-delta", id: "part-1", delta: "Hello" }, + { type: "text-delta", id: "part-1", delta: " world" }, + { type: "text-delta", id: "part-1", delta: "!" 
}, + { type: "text-end", id: "part-1" }, +]; + +describe("TriggerChatTransport", () => { + let originalFetch: typeof global.fetch; + + beforeEach(() => { + originalFetch = global.fetch; + }); + + afterEach(() => { + global.fetch = originalFetch; + vi.restoreAllMocks(); + }); + + describe("constructor", () => { + it("should create transport with required options", () => { + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: "test-token", + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + + it("should accept optional configuration", () => { + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: "test-token", + baseURL: "https://custom.trigger.dev", + streamKey: "custom-stream", + headers: { "X-Custom": "value" }, + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + + it("should accept a function for accessToken", () => { + let tokenCallCount = 0; + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: () => { + tokenCallCount++; + return `dynamic-token-${tokenCallCount}`; + }, + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + + it("should pass chatId and purpose to accessToken when triggering a run", async () => { + const accessTokenSpy = vi.fn().mockReturnValue("test-token"); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_resolve_at" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_tok", + }, + }); + } + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(sseEncode(sampleChunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: accessTokenSpy, + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-access-resolve", + messageId: undefined, + messages: [createUserMessage("Hi")], + abortSignal: undefined, + }); + const reader = stream.getReader(); + while (true) { + const { done } = await reader.read(); + if (done) break; + } + + expect(accessTokenSpy).toHaveBeenCalledWith({ + chatId: "chat-access-resolve", + purpose: "trigger", + }); + }); + + it("should pass chatId and purpose preload to accessToken when preloading", async () => { + const accessTokenSpy = vi.fn().mockReturnValue("test-token"); + + global.fetch = vi.fn().mockResolvedValue( + new Response(JSON.stringify({ id: "run_preload_at" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_pre", + }, + }) + ); + + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: accessTokenSpy, + baseURL: "https://api.test.trigger.dev", + }); + + await transport.preload("chat-preload-access"); + + expect(accessTokenSpy).toHaveBeenCalledWith({ + chatId: "chat-preload-access", + purpose: "preload", + }); + }); + }); + + describe("sendMessages", () => { + it("should trigger the task and return a ReadableStream of UIMessageChunks", async () => { + const 
triggerRunId = "run_abc123"; + const publicToken = "pub_token_xyz"; + + // Mock fetch to handle both the trigger request and the SSE stream request + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + // Handle the task trigger request + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: triggerRunId }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": publicToken, + }, + }); + } + + // Handle the SSE stream request + if (urlStr.includes("/realtime/v1/streams/")) { + const sseText = sseEncode(sampleChunks); + return new Response(createSSEStream(sseText), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: "test-token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages: UIMessage[] = [createUserMessage("Hello!")]; + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages, + abortSignal: undefined, + }); + + expect(stream).toBeInstanceOf(ReadableStream); + + // Read all chunks from the stream + const reader = stream.getReader(); + const receivedChunks: UIMessageChunk[] = []; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + receivedChunks.push(value); + } + + expect(receivedChunks).toHaveLength(sampleChunks.length); + expect(receivedChunks[0]).toEqual({ type: "text-start", id: "part-1" }); + expect(receivedChunks[1]).toEqual({ type: "text-delta", id: "part-1", delta: "Hello" }); + expect(receivedChunks[4]).toEqual({ type: "text-end", id: "part-1" }); + }); + + it("should send the correct payload to the trigger API", async 
() => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_test" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-chat-task", + accessToken: "test-token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages: UIMessage[] = [createUserMessage("Hello!")]; + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-123", + messageId: undefined, + messages, + abortSignal: undefined, + metadata: { custom: "data" }, + }); + + // Verify the trigger fetch call + const triggerCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/trigger") + ); + + expect(triggerCall).toBeDefined(); + const triggerUrl = + typeof triggerCall![0] === "string" ? 
triggerCall![0] : triggerCall![0].toString(); + expect(triggerUrl).toContain("/api/v1/tasks/my-chat-task/trigger"); + + const triggerBody = JSON.parse(triggerCall![1]?.body as string); + const payload = triggerBody.payload; + expect(payload.messages).toEqual(messages); + expect(payload.chatId).toBe("chat-123"); + expect(payload.trigger).toBe("submit-message"); + expect(payload.metadata).toEqual({ custom: "data" }); + }); + + it("should use the correct stream URL with custom streamKey", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_custom" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + streamKey: "my-custom-stream", + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }); + + // Verify the stream URL uses the custom stream key + const streamCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes( + "/realtime/v1/streams/" + ) + ); + + expect(streamCall).toBeDefined(); + const streamUrl = + typeof streamCall![0] === "string" ? 
streamCall![0] : streamCall![0].toString(); + expect(streamUrl).toContain("/realtime/v1/streams/run_custom/my-custom-stream"); + }); + + it("should include extra headers in stream requests", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_hdrs" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + headers: { "X-Custom-Header": "custom-value" }, + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }); + + // Verify the stream request includes custom headers + const streamCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? 
call[0] : call[0].toString()).includes( + "/realtime/v1/streams/" + ) + ); + + expect(streamCall).toBeDefined(); + const requestHeaders = streamCall![1]?.headers as Record; + expect(requestHeaders["X-Custom-Header"]).toBe("custom-value"); + }); + }); + + describe("reconnectToStream", () => { + it("should return null when no session exists for chatId", async () => { + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + }); + + const result = await transport.reconnectToStream({ + chatId: "nonexistent-chat", + }); + + expect(result).toBeNull(); + }); + + it("should reconnect to an existing session", async () => { + const triggerRunId = "run_reconnect"; + const publicToken = "pub_reconnect_token"; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: triggerRunId }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": publicToken, + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "part-1" }, + { type: "text-delta", id: "part-1", delta: "Reconnected!" 
}, + { type: "text-end", id: "part-1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First, send messages to establish a session + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-reconnect", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Now reconnect + const stream = await transport.reconnectToStream({ + chatId: "chat-reconnect", + }); + + expect(stream).toBeInstanceOf(ReadableStream); + + // Read the stream + const reader = stream!.getReader(); + const receivedChunks: UIMessageChunk[] = []; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + receivedChunks.push(value); + } + + expect(receivedChunks.length).toBeGreaterThan(0); + }); + }); + + describe("renewRunAccessToken", () => { + it("reconnects after renewing PAT when SSE returns 401", async () => { + const renewSpy = vi.fn().mockResolvedValue("fresh_pat"); + const streamFetchCountByRun = new Map(); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_renew_sse" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_initial", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const runMatch = urlStr.match(/\/streams\/([^/]+)\//); + const runKey = runMatch?.[1] ?? "unknown"; + const n = (streamFetchCountByRun.get(runKey) ?? 
0) + 1; + streamFetchCountByRun.set(runKey, n); + + if (n === 2) { + return new Response(null, { status: 401 }); + } + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "trigger-token", + baseURL: "https://api.test.trigger.dev", + renewRunAccessToken: renewSpy, + }); + + const firstStream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-renew-sse", + messageId: undefined, + messages: [createUserMessage("Hi")], + abortSignal: undefined, + }); + const firstReader = firstStream.getReader(); + while (true) { + const { done } = await firstReader.read(); + if (done) break; + } + + const stream = await transport.reconnectToStream({ chatId: "chat-renew-sse" }); + expect(stream).toBeInstanceOf(ReadableStream); + + const reader = stream!.getReader(); + const receivedChunks: UIMessageChunk[] = []; + while (true) { + const { done, value } = await reader.read(); + if (done) break; + receivedChunks.push(value); + } + + expect(receivedChunks.length).toBeGreaterThan(0); + expect(renewSpy).toHaveBeenCalledWith({ + chatId: "chat-renew-sse", + runId: "run_renew_sse", + }); + + const patStreamCall = (global.fetch as ReturnType).mock.calls.find( + (call: unknown[]) => { + const u = typeof call[0] === "string" ? 
call[0] : (call[0] as URL).toString(); + if (!u.includes("/realtime/v1/streams/")) return false; + const h = (call[1] as RequestInit | undefined)?.headers as Record; + return h?.["Authorization"] === "Bearer fresh_pat"; + } + ); + expect(patStreamCall).toBeDefined(); + }); + + it("surfaces 401 when renewal returns no token on reconnect", async () => { + const renewSpy = vi.fn().mockResolvedValue(undefined); + const streamFetchCountByRun = new Map(); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_fail_renew" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_initial", + }, + }); + } + if (urlStr.includes("/realtime/v1/streams/")) { + const runMatch = urlStr.match(/\/streams\/([^/]+)\//); + const runKey = runMatch?.[1] ?? "unknown"; + const n = (streamFetchCountByRun.get(runKey) ?? 
0) + 1; + streamFetchCountByRun.set(runKey, n); + + if (n === 1) { + const turnDone = { type: "__trigger_turn_complete", publicAccessToken: "pub_initial" }; + return new Response(createSSEStream(sseEncode([turnDone])), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + return new Response(null, { status: 401 }); + } + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "trigger-token", + baseURL: "https://api.test.trigger.dev", + renewRunAccessToken: renewSpy, + }); + + const firstStream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-fail-renew", + messageId: undefined, + messages: [createUserMessage("Hi")], + abortSignal: undefined, + }); + const fr = firstStream.getReader(); + while (true) { + const { done } = await fr.read(); + if (done) break; + } + + const stream = await transport.reconnectToStream({ chatId: "chat-fail-renew" }); + const reader = stream!.getReader(); + await expect(reader.read()).rejects.toMatchObject({ status: 401 }); + expect(renewSpy).toHaveBeenCalledWith({ + chatId: "chat-fail-renew", + runId: "run_fail_renew", + }); + }); + + it("retries sendInputStream after 401 when renewRunAccessToken returns a new PAT", async () => { + let inputCalls = 0; + const renewSpy = vi.fn().mockResolvedValue("pat_after_renew"); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_input_renew" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_session", + }, + }); + } + + if (urlStr.includes("/input/")) { + inputCalls++; + if (inputCalls === 1) { + return new Response(JSON.stringify({ error: "Unauthorized" }), { status: 401 }); + } + return new Response(JSON.stringify({ ok: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const completeChunk = { type: "__trigger_turn_complete", publicAccessToken: "pat_hold" }; + return new Response(createSSEStream(sseEncode([completeChunk])), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "trigger-token", + baseURL: "https://api.test.trigger.dev", + renewRunAccessToken: renewSpy, + }); + + const s1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-first", + messageId: undefined, + messages: [createUserMessage("One")], + abortSignal: undefined, + }); + const r1 = s1.getReader(); + while (true) { + const { done } = await r1.read(); + if (done) break; + } + + const s2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-first", + messageId: undefined, + messages: [createUserMessage("One"), createUserMessage("Two")], + abortSignal: undefined, + }); + const r2 = s2.getReader(); + while (true) { + const { done } = await r2.read(); + if (done) break; + } + + expect(renewSpy).toHaveBeenCalledWith({ + chatId: "chat-first", + runId: "run_input_renew", + }); + expect(inputCalls).toBe(2); + }); + }); + + describe("createChatTransport", () => { + it("should create a TriggerChatTransport instance", () => { + const 
transport = createChatTransport({ + task: "my-task", + accessToken: "token", + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + + it("should pass options through to the transport", () => { + const transport = createChatTransport({ + task: "custom-task", + accessToken: "custom-token", + baseURL: "https://custom.example.com", + streamKey: "custom-key", + headers: { "X-Test": "value" }, + }); + + expect(transport).toBeInstanceOf(TriggerChatTransport); + }); + }); + + describe("publicAccessToken from trigger response", () => { + it("should use x-trigger-jwt from trigger response as the stream auth token", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + // Return with x-trigger-jwt header — this public token should be + // used for the subsequent stream subscription request. + return new Response(JSON.stringify({ id: "run_pat" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "server-generated-public-token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // Verify the Authorization header uses the server-generated token + const authHeader = (init?.headers as Record)?.["Authorization"]; + expect(authHeader).toBe("Bearer server-generated-public-token"); + + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "caller-token", + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + 
trigger: "submit-message", + chatId: "chat-pat", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }); + + // Consume the stream + const reader = stream.getReader(); + while (true) { + const { done } = await reader.read(); + if (done) break; + } + + // Verify the stream subscription used the public token, not the caller token + const streamCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes( + "/realtime/v1/streams/" + ) + ); + expect(streamCall).toBeDefined(); + const streamHeaders = streamCall![1]?.headers as Record; + expect(streamHeaders["Authorization"]).toBe("Bearer server-generated-public-token"); + }); + }); + + describe("error handling", () => { + it("should propagate trigger API errors", async () => { + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ error: "Task not found" }), { + status: 404, + headers: { "content-type": "application/json" }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "nonexistent-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + await expect( + transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-error", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + }) + ).rejects.toThrow(); + }); + }); + + describe("abort signal", () => { + it("should close the stream gracefully when aborted", async () => { + let streamResolve: (() => void) | undefined; + const streamWait = new Promise((resolve) => { + streamResolve = resolve; + }); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_abort" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // Create a slow stream that waits before sending data + const stream = new ReadableStream({ + async start(controller) { + const encoder = new TextEncoder(); + controller.enqueue( + encoder.encode( + `id: 0\ndata: ${JSON.stringify({ type: "text-start", id: "p1" })}\n\n` + ) + ); + // Wait for the test to signal it's done + await streamWait; + controller.close(); + }, + }); + + return new Response(stream, { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const abortController = new AbortController(); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-abort", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: abortController.signal, + }); + + // Read the first chunk + const reader = stream.getReader(); + const first = await reader.read(); + expect(first.done).toBe(false); + + // Abort and clean up + abortController.abort(); + streamResolve?.(); + + // The stream should close — reading should return done + const next = await reader.read(); + expect(next.done).toBe(true); + }); + }); + + describe("multiple sessions", () => { + it("should track multiple chat sessions independently", async () => { + let callCount = 0; + + const turnCompleteChunk = { type: "__trigger_turn_complete" }; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + callCount++; + return new Response(JSON.stringify({ id: `run_multi_${callCount}` }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": `token_${callCount}`, + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // Include turn-complete chunk so the session is preserved + const chunks = [...sampleChunks, turnCompleteChunk]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // Start two independent chat sessions and consume the streams + const s1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "session-a", + messageId: undefined, + messages: [createUserMessage("Hello A")], + abortSignal: undefined, + }); + const r1 = s1.getReader(); + while (!(await r1.read()).done) {} + + const s2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "session-b", + messageId: undefined, + messages: [createUserMessage("Hello B")], + abortSignal: undefined, + }); + const r2 = s2.getReader(); + while (!(await r2.read()).done) {} + + // Both sessions should be independently reconnectable + const streamA = await transport.reconnectToStream({ chatId: "session-a" }); + const streamB = await transport.reconnectToStream({ chatId: "session-b" }); + const streamC = await transport.reconnectToStream({ chatId: "nonexistent" }); + + expect(streamA).toBeInstanceOf(ReadableStream); + expect(streamB).toBeInstanceOf(ReadableStream); + expect(streamC).toBeNull(); + }); + }); + + describe("dynamic accessToken", () => { + it("should call the accessToken function for each sendMessages call", async () => { + let tokenCallCount = 
0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: `run_dyn_${tokenCallCount}` }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "stream-token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: () => { + tokenCallCount++; + return `dynamic-token-${tokenCallCount}`; + }, + baseURL: "https://api.test.trigger.dev", + }); + + // First call — the token function should be invoked + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-dyn-1", + messageId: undefined, + messages: [createUserMessage("first")], + abortSignal: undefined, + }); + + const firstCount = tokenCallCount; + expect(firstCount).toBeGreaterThanOrEqual(1); + + // Second call — the token function should be invoked again + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-dyn-2", + messageId: undefined, + messages: [createUserMessage("second")], + abortSignal: undefined, + }); + + // Token function was called at least once more + expect(tokenCallCount).toBeGreaterThan(firstCount); + }); + }); + + describe("body merging", () => { + it("should merge ChatRequestOptions.body into the task payload", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_body" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-body", + messageId: undefined, + messages: [createUserMessage("test")], + abortSignal: undefined, + body: { systemPrompt: "You are helpful", temperature: 0.7 }, + }); + + const triggerCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/trigger") + ); + + const triggerBody = JSON.parse(triggerCall![1]?.body as string); + const payload = triggerBody.payload; + + // body properties should be merged into the payload + expect(payload.systemPrompt).toBe("You are helpful"); + expect(payload.temperature).toBe(0.7); + // Standard fields should still be present + expect(payload.chatId).toBe("chat-body"); + expect(payload.trigger).toBe("submit-message"); + }); + }); + + describe("message types", () => { + it("should handle regenerate-message trigger", async () => { + const fetchSpy = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_regen" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(""), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + global.fetch = fetchSpy; + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages: UIMessage[] = [ + createUserMessage("Hello!"), + createAssistantMessage("Hi there!"), + ]; + + await transport.sendMessages({ + trigger: "regenerate-message", + chatId: "chat-regen", + messageId: "msg-to-regen", + messages, + abortSignal: undefined, + }); + + // Verify the payload includes the regenerate trigger type and messageId + const triggerCall = fetchSpy.mock.calls.find((call: any[]) => + (typeof call[0] === "string" ? call[0] : call[0].toString()).includes("/trigger") + ); + + const triggerBody = JSON.parse(triggerCall![1]?.body as string); + const payload = triggerBody.payload; + expect(payload.trigger).toBe("regenerate-message"); + expect(payload.messageId).toBe("msg-to-regen"); + }); + }); + + describe("lastEventId tracking", () => { + it("should pass lastEventId to SSE subscription on subsequent turns", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; + + let triggerCallCount = 0; + const streamFetchCalls: { url: string; headers: Record }[] = []; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerCallCount++; + return new Response(JSON.stringify({ id: "run_eid" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token_eid", + }, + }); + } + + // Handle input stream sends (for second message) + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { + return new Response(JSON.stringify({ ok: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + streamFetchCalls.push({ + url: urlStr, + headers: (init?.headers as Record) ?? {}, + }); + + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + turnCompleteChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First message — triggers a new run + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-eid", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + // Second message — sends via input stream + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-eid", + messageId: undefined, + messages: [ + createUserMessage("Hello"), + createAssistantMessage("Hi!"), + createUserMessage("What's up?"), + ], + abortSignal: undefined, + }); + + const reader2 = stream2.getReader(); + while (true) { + const { done } = await reader2.read(); + if (done) 
break; + } + + // The second stream subscription should include a Last-Event-ID header + expect(streamFetchCalls.length).toBe(2); + const secondStreamHeaders = streamFetchCalls[1]!.headers; + // SSEStreamSubscription passes lastEventId as the Last-Event-ID header + expect(secondStreamHeaders["Last-Event-ID"]).toBeDefined(); + }); + }); + + describe("minimal wire payloads", () => { + it("should send only new messages via input stream on turn 2+", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; + const inputStreamPayloads: any[] = []; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_minimal" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token_minimal", + }, + }); + } + + // Capture input stream payloads (ApiClient wraps in { data: ... 
}) + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { + const body = JSON.parse(init?.body as string); + inputStreamPayloads.push(body.data); + return new Response(JSON.stringify({ ok: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [...sampleChunks, turnCompleteChunk]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const userMsg1 = createUserMessage("Hello"); + const assistantMsg = createAssistantMessage("Hi there!"); + const userMsg2 = createUserMessage("What's up?"); + + // Turn 1 — triggers a new run with full history + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-minimal", + messageId: undefined, + messages: [userMsg1], + abortSignal: undefined, + }); + const r1 = stream1.getReader(); + while (!(await r1.read()).done) {} + + // Turn 2 — sends via input stream, should only include NEW messages + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-minimal", + messageId: undefined, + messages: [userMsg1, assistantMsg, userMsg2], + abortSignal: undefined, + }); + const r2 = stream2.getReader(); + while (!(await r2.read()).done) {} + + // Verify: the input stream payload should only contain the new user message + expect(inputStreamPayloads).toHaveLength(1); + const sentPayload = inputStreamPayloads[0]; + // Only the new user message should be sent (backend already has the assistant response) + expect(sentPayload.messages).toHaveLength(1); + expect(sentPayload.messages[0]).toEqual(userMsg2); + }); + + it("should send full 
history on first message (trigger)", async () => { + let triggerPayload: any; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerPayload = JSON.parse(init?.body as string); + return new Response(JSON.stringify({ id: "run_full" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token_full", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + return new Response(createSSEStream(sseEncode(sampleChunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const messages = [ + createUserMessage("Hello"), + createAssistantMessage("Hi!"), + createUserMessage("More"), + ]; + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-full", + messageId: undefined, + messages, + abortSignal: undefined, + }); + + // First message always sends full history via trigger + expect(triggerPayload.payload.messages).toHaveLength(3); + }); + }); + + describe("AbortController cleanup", () => { + it("should terminate SSE connection after intercepting control chunk", async () => { + const controlChunk = { type: "__trigger_turn_complete" }; + + let streamAborted = false; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_abort_cleanup" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // Track abort signal + const signal = init?.signal; + if (signal) { + signal.addEventListener("abort", () => { + streamAborted = true; + }); + } + + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + controlChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-abort-cleanup", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Consume all chunks + const reader = stream.getReader(); + while (true) { + const { done } = await reader.read(); + if (done) break; + } + + // The internal AbortController should have aborted the fetch + expect(streamAborted).toBe(true); + }); + }); + + describe("async accessToken", () => { + it("should accept an async function for accessToken", async () => { + let tokenCallCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: `run_async_${tokenCallCount}` }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "stream-token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: async () => { + tokenCallCount++; + // Simulate async work (e.g. server action) + await new Promise((r) => setTimeout(r, 1)); + return `async-token-${tokenCallCount}`; + }, + baseURL: "https://api.test.trigger.dev", + }); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-async", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + expect(tokenCallCount).toBe(1); + }); + + it("should not resolve async token for input stream send flow", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; + + let tokenCallCount = 0; + let inputStreamSendCalled = false; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_async_wp" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "stream-token", + }, + }); + } + + // Handle input stream sends + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { + inputStreamSendCalled = true; + return new Response(JSON.stringify({ ok: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + turnCompleteChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: async () => { + tokenCallCount++; + await new Promise((r) => setTimeout(r, 1)); + return `async-wp-token-${tokenCallCount}`; + }, + baseURL: "https://api.test.trigger.dev", + }); + + // First message — triggers a new run (calls async token) + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-async-wp", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + const firstTokenCount = tokenCallCount; + + // Second message — should send via input stream (does NOT call async token) + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-async-wp", + messageId: undefined, + messages: [ + createUserMessage("Hello"), + createAssistantMessage("Hi!"), + createUserMessage("More"), + ], + abortSignal: 
undefined, + }); + + const reader2 = stream2.getReader(); + while (true) { + const { done } = await reader2.read(); + if (done) break; + } + + // Token function should NOT have been called again for the input stream path + expect(tokenCallCount).toBe(firstTokenCount); + expect(inputStreamSendCalled).toBe(true); + }); + }); + + describe("single-run mode (input stream loop)", () => { + it("should not forward turn-complete control chunk to consumer", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_single" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + turnCompleteChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-single", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Read all chunks — the control chunk should NOT appear + const reader = stream.getReader(); + const receivedChunks: UIMessageChunk[] = []; + while (true) { + const { done, value } = await reader.read(); + if (done) break; + receivedChunks.push(value); + } + + // All AI SDK chunks should be forwarded + 
expect(receivedChunks.length).toBe(sampleChunks.length + 1); // +1 for the finish chunk + // Control chunk should not be in the output + expect(receivedChunks.every((c) => c.type !== ("__trigger_turn_complete" as any))).toBe(true); + }); + + it("should send via input stream on second message instead of triggering a new run", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; + + let triggerCallCount = 0; + let inputStreamSendCalled = false; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL, init?: RequestInit) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerCallCount++; + return new Response(JSON.stringify({ id: "run_resume" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + }); + } + + // Handle input stream sends + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { + inputStreamSendCalled = true; + return new Response(JSON.stringify({ ok: true }), { + status: 200, + headers: { "content-type": "application/json" }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + turnCompleteChunk, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First message — triggers a new run + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-resume", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Consume stream 
+ const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + expect(triggerCallCount).toBe(1); + + // Second message — should send via input stream instead of triggering + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-resume", + messageId: undefined, + messages: [ + createUserMessage("Hello"), + createAssistantMessage("Hi!"), + createUserMessage("How are you?"), + ], + abortSignal: undefined, + }); + + // Consume second stream + const reader2 = stream2.getReader(); + while (true) { + const { done } = await reader2.read(); + if (done) break; + } + + // Should NOT have triggered a second run + expect(triggerCallCount).toBe(1); + // Should have sent via input stream + expect(inputStreamSendCalled).toBe(true); + }); + + it("should fall back to triggering a new run if stream closes without control chunk", async () => { + let triggerCallCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerCallCount++; + return new Response(JSON.stringify({ id: `run_fallback_${triggerCallCount}` }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // No control chunk — stream just ends after the finish + const chunks: UIMessageChunk[] = [ + { type: "text-start", id: "p1" }, + { type: "text-delta", id: "p1", delta: "Hello" }, + { type: "text-end", id: "p1" }, + ]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First message + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-fallback", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + expect(triggerCallCount).toBe(1); + + // Second message — no waitpoint token stored, should trigger a new run + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-fallback", + messageId: undefined, + messages: [ + createUserMessage("Hello"), + createAssistantMessage("Hi!"), + createUserMessage("Again"), + ], + abortSignal: undefined, + }); + + // Should have triggered a second run + expect(triggerCallCount).toBe(2); + }); + + it("should fall back to new run when sendInputStream fails", async () => { + const turnCompleteChunk = { type: "__trigger_turn_complete" }; + + let triggerCallCount = 0; + + global.fetch = vi.fn().mockImplementation(async (url: 
string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/api/v1/tasks/") && urlStr.includes("/trigger")) { + triggerCallCount++; + return new Response(JSON.stringify({ id: `run_fail_${triggerCallCount}` }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_token", + }, + }); + } + + // Input stream send fails + if (urlStr.includes("/realtime/v1/streams/") && urlStr.includes("/input/")) { + return new Response(JSON.stringify({ error: "Run not found" }), { + status: 404, + headers: { "content-type": "application/json" }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks: (UIMessageChunk | Record)[] = [ + ...sampleChunks, + { type: "finish" as const, id: "part-1" } as UIMessageChunk, + turnCompleteChunk, + ]; + + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + }); + + // First message + const stream1 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-fail", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + const reader1 = stream1.getReader(); + while (true) { + const { done } = await reader1.read(); + if (done) break; + } + + expect(triggerCallCount).toBe(1); + + // Second message — sendInputStream will fail, should fall back to new run + const stream2 = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-fail", + messageId: undefined, + messages: [ + createUserMessage("Hello"), + createAssistantMessage("Hi!"), + createUserMessage("Again"), + ], + abortSignal: undefined, + }); + + const reader2 = stream2.getReader(); + while (true) { 
+ const { done } = await reader2.read(); + if (done) break; + } + + // Should have triggered a second run as fallback + expect(triggerCallCount).toBe(2); + }); + }); + + describe("onSessionChange", () => { + it("should fire when a new session is created", async () => { + const onSessionChange = vi.fn(); + const triggerRunId = "run_session_new"; + const publicToken = "pub_session_new"; + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: triggerRunId }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": publicToken, + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [...sampleChunks, { type: "__trigger_turn_complete" }]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + onSessionChange, + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-1", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Session created notification should have fired + expect(onSessionChange).toHaveBeenCalledWith("chat-1", { + runId: triggerRunId, + publicAccessToken: publicToken, + lastEventId: undefined, + }); + + // Consume stream + const reader = stream.getReader(); + while (!(await reader.read()).done) {} + + // Should also fire with updated lastEventId on turn complete + const lastCall = onSessionChange.mock.calls[onSessionChange.mock.calls.length - 1]!; + expect(lastCall![0]).toBe("chat-1"); + expect(lastCall![1]).not.toBeNull(); + 
expect(lastCall![1].lastEventId).toBeDefined(); + }); + + it("should preserve session when stream ends naturally (run stays alive between turns)", async () => { + const onSessionChange = vi.fn(); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_end" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_end", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + // No turn-complete chunk — stream ends naturally (run completed) + return new Response(createSSEStream(sseEncode(sampleChunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + onSessionChange, + }); + + const stream = await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-end", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Consume the stream fully + const reader = stream.getReader(); + while (!(await reader.read()).done) {} + + // Session should have been created but NOT deleted — the run stays + // alive between turns and the session is needed for reconnection. + expect(onSessionChange).toHaveBeenCalledWith( + "chat-end", + expect.objectContaining({ + runId: "run_end", + }) + ); + expect(onSessionChange).not.toHaveBeenCalledWith("chat-end", null); + }); + + it("should be updatable via setOnSessionChange", async () => { + const onSessionChange1 = vi.fn(); + const onSessionChange2 = vi.fn(); + + global.fetch = vi.fn().mockImplementation(async (url: string | URL) => { + const urlStr = typeof url === "string" ? 
url : url.toString(); + + if (urlStr.includes("/trigger")) { + return new Response(JSON.stringify({ id: "run_update" }), { + status: 200, + headers: { + "content-type": "application/json", + "x-trigger-jwt": "pub_update", + }, + }); + } + + if (urlStr.includes("/realtime/v1/streams/")) { + const chunks = [...sampleChunks, { type: "__trigger_turn_complete" }]; + return new Response(createSSEStream(sseEncode(chunks)), { + status: 200, + headers: { + "content-type": "text/event-stream", + "X-Stream-Version": "v1", + }, + }); + } + + throw new Error(`Unexpected fetch URL: ${urlStr}`); + }); + + const transport = new TriggerChatTransport({ + task: "my-task", + accessToken: "token", + baseURL: "https://api.test.trigger.dev", + onSessionChange: onSessionChange1, + }); + + // Update the callback before sending + transport.setOnSessionChange(onSessionChange2); + + await transport.sendMessages({ + trigger: "submit-message", + chatId: "chat-update", + messageId: undefined, + messages: [createUserMessage("Hello")], + abortSignal: undefined, + }); + + // Only onSessionChange2 should have been called + expect(onSessionChange1).not.toHaveBeenCalled(); + expect(onSessionChange2).toHaveBeenCalled(); + }); + }); +}); diff --git a/packages/trigger-sdk/src/v3/chat.ts b/packages/trigger-sdk/src/v3/chat.ts new file mode 100644 index 00000000000..760788897c0 --- /dev/null +++ b/packages/trigger-sdk/src/v3/chat.ts @@ -0,0 +1,923 @@ +/** + * @module @trigger.dev/sdk/chat + * + * Browser-safe module for AI SDK chat transport integration. + * Use this on the frontend with the AI SDK's `useChat` hook. + * + * For backend helpers (`chatTask`, `pipeChat`), use `@trigger.dev/sdk/ai` instead. 
+ * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; + * + * function Chat({ accessToken }: { accessToken: string }) { + * const { messages, sendMessage, status } = useChat({ + * transport: new TriggerChatTransport({ + * task: "my-chat-task", + * accessToken: ({ chatId }) => mintTriggerToken(chatId), + * }), + * }); + * } + * ``` + */ + +import type { ChatTransport, UIMessage, UIMessageChunk, ChatRequestOptions } from "ai"; +import { ApiClient, SSEStreamSubscription } from "@trigger.dev/core/v3"; + +/** + * Detect 401/403 from realtime/input-stream calls without relying on `instanceof` + * (Vitest can load duplicate `@trigger.dev/core` copies, which breaks subclass checks). + */ +function isRunPatAuthError(error: unknown): boolean { + if (error === null || typeof error !== "object") { + return false; + } + const e = error as { name?: string; status?: number }; + return e.name === "TriggerApiError" && (e.status === 401 || e.status === 403); +} +import { CHAT_MESSAGES_STREAM_ID, CHAT_STOP_STREAM_ID } from "./chat-constants.js"; + +const DEFAULT_STREAM_KEY = "chat"; +const DEFAULT_BASE_URL = "https://api.trigger.dev"; +const DEFAULT_STREAM_TIMEOUT_SECONDS = 120; + +/** + * Arguments passed to {@link TriggerChatTransportOptions.renewRunAccessToken}. + */ +export type RenewRunAccessTokenParams = { + /** Same `chatId` passed to `sendMessages` / `useChat` — your app’s conversation id. */ + chatId: string; + /** The durable Trigger.dev run backing this chat session. */ + runId: string; +}; + +/** + * Arguments passed when resolving {@link TriggerChatTransportOptions.accessToken} as a function. + */ +export type ResolveChatAccessTokenParams = { + /** Conversation id for this trigger or preload. */ + chatId: string; + /** + * `trigger` — token used to call `triggerTask` from `sendMessages` (new run or after session ended). + * `preload` — same, but from `preload()`. 
 */ + purpose: "trigger" | "preload"; +}; + +/** + * Options for creating a TriggerChatTransport. + */ +export type TriggerChatTransportOptions<TClientData = Record<string, unknown>> = { + /** + * The Trigger.dev task ID to trigger for chat completions. + * This task should be defined using `chatTask()` from `@trigger.dev/sdk/ai`, + * or a regular `task()` that uses `pipeChat()`. + */ + task: string; + + /** + * An access token for authenticating with the Trigger.dev API. + * + * This must be a token with permission to trigger the task. You can use: + * - A **trigger public token** created via `auth.createTriggerPublicToken(taskId)` (recommended for frontend use) + * - A **secret API key** (for server-side use only — never expose in the browser) + * + * Can also be a function that returns a token string (sync or async), + * useful for dynamic token refresh or passing a Next.js server action directly. + * The function receives `chatId` and `purpose` (`trigger` vs `preload`) so you can mint or log per conversation. + */ + accessToken: string | ((params: ResolveChatAccessTokenParams) => string | Promise<string>); + + /** + * Base URL for the Trigger.dev API. + * @default "https://api.trigger.dev" + */ + baseURL?: string; + + /** + * The stream key where the task pipes UIMessageChunk data. + * When using `chatTask()` or `pipeChat()`, this is handled automatically. + * Only set this if you're using a custom stream key. + * + * @default "chat" + */ + streamKey?: string; + + /** + * Additional headers to include in API requests to Trigger.dev. + */ + headers?: Record<string, string>; + + /** + * The number of seconds to wait for the realtime stream to produce data + * before timing out. + * + * @default 120 + */ + streamTimeoutSeconds?: number; + + /** + * Default client data included in every request payload. + * Merged with per-call `metadata` from `sendMessage()` — per-call values + * take precedence over transport-level defaults. + * + * When the task uses `clientDataSchema`, this is typed to match the schema. 
+ * + * @example + * ```ts + * new TriggerChatTransport({ + * task: "my-chat", + * accessToken, + * clientData: { userId: currentUser.id }, + * }); + * ``` + */ + clientData?: TClientData extends Record ? TClientData : Record; + + /** + * Restore active chat sessions from external storage (e.g. localStorage). + * + * After a page refresh, pass previously persisted sessions here so the + * transport can reconnect to existing runs instead of starting new ones. + * Use `getSession()` to retrieve session state for persistence. + * + * @example + * ```ts + * new TriggerChatTransport({ + * task: "my-chat", + * accessToken, + * sessions: { + * "chat-abc": { runId: "run_123", publicAccessToken: "...", lastEventId: "42" }, + * }, + * }); + * ``` + */ + sessions?: Record; + + /** + * Called whenever a chat session's state changes. + * + * Fires when: + * - A new session is created (after triggering a task) + * - A turn completes (lastEventId updated) + * - A session is removed (run ended or input stream send failed) — `session` will be `null` + * + * Use this to persist session state for reconnection after page refreshes, + * without needing to call `getSession()` manually. + * + * @example + * ```ts + * new TriggerChatTransport({ + * task: "my-chat", + * accessToken, + * onSessionChange: (chatId, session) => { + * if (session) { + * localStorage.setItem(`session:${chatId}`, JSON.stringify(session)); + * } else { + * localStorage.removeItem(`session:${chatId}`); + * } + * }, + * }); + * ``` + */ + onSessionChange?: ( + chatId: string, + session: { runId: string; publicAccessToken: string; lastEventId?: string } | null + ) => void; + + /** + * Options forwarded to the Trigger.dev API when starting a new run. + * Only applies to the first message — subsequent messages reuse the same run. + * + * A `chat:{chatId}` tag is automatically added to every run. 
+ * + * @example + * ```ts + * new TriggerChatTransport({ + * task: "my-chat", + * accessToken, + * triggerOptions: { + * tags: ["user:123"], + * queue: "chat-queue", + * }, + * }); + * ``` + */ + triggerOptions?: { + /** Additional tags for the run. A `chat:{chatId}` tag is always added automatically. */ + tags?: string[]; + /** Queue name for the run. */ + queue?: string; + /** Maximum retry attempts. */ + maxAttempts?: number; + /** Machine preset for the run. */ + machine?: + | "micro" + | "small-1x" + | "small-2x" + | "medium-1x" + | "medium-2x" + | "large-1x" + | "large-2x"; + /** Priority (lower = higher priority). */ + priority?: number; + }; + + /** + * Mint a fresh run-scoped public access token for an existing run (same shape as `x-trigger-jwt` + * after trigger). Call from your server with `auth.createPublicToken` using `TRIGGER_ACCESS_KEY` + * and scopes `read:runs:` and `write:inputStreams:`. + * + * When the stored PAT expires, the transport invokes this once and retries the failing realtime + * or input-stream request. If renewal fails or is omitted, auth errors are surfaced to the caller. + * + * Receives `chatId` and `runId` so your server action can persist the new PAT keyed by conversation. + */ + renewRunAccessToken?: ( + params: RenewRunAccessTokenParams + ) => string | undefined | null | Promise; +}; + +/** + * Internal state for tracking active chat sessions. + * @internal + */ +type ChatSessionState = { + runId: string; + publicAccessToken: string; + /** Last SSE event ID — used to resume the stream without replaying old events. */ + lastEventId?: string; + /** Set when the stream was aborted mid-turn (stop). On reconnect, skip chunks until __trigger_turn_complete. */ + skipToTurnComplete?: boolean; +}; + +/** + * A custom AI SDK `ChatTransport` that runs chat completions as durable Trigger.dev tasks. + * + * When `sendMessages` is called, the transport: + * 1. 
Triggers a Trigger.dev task (or sends to an existing run via input streams) + * 2. Subscribes to the task's realtime stream to receive `UIMessageChunk` data + * 3. Returns a `ReadableStream` that the AI SDK processes natively + * + * Calling `stop()` from `useChat` sends a stop signal via input streams, which + * aborts the current `streamText` call in the task without ending the run. + * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { TriggerChatTransport } from "@trigger.dev/sdk/chat"; + * + * function Chat() { + * const { messages, sendMessage, stop, status } = useChat({ + * transport: new TriggerChatTransport({ + * task: "my-chat-task", + * accessToken: ({ chatId }) => fetchTriggerToken(chatId), + * }), + * }); + * + * // stop() sends a stop signal — the task aborts streamText but keeps the run alive + * } + * ``` + */ +export class TriggerChatTransport implements ChatTransport<UIMessage> { + private readonly taskId: string; + private readonly staticAccessToken: string | undefined; + private readonly resolveAccessTokenFn: + | ((params: ResolveChatAccessTokenParams) => string | Promise<string>) + | undefined; + private readonly baseURL: string; + private readonly streamKey: string; + private readonly extraHeaders: Record<string, string>; + private readonly streamTimeoutSeconds: number; + private readonly defaultMetadata: Record<string, unknown> | undefined; + private readonly triggerOptions: TriggerChatTransportOptions["triggerOptions"]; + private _onSessionChange: + | (( + chatId: string, + session: { runId: string; publicAccessToken: string; lastEventId?: string } | null + ) => void) + | undefined; + + private renewRunAccessToken: TriggerChatTransportOptions["renewRunAccessToken"] | undefined; + + private sessions: Map<string, ChatSessionState> = new Map(); + private activeStreams: Map<string, AbortController> = new Map(); + + constructor(options: TriggerChatTransportOptions) { + this.taskId = options.task; + if (typeof options.accessToken === "function") { + this.staticAccessToken = undefined; + this.resolveAccessTokenFn = 
options.accessToken; + } else { + this.staticAccessToken = options.accessToken; + this.resolveAccessTokenFn = undefined; + } + this.baseURL = options.baseURL ?? DEFAULT_BASE_URL; + this.streamKey = options.streamKey ?? DEFAULT_STREAM_KEY; + this.extraHeaders = options.headers ?? {}; + this.streamTimeoutSeconds = options.streamTimeoutSeconds ?? DEFAULT_STREAM_TIMEOUT_SECONDS; + this.defaultMetadata = options.clientData; + this.triggerOptions = options.triggerOptions; + this._onSessionChange = options.onSessionChange; + this.renewRunAccessToken = options.renewRunAccessToken; + + // Restore sessions from external storage + if (options.sessions) { + for (const [chatId, session] of Object.entries(options.sessions)) { + this.sessions.set(chatId, { + runId: session.runId, + publicAccessToken: session.publicAccessToken, + lastEventId: session.lastEventId, + }); + } + } + } + + sendMessages = async ( + options: { + trigger: "submit-message" | "regenerate-message"; + chatId: string; + messageId: string | undefined; + messages: UIMessage[]; + abortSignal: AbortSignal | undefined; + } & ChatRequestOptions + ): Promise<ReadableStream<UIMessageChunk>> => { + const { trigger, chatId, messageId, messages, abortSignal, body, metadata } = options; + + const mergedMetadata = + this.defaultMetadata || metadata + ? { ...(this.defaultMetadata ?? {}), ...((metadata as Record<string, unknown>) ?? {}) } + : undefined; + + const payload = { + ...(body ?? {}), + messages, + chatId, + trigger, + messageId, + metadata: mergedMetadata, + }; + + const session = this.sessions.get(chatId); + let isContinuation = false; + let previousRunId: string | undefined; + // If we have an existing run, send the message via input stream + // to resume the conversation in the same run. + if (session?.runId) { + const minimalPayload = { + ...payload, + messages: trigger === "submit-message" ? 
messages.slice(-1) : messages, + }; + + const sendChatMessages = async (token: string) => { + const apiClient = new ApiClient(this.baseURL, token); + await apiClient.sendInputStream(session.runId, CHAT_MESSAGES_STREAM_ID, minimalPayload); + }; + + let inputSendOk = false; + + try { + await sendChatMessages(session.publicAccessToken); + inputSendOk = true; + } catch (err) { + if (isRunPatAuthError(err) && this.renewRunAccessToken) { + const newToken = await this.renewRunPatForSession(chatId, session.runId); + if (newToken) { + try { + await sendChatMessages(newToken); + inputSendOk = true; + } catch (err2) { + throw err2; + } + } else { + throw err; + } + } else if (isRunPatAuthError(err)) { + throw err; + } else { + previousRunId = session.runId; + this.sessions.delete(chatId); + this.notifySessionChange(chatId, null); + isContinuation = true; + } + } + + if (inputSendOk) { + const currentSession = this.sessions.get(chatId); + if (!currentSession?.runId) { + throw new Error("TriggerChatTransport: session missing after input stream send"); + } + + const activeStream = this.activeStreams.get(chatId); + if (activeStream) { + activeStream.abort(); + this.activeStreams.delete(chatId); + } + + return this.subscribeToStream( + currentSession.runId, + currentSession.publicAccessToken, + abortSignal, + chatId + ); + } + } + + // First message or run has ended — trigger a new run + const currentToken = await this.resolveAccessToken({ chatId, purpose: "trigger" }); + const apiClient = new ApiClient(this.baseURL, currentToken); + + // Auto-tag with chatId; merge with user-provided tags (API limit: 5 tags) + const autoTags = [`chat:${chatId}`]; + const userTags = this.triggerOptions?.tags ?? []; + const tags = [...autoTags, ...userTags].slice(0, 5); + + const triggerResponse = await apiClient.triggerTask(this.taskId, { + payload: { + ...payload, + continuation: isContinuation, + ...(previousRunId ? 
{ previousRunId } : {}), + }, + options: { + payloadType: "application/json", + tags, + queue: this.triggerOptions?.queue ? { name: this.triggerOptions.queue } : undefined, + maxAttempts: this.triggerOptions?.maxAttempts, + machine: this.triggerOptions?.machine, + priority: this.triggerOptions?.priority, + }, + }); + + const runId = triggerResponse.id; + const publicAccessToken = + "publicAccessToken" in triggerResponse + ? (triggerResponse as { publicAccessToken?: string }).publicAccessToken + : undefined; + + const newSession: ChatSessionState = { + runId, + publicAccessToken: publicAccessToken ?? currentToken, + }; + this.sessions.set(chatId, newSession); + this.notifySessionChange(chatId, newSession); + return this.subscribeToStream(runId, publicAccessToken ?? currentToken, abortSignal, chatId); + }; + + /** + * Send a message to the running task via input stream without disrupting + * the current streaming response. Use this to send steering/pending messages + * while the agent is actively streaming. + * + * Unlike `sendMessage()` from useChat, this does NOT: + * - Add the message to useChat's local message state + * - Cancel the active stream subscription + * - Start a new response stream + * + * The message is delivered to the task's `messagesInput.on()` listener + * and can be injected between tool-call steps via the `pendingMessages` + * configuration. + * + * @returns `true` if the message was sent, `false` if there's no active session. + */ + sendPendingMessage = async ( + chatId: string, + message: UIMessage, + metadata?: Record + ): Promise => { + const session = this.sessions.get(chatId); + if (!session?.runId) return false; + + const mergedMetadata = + this.defaultMetadata || metadata + ? { ...(this.defaultMetadata ?? {}), ...(metadata ?? 
{}) } + : undefined; + + const payload = { + messages: [message], + chatId, + trigger: "submit-message" as const, + metadata: mergedMetadata, + }; + + const sendPending = async (token: string) => { + const apiClient = new ApiClient(this.baseURL, token); + await apiClient.sendInputStream(session.runId, CHAT_MESSAGES_STREAM_ID, payload); + }; + + try { + await sendPending(session.publicAccessToken); + return true; + } catch (err) { + if (isRunPatAuthError(err) && this.renewRunAccessToken) { + const newToken = await this.renewRunPatForSession(chatId, session.runId); + if (newToken) { + try { + await sendPending(newToken); + return true; + } catch (err2) { + throw err2; + } + } + throw err; + } + if (isRunPatAuthError(err)) { + throw err; + } + return false; + } + }; + + reconnectToStream = async ( + options: { + chatId: string; + } & ChatRequestOptions + ): Promise | null> => { + const session = this.sessions.get(options.chatId); + if (!session) { + return null; + } + + // Deduplicate: if there's already an active stream for this chatId, + // return null so the second caller no-ops. + if (this.activeStreams.has(options.chatId)) { + return null; + } + + const abortController = new AbortController(); + this.activeStreams.set(options.chatId, abortController); + + return this.subscribeToStream( + session.runId, + session.publicAccessToken, + abortController.signal, + options.chatId, + { sendStopOnAbort: false } + ); + }; + + /** + * Get the current session state for a chat, suitable for external persistence. + * + * Returns `undefined` if no active session exists for this chatId. + * Persist the returned value to localStorage so it can be restored + * after a page refresh via `restoreSession()`. 
+ * + * @example + * ```ts + * const session = transport.getSession(chatId); + * if (session) { + * localStorage.setItem(`session:${chatId}`, JSON.stringify(session)); + * } + * ``` + */ + getSession = ( + chatId: string + ): { runId: string; publicAccessToken: string; lastEventId?: string } | undefined => { + const session = this.sessions.get(chatId); + if (!session) return undefined; + return { + runId: session.runId, + publicAccessToken: session.publicAccessToken, + lastEventId: session.lastEventId, + }; + }; + + /** + * Update the `onSessionChange` callback. + * Useful for React hooks that need to update the callback without recreating the transport. + */ + setOnSessionChange( + callback: + | (( + chatId: string, + session: { runId: string; publicAccessToken: string; lastEventId?: string } | null + ) => void) + | undefined + ): void { + this._onSessionChange = callback; + } + + /** + * Update the run PAT renewal callback without recreating the transport. + */ + setRenewRunAccessToken(fn: TriggerChatTransportOptions["renewRunAccessToken"] | undefined): void { + this.renewRunAccessToken = fn; + } + + /** + * Eagerly trigger a run for a chat before the first message is sent. + * This allows initialization (DB setup, context loading) to happen + * while the user is still typing, reducing first-response latency. + * + * The task's `onPreload` hook fires immediately. The run then waits + * for the first message via input stream. When `sendMessages` is called + * later, it detects the existing session and sends via input stream + * instead of triggering a new run. + * + * No-op if a session already exists for this chatId. 
+ */ + async preload(chatId: string, options?: { idleTimeoutInSeconds?: number }): Promise { + // Don't preload if session already exists + if (this.sessions.get(chatId)?.runId) return; + + const payload = { + messages: [] as never[], + chatId, + trigger: "preload" as const, + metadata: this.defaultMetadata, + ...(options?.idleTimeoutInSeconds !== undefined + ? { idleTimeoutInSeconds: options.idleTimeoutInSeconds } + : {}), + }; + + const currentToken = await this.resolveAccessToken({ chatId, purpose: "preload" }); + const apiClient = new ApiClient(this.baseURL, currentToken); + + const autoTags = [`chat:${chatId}`, "preload:true"]; + const userTags = this.triggerOptions?.tags ?? []; + const tags = [...autoTags, ...userTags].slice(0, 5); + + const triggerResponse = await apiClient.triggerTask(this.taskId, { + payload, + options: { + payloadType: "application/json", + tags, + queue: this.triggerOptions?.queue ? { name: this.triggerOptions.queue } : undefined, + maxAttempts: this.triggerOptions?.maxAttempts, + machine: this.triggerOptions?.machine, + priority: this.triggerOptions?.priority, + }, + }); + + const runId = triggerResponse.id; + const publicAccessToken = + "publicAccessToken" in triggerResponse + ? (triggerResponse as { publicAccessToken?: string }).publicAccessToken + : undefined; + + const newSession: ChatSessionState = { + runId, + publicAccessToken: publicAccessToken ?? 
currentToken, + }; + this.sessions.set(chatId, newSession); + this.notifySessionChange(chatId, newSession); + } + + private async resolveAccessToken(params: ResolveChatAccessTokenParams): Promise { + if (this.staticAccessToken !== undefined) { + return this.staticAccessToken; + } + return await this.resolveAccessTokenFn!(params); + } + + private notifySessionChange(chatId: string, session: ChatSessionState | null): void { + if (!this._onSessionChange) return; + if (session) { + this._onSessionChange(chatId, { + runId: session.runId, + publicAccessToken: session.publicAccessToken, + lastEventId: session.lastEventId, + }); + } else { + this._onSessionChange(chatId, null); + } + } + + private async renewRunPatForSession(chatId: string, runId: string): Promise { + const renew = this.renewRunAccessToken; + if (!renew) { + return undefined; + } + + try { + const token = await renew({ chatId, runId }); + if (typeof token !== "string" || token.length === 0) { + return undefined; + } + + const session = this.sessions.get(chatId); + if (!session || session.runId !== runId) { + return undefined; + } + + session.publicAccessToken = token; + this.notifySessionChange(chatId, session); + return token; + } catch { + return undefined; + } + } + + private subscribeToStream( + runId: string, + accessToken: string, + abortSignal: AbortSignal | undefined, + chatId?: string, + options?: { sendStopOnAbort?: boolean } + ): ReadableStream { + // When resuming a run, skip past previously-seen events + // so we only receive the new turn's response. + const session = chatId ? this.sessions.get(chatId) : undefined; + + // Create an internal AbortController so we can terminate the underlying + // fetch connection when we're done reading (e.g. after intercepting the + // control chunk). Without this, the SSE connection stays open and leaks. + const internalAbort = new AbortController(); + const combinedSignal = abortSignal + ? 
AbortSignal.any([abortSignal, internalAbort.signal]) + : internalAbort.signal; + + // When the caller aborts (user calls stop()), close the SSE connection. + // Only send a stop signal to the task if this is a user-initiated stop + // (sendStopOnAbort), not an internal stream management abort. + if (abortSignal) { + abortSignal.addEventListener( + "abort", + () => { + if (options?.sendStopOnAbort !== false && session) { + session.skipToTurnComplete = true; + const api = new ApiClient(this.baseURL, session.publicAccessToken); + api.sendInputStream(session.runId, CHAT_STOP_STREAM_ID, { stop: true }).catch(() => {}); // Best-effort + } + internalAbort.abort(); + }, + { once: true } + ); + } + + const streamUrl = `${this.baseURL}/realtime/v1/streams/${runId}/${this.streamKey}`; + + return new ReadableStream({ + start: async (controller) => { + const connectSseOnce = async (token: string) => { + const subscription = new SSEStreamSubscription(streamUrl, { + headers: { + Authorization: `Bearer ${token}`, + ...this.extraHeaders, + }, + signal: combinedSignal, + timeoutInSeconds: this.streamTimeoutSeconds, + lastEventId: session?.lastEventId, + }); + const sseStream = await subscription.subscribe(); + const reader = sseStream.getReader(); + try { + const first = await reader.read(); + if (first.done) { + reader.releaseLock(); + return null; + } + return { reader, primed: first.value }; + } catch (readErr) { + reader.releaseLock(); + throw readErr; + } + }; + + try { + let reader: ReadableStreamDefaultReader<{ + id: string; + chunk: unknown; + timestamp: number; + }>; + let primed: { id: string; chunk: unknown; timestamp: number } | undefined; + + try { + const opened = await connectSseOnce(accessToken); + if (opened === null) { + controller.close(); + return; + } + reader = opened.reader; + primed = opened.primed; + } catch (e) { + if (isRunPatAuthError(e) && chatId && this.renewRunAccessToken) { + const newToken = await this.renewRunPatForSession(chatId, runId); + if 
(newToken) { + const opened = await connectSseOnce(newToken); + if (opened === null) { + controller.close(); + return; + } + reader = opened.reader; + primed = opened.primed; + } else { + controller.error(e instanceof Error ? e : new Error(String(e))); + return; + } + } else if (isRunPatAuthError(e)) { + controller.error(e instanceof Error ? e : new Error(String(e))); + return; + } else { + throw e; + } + } + + let chunkCount = 0; + + try { + while (true) { + let value: { id: string; chunk: unknown; timestamp: number }; + if (primed !== undefined) { + value = primed; + primed = undefined; + } else { + const next = await reader.read(); + if (next.done) { + controller.close(); + return; + } + value = next.value; + } + + if (combinedSignal.aborted) { + internalAbort.abort(); + await reader.cancel(); + controller.close(); + return; + } + + // Track the last event ID so we can resume from here + if (value.id && session) { + session.lastEventId = value.id; + } + + // Guard against heartbeat or malformed SSE events + if (value.chunk != null && typeof value.chunk === "object") { + const chunk = value.chunk as Record; + + // After a stop, skip leftover chunks from the stopped turn + // until we see the __trigger_turn_complete marker. 
+ if (session?.skipToTurnComplete) { + if (chunk.type === "__trigger_turn_complete") { + session.skipToTurnComplete = false; + chunkCount = 0; + } + continue; + } + + if (chunk.type === "__trigger_turn_complete" && chatId) { + // Update token if a refreshed one was provided in the chunk + if (session && typeof chunk.publicAccessToken === "string") { + session.publicAccessToken = chunk.publicAccessToken; + } + // Notify with updated session (including refreshed token) + if (session) { + this.notifySessionChange(chatId, session); + } + internalAbort.abort(); + try { + controller.close(); + } catch { + // Controller may already be closed + } + return; + } + + chunkCount++; + controller.enqueue(chunk as unknown as UIMessageChunk); + } + } + } catch (readError) { + reader.releaseLock(); + throw readError; + } + } catch (error) { + if (error instanceof Error && error.name === "AbortError") { + try { + controller.close(); + } catch { + // Controller may already be closed + } + return; + } + + controller.error(error); + } + }, + }); + } +} + +/** + * Creates a new `TriggerChatTransport` instance. + * + * @example + * ```tsx + * import { useChat } from "@ai-sdk/react"; + * import { createChatTransport } from "@trigger.dev/sdk/chat"; + * + * const transport = createChatTransport({ + * task: "my-chat-task", + * accessToken: publicAccessToken, + * }); + * + * function Chat() { + * const { messages, sendMessage } = useChat({ transport }); + * } + * ``` + */ +export function createChatTransport(options: TriggerChatTransportOptions): TriggerChatTransport { + return new TriggerChatTransport(options); +} diff --git a/packages/trigger-sdk/src/v3/runs.ts b/packages/trigger-sdk/src/v3/runs.ts index 7081c448d75..88e6d2b701c 100644 --- a/packages/trigger-sdk/src/v3/runs.ts +++ b/packages/trigger-sdk/src/v3/runs.ts @@ -358,6 +358,14 @@ export type SubscribeToRunOptions = { * ``` */ skipColumns?: RealtimeRunSkipColumns; + + /** + * An AbortSignal to cancel the subscription. 
+ * + * When the signal is aborted, the underlying SSE connection is closed + * and the async iterator completes. + */ + signal?: AbortSignal; }; /** @@ -403,6 +411,7 @@ function subscribeToRun( closeOnComplete: typeof options?.stopOnCompletion === "boolean" ? options.stopOnCompletion : true, skipColumns: options?.skipColumns, + signal: options?.signal, }); } diff --git a/packages/trigger-sdk/src/v3/shared.ts b/packages/trigger-sdk/src/v3/shared.ts index c03732c12ea..022e36476c2 100644 --- a/packages/trigger-sdk/src/v3/shared.ts +++ b/packages/trigger-sdk/src/v3/shared.ts @@ -90,6 +90,7 @@ import type { TaskWithToolOptions, ToolTask, ToolTaskParameters, + TriggerAndSubscribeOptions, TriggerAndWaitOptions, TriggerApiRequestOptions, TriggerOptions, @@ -214,6 +215,26 @@ export function createTask< }); }, params.id); }, + triggerAndSubscribe: (payload, options) => { + return new TaskRunPromise((resolve, reject) => { + triggerAndSubscribe_internal( + "triggerAndSubscribe()", + params.id, + payload, + undefined, + { + queue: params.queue?.name, + ...options, + } + ) + .then((result) => { + resolve(result); + }) + .catch((error) => { + reject(error); + }); + }, params.id); + }, batchTriggerAndWait: async (items, options) => { return await batchTriggerAndWait_internal( "batchTriggerAndWait()", @@ -258,7 +279,7 @@ export function createTask< } /** - * @deprecated use ai.tool() instead + * @deprecated Use `schemaTask` plus AI SDK `tool()` with `execute: ai.toolExecute(task)` instead. 
*/ export function createToolTask< TIdentifier extends string, @@ -345,6 +366,26 @@ export function createSchemaTask< }); }, params.id); }, + triggerAndSubscribe: (payload, options) => { + return new TaskRunPromise((resolve, reject) => { + triggerAndSubscribe_internal, TOutput>( + "triggerAndSubscribe()", + params.id, + payload, + parsePayload, + { + queue: params.queue?.name, + ...options, + } + ) + .then((result) => { + resolve(result); + }) + .catch((error) => { + reject(error); + }); + }, params.id); + }, batchTriggerAndWait: async (items, options) => { return await batchTriggerAndWait_internal, TOutput>( "batchTriggerAndWait()", @@ -463,6 +504,49 @@ export function triggerAndWait( }, id); } +/** + * Trigger a task and subscribe to its updates via realtime. Unlike `triggerAndWait`, + * this does NOT suspend the parent run — the parent stays alive and subscribes to updates. + * This enables parallel execution and proper abort signal handling. + * + * @param id - The id of the task to trigger + * @param payload + * @param options - Options for the task run, including an optional `signal` to cancel the subscription and child run + * @returns TaskRunPromise + * @example + * ```ts + * import { tasks } from "@trigger.dev/sdk/v3"; + * const result = await tasks.triggerAndSubscribe("my-task", { foo: "bar" }); + * + * if (result.ok) { + * console.log(result.output); + * } else { + * console.error(result.error); + * } + * ``` + */ +export function triggerAndSubscribe( + id: TaskIdentifier, + payload: TaskPayload, + options?: TriggerAndSubscribeOptions +): TaskRunPromise, TaskOutput> { + return new TaskRunPromise, TaskOutput>((resolve, reject) => { + triggerAndSubscribe_internal, TaskPayload, TaskOutput>( + "tasks.triggerAndSubscribe()", + id, + payload, + undefined, + options + ) + .then((result) => { + resolve(result); + }) + .catch((error) => { + reject(error); + }); + }, id); +} + /** * Batch trigger multiple task runs with the given payloads, and wait for the 
results. Returns the results of the task runs. * @param id - The id of the task to trigger @@ -2439,6 +2523,128 @@ async function triggerAndWait_internal( + name: string, + id: TIdentifier, + payload: TPayload, + parsePayload?: SchemaParseFn, + options?: TriggerAndSubscribeOptions +): Promise> { + const ctx = taskContext.ctx; + + if (!ctx) { + throw new Error("triggerAndSubscribe can only be used from inside a task.run()"); + } + + const apiClient = apiClientManager.clientOrThrow(); + + const parsedPayload = parsePayload ? await parsePayload(payload) : payload; + const payloadPacket = await stringifyIO(parsedPayload); + + const processedIdempotencyKey = await makeIdempotencyKey(options?.idempotencyKey); + const idempotencyKeyOptions = processedIdempotencyKey + ? getIdempotencyKeyOptions(processedIdempotencyKey) + : undefined; + + return await tracer.startActiveSpan( + name, + async (span) => { + const response = await apiClient.triggerTask( + id, + { + payload: payloadPacket.data, + options: { + lockToVersion: taskContext.worker?.version, + queue: options?.queue ? 
{ name: options.queue } : undefined, + concurrencyKey: options?.concurrencyKey, + test: taskContext.ctx?.run.isTest, + payloadType: payloadPacket.dataType, + delay: options?.delay, + ttl: options?.ttl, + tags: options?.tags, + maxAttempts: options?.maxAttempts, + metadata: options?.metadata, + maxDuration: options?.maxDuration, + parentRunId: ctx.run.id, + // NOTE: no resumeParentOnCompletion — parent stays alive and subscribes + idempotencyKey: processedIdempotencyKey?.toString(), + idempotencyKeyTTL: options?.idempotencyKeyTTL, + idempotencyKeyOptions, + machine: options?.machine, + priority: options?.priority, + region: options?.region, + debounce: options?.debounce, + }, + }, + {} + ); + + // Set attributes after trigger so the dashboard can link to the child run + span.setAttribute("messaging.message.id", response.id); + span.setAttribute("runId", response.id); + span.setAttribute(SemanticInternalAttributes.ENTITY_TYPE, "run"); + span.setAttribute(SemanticInternalAttributes.ENTITY_ID, response.id); + + // Optionally cancel the child run when the abort signal fires (default: true) + const cancelOnAbort = options?.cancelOnAbort !== false; + if (options?.signal && cancelOnAbort) { + const onAbort = () => { + apiClient.cancelRun(response.id).catch(() => {}); + }; + if (options.signal.aborted) { + await apiClient.cancelRun(response.id).catch(() => {}); + throw new Error("Aborted"); + } + options.signal.addEventListener("abort", onAbort, { once: true }); + } + + for await (const run of apiClient.subscribeToRun(response.id, { + closeOnComplete: true, + signal: options?.signal, + skipColumns: ["payload"], + })) { + if (run.isSuccess) { + // run.output from subscribeToRun is already deserialized + return { + ok: true as const, + id: response.id, + taskIdentifier: id as TIdentifier, + output: run.output as TOutput, + }; + } + if (run.isFailed || run.isCancelled) { + const error = new Error(run.error?.message ?? 
`Task ${id} failed (${run.status})`); + if (run.error?.name) error.name = run.error.name; + + return { + ok: false as const, + id: response.id, + taskIdentifier: id as TIdentifier, + error, + }; + } + } + + throw new Error(`Task ${id}: subscription ended without completion`); + }, + { + kind: SpanKind.PRODUCER, + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "trigger", + ...accessoryAttributes({ + items: [ + { + text: id, + variant: "normal", + }, + ], + style: "codepath", + }), + }, + } + ); +} + async function batchTriggerAndWait_internal( name: string, id: TIdentifier, diff --git a/packages/trigger-sdk/src/v3/streams.ts b/packages/trigger-sdk/src/v3/streams.ts index 68edc2a64ab..9ce2652197c 100644 --- a/packages/trigger-sdk/src/v3/streams.ts +++ b/packages/trigger-sdk/src/v3/streams.ts @@ -25,8 +25,10 @@ import { InputStreamOncePromise, type InputStreamOnceResult, type InputStreamWaitOptions, + type InputStreamWaitWithIdleTimeoutOptions, type SendInputStreamOptions, type InferInputStreamType, + type StreamWriteResult, } from "@trigger.dev/core/v3"; import { conditionallyImportAndParsePacket } from "@trigger.dev/core/v3/utils/ioSerialization"; import { tracer } from "./tracer.js"; @@ -139,7 +141,7 @@ function pipe( opts = valueOrOptions as PipeStreamOptions | undefined; } - return pipeInternal(key, value, opts, "streams.pipe()"); + return pipeInternal(key, value, opts, opts?.spanName ?? "streams.pipe()"); } /** @@ -167,6 +169,7 @@ function pipeInternal( [SemanticInternalAttributes.ENTITY_TYPE]: "realtime-stream", [SemanticInternalAttributes.ENTITY_ID]: `${runId}:${key}`, [SemanticInternalAttributes.STYLE_ICON]: "streams", + ...(opts?.collapsed ? 
{ [SemanticInternalAttributes.COLLAPSED]: true } : {}), ...accessoryAttributes({ items: [ { @@ -194,7 +197,9 @@ function pipeInternal( return { stream: instance.stream, - waitUntilComplete: () => instance.wait(), + waitUntilComplete: async () => { + return instance.wait(); + }, }; } catch (error) { // if the error is a signal abort error, we need to end the span but not record an exception @@ -640,7 +645,7 @@ function writerInternal(key: string, options: WriterStreamOptions) } }); - return pipeInternal(key, stream, options, "streams.writer()"); + return pipeInternal(key, stream, options, options.spanName ?? "streams.writer()"); } export type RealtimeDefineStreamOptions = { @@ -656,8 +661,18 @@ function define(opts: RealtimeDefineStreamOptions): RealtimeDefinedStream read(runId, options) { return read(runId, opts.id, options); }, - append(value, options) { - return append(opts.id, value as BodyInit, options); + async append(value, options) { + // Use a single-write writer so objects are serialized the same way + // as stream.writer() — the raw append API sends BodyInit which + // doesn't serialize objects correctly for SSE consumers. + const { waitUntilComplete } = writer(opts.id, { + ...options, + spanName: "streams.append()", + execute: ({ write }) => { + write(value); + }, + }); + await waitUntilComplete(); }, writer(options) { return writer(opts.id, options); @@ -713,7 +728,7 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { return new InputStreamOncePromise((resolve, reject) => { tracer .startActiveSpan( - `inputStream.once()`, + options?.spanName ?? 
`inputStream.once()`, async () => { const result = await innerPromise; resolve(result as InputStreamOnceResult); @@ -750,23 +765,21 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { const apiClient = apiClientManager.clientOrThrow(); + // Create the waitpoint before the span so we have the entity ID upfront + const response = await apiClient.createInputStreamWaitpoint(ctx.run.id, { + streamId: opts.id, + timeout: options?.timeout, + idempotencyKey: options?.idempotencyKey, + idempotencyKeyTTL: options?.idempotencyKeyTTL, + tags: options?.tags, + lastSeqNum: inputStreams.lastSeqNum(opts.id), + }); + const result = await tracer.startActiveSpan( - `inputStream.wait()`, + options?.spanName ?? `inputStream.wait()`, async (span) => { - // 1. Create a waitpoint linked to this input stream - const response = await apiClient.createInputStreamWaitpoint(ctx.run.id, { - streamId: opts.id, - timeout: options?.timeout, - idempotencyKey: options?.idempotencyKey, - idempotencyKeyTTL: options?.idempotencyKeyTTL, - tags: options?.tags, - lastSeqNum: inputStreams.lastSeqNum(opts.id), - }); - - // Set the entity ID now that we have the waitpoint ID - span.setAttribute(SemanticInternalAttributes.ENTITY_ID, response.waitpointId); - // 2. Block the run on the waitpoint + // 1. Block the run on the waitpoint const waitResponse = await apiClient.waitForWaitpointToken({ runFriendlyId: ctx.run.id, waitpointFriendlyId: response.waitpointId, @@ -776,6 +789,12 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { throw new Error("Failed to block on input stream waitpoint"); } + // 2. Disconnect the SSE tail and clear the buffer before suspending. + // Without this, the tail stays alive during the suspension window and + // may buffer a copy of the same message that will be delivered via the + // waitpoint, causing a duplicate on resume. + inputStreams.disconnectStream(opts.id); + // 3. 
Suspend the task const waitResult = await runtime.waitUntil(response.waitpointId); @@ -792,6 +811,12 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { : undefined; if (waitResult.ok) { + // Advance the seq counter so the SSE tail doesn't replay + // the record that was consumed via the waitpoint path when + // it lazily reconnects on the next on()/once() call. + const prevSeq = inputStreams.lastSeqNum(opts.id); + inputStreams.setLastSeqNum(opts.id, (prevSeq ?? -1) + 1); + return { ok: true as const, output: data as TData }; } else { const error = new WaitpointTimeoutError(data?.message ?? "Timed out"); @@ -806,6 +831,7 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { attributes: { [SemanticInternalAttributes.STYLE_ICON]: "wait", [SemanticInternalAttributes.ENTITY_TYPE]: "waitpoint", + [SemanticInternalAttributes.ENTITY_ID]: response.waitpointId, streamId: opts.id, ...accessoryAttributes({ items: [ @@ -826,6 +852,45 @@ function input(opts: { id: string }): RealtimeDefinedInputStream { } }); }, + async waitWithIdleTimeout(options) { + const self = this; + const spanName = options.spanName ?? 
`inputStream.waitWithIdleTimeout()`; + + return tracer.startActiveSpan( + spanName, + async (span) => { + // Idle phase: keep compute alive + if (options.idleTimeoutInSeconds > 0) { + const warm = await inputStreams.once(opts.id, { + timeoutMs: options.idleTimeoutInSeconds * 1000, + }); + if (warm.ok) { + span.setAttribute("wait.resolved", "idle"); + return { ok: true as const, output: warm.output as TData }; + } + } + + // Cold phase: suspend via .wait() — creates a child span + span.setAttribute("wait.resolved", "suspended"); + const waitResult = await self.wait({ + timeout: options.timeout, + spanName: "suspended", + }); + + return waitResult; + }, + { + attributes: { + [SemanticInternalAttributes.STYLE_ICON]: "streams", + streamId: opts.id, + ...accessoryAttributes({ + items: [{ text: opts.id, variant: "normal" }], + style: "codepath", + }), + }, + } + ); + }, async send(runId, data, options) { return tracer.startActiveSpan( `inputStream.send()`, diff --git a/packages/trigger-sdk/src/v3/tasks.ts b/packages/trigger-sdk/src/v3/tasks.ts index 75b7e85e625..5781a104229 100644 --- a/packages/trigger-sdk/src/v3/tasks.ts +++ b/packages/trigger-sdk/src/v3/tasks.ts @@ -20,6 +20,7 @@ import { SubtaskUnwrapError, trigger, triggerAndWait, + triggerAndSubscribe, } from "./shared.js"; export { SubtaskUnwrapError }; @@ -96,6 +97,7 @@ export const tasks = { trigger, batchTrigger, triggerAndWait, + triggerAndSubscribe, batchTriggerAndWait, /** @deprecated Use onStartAttempt instead */ onStart, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 192a5747f2a..6fee55e0521 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -2079,6 +2079,9 @@ importers: evt: specifier: ^2.4.13 version: 2.4.13 + react: + specifier: ^18.0 || ^19.0 + version: 18.3.1 slug: specifier: ^6.0.0 version: 6.1.0 @@ -2101,6 +2104,9 @@ importers: '@types/debug': specifier: ^4.1.7 version: 4.1.7 + '@types/react': + specifier: ^19.2.14 + version: 19.2.14 '@types/slug': specifier: ^5.0.3 version: 5.0.3 @@ -2111,8 
+2117,8 @@ importers: specifier: ^8.5.3 version: 8.5.4 ai: - specifier: ^6.0.0 - version: 6.0.3(zod@3.25.76) + specifier: ^6.0.116 + version: 6.0.116(zod@3.25.76) encoding: specifier: ^0.1.13 version: 0.1.13 @@ -2132,6 +2138,82 @@ importers: specifier: 3.25.76 version: 3.25.76 + references/ai-chat: + dependencies: + '@ai-sdk/anthropic': + specifier: ^3.0.0 + version: 3.0.54(zod@3.25.76) + '@ai-sdk/openai': + specifier: ^3.0.0 + version: 3.0.41(zod@3.25.76) + '@ai-sdk/react': + specifier: ^3.0.0 + version: 3.0.51(react@19.1.0)(zod@3.25.76) + '@prisma/adapter-pg': + specifier: ^7.4.2 + version: 7.4.2 + '@prisma/client': + specifier: ^7.4.2 + version: 7.4.2(prisma@7.4.2(@types/react@19.2.14)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4))(typescript@5.5.4) + '@trigger.dev/sdk': + specifier: workspace:* + version: link:../../packages/trigger-sdk + ai: + specifier: ^6.0.0 + version: 6.0.116(zod@3.25.76) + next: + specifier: 15.3.3 + version: 15.3.3(@opentelemetry/api@1.9.0)(@playwright/test@1.37.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + pg: + specifier: ^8.16.3 + version: 8.16.3 + react: + specifier: ^19.0.0 + version: 19.1.0 + react-dom: + specifier: ^19.0.0 + version: 19.1.0(react@19.1.0) + streamdown: + specifier: ^2.3.0 + version: 2.3.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + turndown: + specifier: ^7.2.2 + version: 7.2.2 + zod: + specifier: 3.25.76 + version: 3.25.76 + devDependencies: + '@tailwindcss/postcss': + specifier: ^4 + version: 4.0.17 + '@trigger.dev/build': + specifier: workspace:* + version: link:../../packages/build + '@types/node': + specifier: 20.14.14 + version: 20.14.14 + '@types/react': + specifier: ^19 + version: 19.2.14 + '@types/react-dom': + specifier: ^19 + version: 19.0.4(@types/react@19.2.14) + '@types/turndown': + specifier: ^5.0.6 + version: 5.0.6 + prisma: + specifier: ^7.4.2 + version: 
7.4.2(@types/react@19.2.14)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) + tailwindcss: + specifier: ^4 + version: 4.0.17 + trigger.dev: + specifier: workspace:* + version: link:../../packages/cli-v3 + typescript: + specifier: 5.5.4 + version: 5.5.4 + references/bun-catalog: dependencies: '@trigger.dev/sdk': @@ -2796,6 +2878,19 @@ importers: specifier: 5.5.4 version: 5.5.4 + references/secure-exec-sandbox: + dependencies: + '@trigger.dev/sdk': + specifier: workspace:* + version: link:../../packages/trigger-sdk + devDependencies: + '@trigger.dev/build': + specifier: workspace:* + version: link:../../packages/build + trigger.dev: + specifier: workspace:* + version: link:../../packages/cli-v3 + references/seed: dependencies: '@sinclair/typebox': @@ -2879,6 +2974,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4 + '@ai-sdk/anthropic@3.0.54': + resolution: {integrity: sha512-UhSPZ63FsTNO7PQCfxsqJIgkij1sivU3qfXydlSd4ugshpkNhd2v9s78G/40/G5C3pKSRfp/CfaSvivrneQfCg==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/gateway@1.0.6': resolution: {integrity: sha512-JuSj1MtTr4vw2VBBth4wlbciQnQIV0o1YV9qGLFA+r85nR5H+cJp3jaYE0nprqfzC9rYG8w9c6XGHB3SDKgcgA==} engines: {node: '>=18'} @@ -2897,6 +2998,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/gateway@3.0.22': + resolution: {integrity: sha512-NgnlY73JNuooACHqUIz5uMOEWvqR1MMVbb2soGLMozLY1fgwEIF5iJFDAGa5/YArlzw2ATVU7zQu7HkR/FUjgA==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/gateway@3.0.66': resolution: {integrity: sha512-SIQ0YY0iMuv+07HLsZ+bB990zUJ6S4ujORAh+Jv1V2KGNn73qQKnGO0JBk+w+Res8YqOFSycwDoWcFlQrVxS4A==} engines: {node: '>=18'} @@ -2987,12 +3094,24 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@4.0.17': + resolution: {integrity: sha512-oyCeFINTYK0B8ZGUBiQc05G5vytPlKSmTTtm19xfJuUgoi8zkvvRcoPQci4mSnyfpPn2XSFFDfsALG8uGcapfg==} + engines: 
{node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@4.0.19': resolution: {integrity: sha512-3eG55CrSWCu2SXlqq2QCsFjo3+E7+Gmg7i/oRVoSZzIodTuDSfLb3MRje67xE9RFea73Zao7Lm4mADIfUETKGg==} engines: {node: '>=18'} peerDependencies: zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider-utils@4.0.9': + resolution: {integrity: sha512-bB4r6nfhBOpmoS9mePxjRoCy+LnzP3AfhyMGCkGL4Mn9clVNlqEeKj26zEKEtB6yoSVcT1IQ0Zh9fytwMCDnow==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + '@ai-sdk/provider@0.0.26': resolution: {integrity: sha512-dQkfBDs2lTYpKM8389oopPdQgIU007GQyCbuPPrV+K6MtSII3HBfE0stUIMXUb44L+LK1t6GXPP7wjSzjO6uKg==} engines: {node: '>=18'} @@ -3017,6 +3136,10 @@ packages: resolution: {integrity: sha512-m9ka3ptkPQbaHHZHqDXDF9C9B5/Mav0KTdky1k2HZ3/nrW2t1AgObxIVPyGDWQNS9FXT/FS6PIoSjpcP/No8rQ==} engines: {node: '>=18'} + '@ai-sdk/provider@3.0.5': + resolution: {integrity: sha512-2Xmoq6DBJqmSl80U6V9z5jJSJP7ehaJJQMy2iFUqTay06wdCqTnPVBBQbtEL8RCChenL+q5DC5H5WzU3vV3v8w==} + engines: {node: '>=18'} + '@ai-sdk/provider@3.0.8': resolution: {integrity: sha512-oGMAgGoQdBXbZqNG0Ze56CHjDZ1IDYOwGYxYjO5KLSlz5HiNQ9udIXsPZ61VWaHGZ5XW/jyjmr6t2xz2jGVwbQ==} engines: {node: '>=18'} @@ -3053,6 +3176,12 @@ packages: zod: optional: true + '@ai-sdk/react@3.0.51': + resolution: {integrity: sha512-7nmCwEJM52NQZB4/ED8qJ4wbDg7EEWh94qJ7K9GSJxD6sWF3GOKrRZ5ivm4qNmKhY+JfCxCAxfghGY5mTKOsxw==} + engines: {node: '>=18'} + peerDependencies: + react: ^18 || ~19.0.1 || ~19.1.2 || ^19.2.1 + '@ai-sdk/ui-utils@1.0.0': resolution: {integrity: sha512-oXBDIM/0niWeTWyw77RVl505dNxBUDLLple7bTsqo2d3i1UKwGlzBUX8XqZsh7GbY7I6V05nlG0Y8iGlWxv1Aw==} engines: {node: '>=18'} @@ -4061,18 +4190,30 @@ packages: '@changesets/write@0.2.3': resolution: {integrity: sha512-Dbamr7AIMvslKnNYsLFafaVORx4H0pvCA2MHqgtNCySMe1blImEyAEOzDmcgKAkgz4+uwoLz7demIrX+JBr/Xw==} + '@chevrotain/cst-dts-gen@10.5.0': + resolution: {integrity: 
sha512-lhmC/FyqQ2o7pGK4Om+hzuDrm9rhFYIJ/AXoQBeongmn870Xeb0L6oGEiuR8nohFNL5sMaQEJWCxr1oIVIVXrw==} + '@chevrotain/cst-dts-gen@11.0.3': resolution: {integrity: sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==} + '@chevrotain/gast@10.5.0': + resolution: {integrity: sha512-pXdMJ9XeDAbgOWKuD1Fldz4ieCs6+nLNmyVhe2gZVqoO7v8HXuHYs5OV2EzUtbuai37TlOAQHrTDvxMnvMJz3A==} + '@chevrotain/gast@11.0.3': resolution: {integrity: sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==} '@chevrotain/regexp-to-ast@11.0.3': resolution: {integrity: sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==} + '@chevrotain/types@10.5.0': + resolution: {integrity: sha512-f1MAia0x/pAVPWH/T73BJVyO2XU5tI4/iE7cnxb7tqdNTNhQI3Uq3XkqcoteTmD4t1aM0LbHCJOhgIDn07kl2A==} + '@chevrotain/types@11.0.3': resolution: {integrity: sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==} + '@chevrotain/utils@10.5.0': + resolution: {integrity: sha512-hBzuU5+JjB2cqNZyszkDHZgOSrUUT8V3dhgRl8Q9Gp6dAj/H5+KILGjbhDpc3Iy9qmqlm/akuOI2ut9VUtzJxQ==} + '@chevrotain/utils@11.0.3': resolution: {integrity: sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==} @@ -4266,6 +4407,20 @@ packages: '@electric-sql/client@1.0.14': resolution: {integrity: sha512-LtPAfeMxXRiYS0hyDQ5hue2PjljUiK9stvzsVyVb4nwxWQxfOWTSF42bHTs/o5i3x1T4kAQ7mwHpxa4A+f8X7Q==} + '@electric-sql/pglite-socket@0.0.20': + resolution: {integrity: sha512-J5nLGsicnD9wJHnno9r+DGxfcZWh+YJMCe0q/aCgtG6XOm9Z7fKeite8IZSNXgZeGltSigM9U/vAWZQWdgcSFg==} + hasBin: true + peerDependencies: + '@electric-sql/pglite': 0.3.15 + + '@electric-sql/pglite-tools@0.2.20': + resolution: {integrity: sha512-BK50ZnYa3IG7ztXhtgYf0Q7zijV32Iw1cYS8C+ThdQlwx12V5VZ9KRJ42y82Hyb4PkTxZQklVQA9JHyUlex33A==} + peerDependencies: + '@electric-sql/pglite': 0.3.15 + + '@electric-sql/pglite@0.3.15': + 
resolution: {integrity: sha512-Cj++n1Mekf9ETfdc16TlDi+cDDQF0W7EcbyRHYOAeZdsAe8M/FJg18itDTSwyHfar2WIezawM9o0EKaRGVKygQ==} + '@electric-sql/react@0.3.5': resolution: {integrity: sha512-qPrlF3BsRg5L8zAn1sLGzc3pkswfEHyQI3lNOu7Xllv1DBx85RvHR1zgGGPAUfC8iwyWupQu9pFPE63GdbeuhA==} peerDependencies: @@ -5945,6 +6100,9 @@ packages: '@microsoft/fetch-event-source@2.0.1': resolution: {integrity: sha512-W6CLUJ2eBMw3Rec70qrsEW0jOm/3twwJv21mrmj2yORiaVmVYGS4sSS5yUwvQc1ZlDLYGPnClVWmUUMagKNsfA==} + '@mixmark-io/domino@2.2.0': + resolution: {integrity: sha512-Y28PR25bHXUg88kCV7nivXrP2Nj2RueZ3/l/jdx6J9f8J4nsEGcgX0Qe6lt7Pa+J79+kPiJU3LguR6O/6zrLOw==} + '@modelcontextprotocol/sdk@1.25.2': resolution: {integrity: sha512-LZFeo4F9M5qOhC/Uc1aQSrBHxMrvxett+9KLHt7OhcExtoiRN9DKgbZffMP/nxjutWDQpfMDfP3nkHI4X9ijww==} engines: {node: '>=18'} @@ -5965,6 +6123,10 @@ packages: '@cfworker/json-schema': optional: true + '@mrleebo/prisma-ast@0.13.1': + resolution: {integrity: sha512-XyroGQXcHrZdvmrGJvsA9KNeOOgGMg1Vg9OlheUsBOSKznLMDl+YChxbkboRHvtFYJEMRYmlV3uoo/njCw05iw==} + engines: {node: '>=16'} + '@msgpack/msgpack@3.0.0-beta2': resolution: {integrity: sha512-y+l1PNV0XDyY8sM3YtuMLK5vE3/hkfId+Do8pLo/OPxfxuFAUwcGz3oiiUuV46/aBpwTzZ+mRWVMtlSKbradhw==} engines: {node: '>= 14'} @@ -5985,6 +6147,9 @@ packages: '@next/env@15.2.4': resolution: {integrity: sha512-+SFtMgoiYP3WoSswuNmxJOCwi06TdWE733D+WPjpXIe4LXGULwEaofiiAy6kbS0+XjM5xF5n3lKuBwN2SnqD9g==} + '@next/env@15.3.3': + resolution: {integrity: sha512-OdiMrzCl2Xi0VTjiQQUK0Xh7bJHnOuET2s+3V+Y40WJBAXrJeGA3f+I8MZJ/YQ3mVGi5XGR1L66oFlgqXhQ4Vw==} + '@next/env@15.4.8': resolution: {integrity: sha512-LydLa2MDI1NMrOFSkO54mTc8iIHSttj6R6dthITky9ylXV2gCGi0bHQjVCtLGRshdRPjyh2kXbxJukDtBWQZtQ==} @@ -6009,6 +6174,12 @@ packages: cpu: [arm64] os: [darwin] + '@next/swc-darwin-arm64@15.3.3': + resolution: {integrity: sha512-WRJERLuH+O3oYB4yZNVahSVFmtxRNjNF1I1c34tYMoJb0Pve+7/RaLAJJizyYiFhjYNGHRAE1Ri2Fd23zgDqhg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + 
'@next/swc-darwin-arm64@15.4.8': resolution: {integrity: sha512-Pf6zXp7yyQEn7sqMxur6+kYcywx5up1J849psyET7/8pG2gQTVMjU3NzgIt8SeEP5to3If/SaWmaA6H6ysBr1A==} engines: {node: '>= 10'} @@ -6039,6 +6210,12 @@ packages: cpu: [x64] os: [darwin] + '@next/swc-darwin-x64@15.3.3': + resolution: {integrity: sha512-XHdzH/yBc55lu78k/XwtuFR/ZXUTcflpRXcsu0nKmF45U96jt1tsOZhVrn5YH+paw66zOANpOnFQ9i6/j+UYvw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + '@next/swc-darwin-x64@15.4.8': resolution: {integrity: sha512-xla6AOfz68a6kq3gRQccWEvFC/VRGJmA/QuSLENSO7CZX5WIEkSz7r1FdXUjtGCQ1c2M+ndUAH7opdfLK1PQbw==} engines: {node: '>= 10'} @@ -6072,6 +6249,13 @@ packages: os: [linux] libc: [glibc] + '@next/swc-linux-arm64-gnu@15.3.3': + resolution: {integrity: sha512-VZ3sYL2LXB8znNGcjhocikEkag/8xiLgnvQts41tq6i+wql63SMS1Q6N8RVXHw5pEUjiof+II3HkDd7GFcgkzw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + libc: [glibc] + '@next/swc-linux-arm64-gnu@15.4.8': resolution: {integrity: sha512-y3fmp+1Px/SJD+5ntve5QLZnGLycsxsVPkTzAc3zUiXYSOlTPqT8ynfmt6tt4fSo1tAhDPmryXpYKEAcoAPDJw==} engines: {node: '>= 10'} @@ -6107,6 +6291,13 @@ packages: os: [linux] libc: [musl] + '@next/swc-linux-arm64-musl@15.3.3': + resolution: {integrity: sha512-h6Y1fLU4RWAp1HPNJWDYBQ+e3G7sLckyBXhmH9ajn8l/RSMnhbuPBV/fXmy3muMcVwoJdHL+UtzRzs0nXOf9SA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + libc: [musl] + '@next/swc-linux-arm64-musl@15.4.8': resolution: {integrity: sha512-DX/L8VHzrr1CfwaVjBQr3GWCqNNFgyWJbeQ10Lx/phzbQo3JNAxUok1DZ8JHRGcL6PgMRgj6HylnLNndxn4Z6A==} engines: {node: '>= 10'} @@ -6142,6 +6333,13 @@ packages: os: [linux] libc: [glibc] + '@next/swc-linux-x64-gnu@15.3.3': + resolution: {integrity: sha512-jJ8HRiF3N8Zw6hGlytCj5BiHyG/K+fnTKVDEKvUCyiQ/0r5tgwO7OgaRiOjjRoIx2vwLR+Rz8hQoPrnmFbJdfw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + libc: [glibc] + '@next/swc-linux-x64-gnu@15.4.8': resolution: {integrity: 
sha512-9fLAAXKAL3xEIFdKdzG5rUSvSiZTLLTCc6JKq1z04DR4zY7DbAPcRvNm3K1inVhTiQCs19ZRAgUerHiVKMZZIA==} engines: {node: '>= 10'} @@ -6177,6 +6375,13 @@ packages: os: [linux] libc: [musl] + '@next/swc-linux-x64-musl@15.3.3': + resolution: {integrity: sha512-HrUcTr4N+RgiiGn3jjeT6Oo208UT/7BuTr7K0mdKRBtTbT4v9zJqCDKO97DUqqoBK1qyzP1RwvrWTvU6EPh/Cw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + libc: [musl] + '@next/swc-linux-x64-musl@15.4.8': resolution: {integrity: sha512-s45V7nfb5g7dbS7JK6XZDcapicVrMMvX2uYgOHP16QuKH/JA285oy6HcxlKqwUNaFY/UC6EvQ8QZUOo19cBKSA==} engines: {node: '>= 10'} @@ -6209,6 +6414,12 @@ packages: cpu: [arm64] os: [win32] + '@next/swc-win32-arm64-msvc@15.3.3': + resolution: {integrity: sha512-SxorONgi6K7ZUysMtRF3mIeHC5aA3IQLmKFQzU0OuhuUYwpOBc1ypaLJLP5Bf3M9k53KUUUj4vTPwzGvl/NwlQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + '@next/swc-win32-arm64-msvc@15.4.8': resolution: {integrity: sha512-KjgeQyOAq7t/HzAJcWPGA8X+4WY03uSCZ2Ekk98S9OgCFsb6lfBE3dbUzUuEQAN2THbwYgFfxX2yFTCMm8Kehw==} engines: {node: '>= 10'} @@ -6251,6 +6462,12 @@ packages: cpu: [x64] os: [win32] + '@next/swc-win32-x64-msvc@15.3.3': + resolution: {integrity: sha512-4QZG6F8enl9/S2+yIiOiju0iCTFd93d8VC1q9LZS4p/Xuk81W2QDjCFeoogmrWWkAD59z8ZxepBQap2dKS5ruw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + '@next/swc-win32-x64-msvc@15.4.8': resolution: {integrity: sha512-Exsmf/+42fWVnLMaZHzshukTBxZrSwuuLKFvqhGHJ+mC1AokqieLY/XzAl3jc/CqhXLqLY3RRjkKJ9YnLPcRWg==} engines: {node: '>= 10'} @@ -6922,9 +7139,15 @@ packages: '@prisma/adapter-pg@6.20.0-integration-next.8': resolution: {integrity: sha512-5+ZjSPMzyfDYMmWLH1IaQIOQGa8eJrqEz5A9V4vS4+b6LV6qvCOHjqlnbRQ5IKSNCwFP055SJ54RsPES+0jOyA==} + '@prisma/adapter-pg@7.4.2': + resolution: {integrity: sha512-oUo2Zhe9Tf6YwVL8kLPuOLTK1Z2pwi/Ua77t2PuGyBan2w7shRKqHvYK+3XXmRH9RWhPJ4SMtHZKpNo6Ax/4bQ==} + '@prisma/client-runtime-utils@6.20.0-integration-next.8': resolution: {integrity: 
sha512-prENLjPislFvRWDHNgXmg9yzixQYsFPVQGtDv5zIMs4pV2KPdNc5pCiZ3n77hAinvqGJVafASa+eU4TfpVphdA==} + '@prisma/client-runtime-utils@7.4.2': + resolution: {integrity: sha512-cID+rzOEb38VyMsx5LwJMEY4NGIrWCNpKu/0ImbeooQ2Px7TI+kOt7cm0NelxUzF2V41UVVXAmYjANZQtCu1/Q==} + '@prisma/client@4.9.0': resolution: {integrity: sha512-bz6QARw54sWcbyR1lLnF2QHvRW5R/Jxnbbmwh3u+969vUKXtBkXgSgjDA85nji31ZBlf7+FrHDy5x+5ydGyQDg==} engines: {node: '>=14.17'} @@ -6982,6 +7205,18 @@ packages: typescript: optional: true + '@prisma/client@7.4.2': + resolution: {integrity: sha512-ts2mu+cQHriAhSxngO3StcYubBGTWDtu/4juZhXCUKOwgh26l+s4KD3vT2kMUzFyrYnll9u/3qWrtzRv9CGWzA==} + engines: {node: ^20.19 || ^22.12 || >=24.0} + peerDependencies: + prisma: '*' + typescript: 5.5.4 + peerDependenciesMeta: + prisma: + optional: true + typescript: + optional: true + '@prisma/config@6.14.0': resolution: {integrity: sha512-IwC7o5KNNGhmblLs23swnfBjADkacBb7wvyDXUWLwuvUQciKJZqyecU0jw0d7JRkswrj+XTL8fdr0y2/VerKQQ==} @@ -6994,6 +7229,9 @@ packages: '@prisma/config@6.20.0-integration-next.8': resolution: {integrity: sha512-nwf+tczfiGSn0tnuHmBpnK+wmaYzcC20sn9Zt8BSoJVCewJxf8ASHPxZEGgvFLl05zbCfFtq3rMc6ZnAiYjowg==} + '@prisma/config@7.4.2': + resolution: {integrity: sha512-CftBjWxav99lzY1Z4oDgomdb1gh9BJFAOmWF6P2v1xRfXqQb56DfBub+QKcERRdNoAzCb3HXy3Zii8Vb4AsXhg==} + '@prisma/debug@4.16.2': resolution: {integrity: sha512-7L7WbG0qNNZYgLpsVB8rCHCXEyHFyIycRlRDNwkVfjQmACC2OW6AWCYCbfdjQhkF/t7+S3njj8wAWAocSs+Brw==} @@ -7009,12 +7247,24 @@ packages: '@prisma/debug@6.20.0-integration-next.8': resolution: {integrity: sha512-PqUUFXf8MDoIrsKMzpF4NYqA3gHE8l/CUWVnYa4hNIbynCcEhvk7iT+6ve0u9w1TiGVUFnIVMuqFGEb2aHCuFw==} + '@prisma/debug@7.2.0': + resolution: {integrity: sha512-YSGTiSlBAVJPzX4ONZmMotL+ozJwQjRmZweQNIq/ER0tQJKJynNkRB3kyvt37eOfsbMCXk3gnLF6J9OJ4QWftw==} + + '@prisma/debug@7.4.2': + resolution: {integrity: sha512-aP7qzu+g/JnbF6U69LMwHoUkELiserKmWsE2shYuEpNUJ4GrtxBCvZwCyCBHFSH2kLTF2l1goBlBh4wuvRq62w==} + + '@prisma/dev@0.20.0': + 
resolution: {integrity: sha512-ovlBYwWor0OzG+yH4J3Ot+AneD818BttLA+Ii7wjbcLHUrnC4tbUPVGyNd3c/+71KETPKZfjhkTSpdS15dmXNQ==} + '@prisma/driver-adapter-utils@6.16.0': resolution: {integrity: sha512-dsRHvEnifJ3xqpMKGBy1jRwR8yc+7Ko4TcHrdTQJIfq6NYN2gNoOf0k91hcbzs5AH19wDxjuHXCveklWq5AJdA==} '@prisma/driver-adapter-utils@6.20.0-integration-next.8': resolution: {integrity: sha512-TXpFugr3sCl2bHechoG3p9mvlq2Z3GgA0Cp73lUOEWQyUuoG8NW/4UA56Ax1r5fBUAs9hKbr20Ld6wKCZhnz8Q==} + '@prisma/driver-adapter-utils@7.4.2': + resolution: {integrity: sha512-REdjFpT/ye9KdDs+CXAXPIbMQkVLhne9G5Pe97sNY4Ovx4r2DAbWM9hOFvvB1Oq8H8bOCdu0Ri3AoGALquQqVw==} + '@prisma/engines-version@4.9.0-42.ceb5c99003b99c9ee2c1d2e618e359c14aef2ea5': resolution: {integrity: sha512-M16aibbxi/FhW7z1sJCX8u+0DriyQYY5AyeTH7plQm9MLnURoiyn3CZBqAyIoQ+Z1pS77usCIibYJWSgleBMBA==} @@ -7030,6 +7280,9 @@ packages: '@prisma/engines-version@6.20.0-11.next-80ee0a44bf5668992b0c909c946a755b86b56c95': resolution: {integrity: sha512-DqrQqRIgeocvWpgN7t9PymiJdV8ISSSrZCuilAtpKEaKIt4JUGIxsAdWNMRSHk188hYA2W1YFG5KvWUYBaCO1A==} + '@prisma/engines-version@7.5.0-10.94a226be1cf2967af2541cca5529f0f7ba866919': + resolution: {integrity: sha512-5FIKY3KoYQlBuZC2yc16EXfVRQ8HY+fLqgxkYfWCtKhRb3ajCRzP/rPeoSx11+NueJDANdh4hjY36mdmrTcGSg==} + '@prisma/engines@6.14.0': resolution: {integrity: sha512-LhJjqsALFEcoAtF07nSaOkVguaxw/ZsgfROIYZ8bAZDobe7y8Wy+PkYQaPOK1iLSsFgV2MhCO/eNrI1gdSOj6w==} @@ -7042,6 +7295,9 @@ packages: '@prisma/engines@6.20.0-integration-next.8': resolution: {integrity: sha512-XdzTxN0PFLIW2DcprG9xlMy39FrsjxW5J2qtHQ58FBtbllHSZGD0pK2nzATw5dRh7nGhmX+uNA02cqHv5oND3A==} + '@prisma/engines@7.4.2': + resolution: {integrity: sha512-B+ZZhI4rXlzjVqRw/93AothEKOU5/x4oVyJFGo9RpHPnBwaPwk4Pi0Q4iGXipKxeXPs/dqljgNBjK0m8nocOJA==} + '@prisma/fetch-engine@6.14.0': resolution: {integrity: sha512-MPzYPOKMENYOaY3AcAbaKrfvXVlvTc6iHmTXsp9RiwCX+bPyfDMqMFVUSVXPYrXnrvEzhGHfyiFy0PRLHPysNg==} @@ -7054,6 +7310,9 @@ packages: '@prisma/fetch-engine@6.20.0-integration-next.8': 
resolution: {integrity: sha512-zVNM5Q1hFclpqD1y7wujDzyc3l01S8ZMuP0Zddzuda4LOA7/F2enjro48VcD2/fxkBgzkkmO/quLOGnbQDKO7g==} + '@prisma/fetch-engine@7.4.2': + resolution: {integrity: sha512-f/c/MwYpdJO7taLETU8rahEstLeXfYgQGlz5fycG7Fbmva3iPdzGmjiSWHeSWIgNnlXnelUdCJqyZnFocurZuA==} + '@prisma/generator-helper@4.16.2': resolution: {integrity: sha512-bMOH7y73Ui7gpQrioFeavMQA+Tf8ksaVf8Nhs9rQNzuSg8SSV6E9baczob0L5KGZTSgYoqnrRxuo03kVJYrnIg==} @@ -7069,6 +7328,12 @@ packages: '@prisma/get-platform@6.20.0-integration-next.8': resolution: {integrity: sha512-21jEfhFpC8FuvPD7JEf1Qu02engBCBa3+1il3UiyHKcKS3Kbp9IgR+DVqqrqSWIGJg8+1oTfF/3AgbjunaQ1Ag==} + '@prisma/get-platform@7.2.0': + resolution: {integrity: sha512-k1V0l0Td1732EHpAfi2eySTezyllok9dXb6UQanajkJQzPUGi3vO2z7jdkz67SypFTdmbnyGYxvEvYZdZsMAVA==} + + '@prisma/get-platform@7.4.2': + resolution: {integrity: sha512-UTnChXRwiauzl/8wT4hhe7Xmixja9WE28oCnGpBtRejaHhvekx5kudr3R4Y9mLSA0kqGnAMeyTiKwDVMjaEVsw==} + '@prisma/instrumentation@6.11.1': resolution: {integrity: sha512-mrZOev24EDhnefmnZX7WVVT7v+r9LttPRqf54ONvj6re4XMF7wFTpK2tLJi4XHB7fFp/6xhYbgRel8YV7gQiyA==} peerDependencies: @@ -7079,6 +7344,9 @@ packages: peerDependencies: '@opentelemetry/api': ^1.8 + '@prisma/query-plan-executor@7.2.0': + resolution: {integrity: sha512-EOZmNzcV8uJ0mae3DhTsiHgoNCuu1J9mULQpGCh62zN3PxPTd+qI9tJvk5jOst8WHKQNwJWR3b39t0XvfBB0WQ==} + '@prisma/studio-core-licensed@0.6.0': resolution: {integrity: sha512-LNC8ohLosuWz6n9oKNqfR5Ep/JYiPavk4RxrU6inOS4LEvMQts8N+Vtt7NAB9i06BaiIRKnPsg1Hcaao5pRjSw==} peerDependencies: @@ -7086,6 +7354,13 @@ packages: react: ^18.0.0 || ^19.0.0 react-dom: ^18.0.0 || ^19.0.0 + '@prisma/studio-core@0.13.1': + resolution: {integrity: sha512-agdqaPEePRHcQ7CexEfkX1RvSH9uWDb6pXrZnhCRykhDFAV0/0P3d07WtfiY8hZWb7oRU4v+NkT4cGFHkQJIPg==} + peerDependencies: + '@types/react': ^18.0.0 || ^19.0.0 + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + '@protobuf-ts/runtime@2.11.1': resolution: {integrity: 
sha512-KuDaT1IfHkugM2pyz+FwiY80ejWrkH1pAtOBOZFuR6SXEFTsnb/jiQWQ1rCIrcKx2BtyxnxW6BWwsVSA/Ie+WQ==} @@ -11027,6 +11302,9 @@ packages: '@types/trusted-types@2.0.7': resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + '@types/turndown@5.0.6': + resolution: {integrity: sha512-ru00MoyeeouE5BX4gRL+6m/BsDfbRayOskWqUvh7CLGW+UXxHQItqALa38kKnOiZPqJrtzJUgAC2+F0rL1S4Pg==} + '@types/unist@2.0.6': resolution: {integrity: sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ==} @@ -11551,6 +11829,12 @@ packages: peerDependencies: zod: ^3.25.76 || ^4.1.8 + ai@6.0.49: + resolution: {integrity: sha512-LABniBX/0R6Tv+iUK5keUZhZLaZUe4YjP5M2rZ4wAdZ8iKV3EfTAoJxuL1aaWTSJKIilKa9QUEkCgnp89/32bw==} + engines: {node: '>=18'} + peerDependencies: + zod: ^3.25.76 || ^4.1.8 + ajv-formats@2.1.1: resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==} peerDependencies: @@ -11790,6 +12074,10 @@ packages: aws-sign2@0.7.0: resolution: {integrity: sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==} + aws-ssl-profiles@1.1.2: + resolution: {integrity: sha512-NZKeq9AfyQvEeNlN0zSYAaWrmBffJh3IELMZfRpJVWgrpEbtEpnjvzqBPf+mxoI287JohRDoa+/nsfqqiZmF6g==} + engines: {node: '>= 6.0.0'} + aws4@1.12.0: resolution: {integrity: sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==} @@ -12162,6 +12450,9 @@ packages: peerDependencies: chevrotain: ^11.0.0 + chevrotain@10.5.0: + resolution: {integrity: sha512-Pkv5rBY3+CsHOYfV5g/Vs5JY9WTHHDEKOlohI2XeygaZhUeqhAlldZ8Hz9cRmxu709bvS08YzxHdTPHhffc13A==} + chevrotain@11.0.3: resolution: {integrity: sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==} @@ -14087,6 +14378,10 @@ packages: resolution: {integrity: 
sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==} engines: {node: '>=14'} + foreground-child@3.3.1: + resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} + engines: {node: '>=14'} + forever-agent@0.6.1: resolution: {integrity: sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==} @@ -14210,6 +14505,9 @@ packages: functions-have-names@1.2.3: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} + generate-function@2.3.1: + resolution: {integrity: sha512-eeB5GfMNeevm/GRYq20ShmsaGcmI81kIX2K9XQx5miC8KdHaC6Jm0qQ8ZNeGOi7wYB8OsdxKs+Y2oVuTFuVwKQ==} + generic-names@4.0.0: resolution: {integrity: sha512-ySFolZQfw9FoDb3ed9d80Cm9f0+r7qj+HJkWjeD9RBfpxEVTlVhol+gvaQB/78WbwYfbnNh8nWHHBSlg072y6A==} @@ -14233,6 +14531,9 @@ packages: resolution: {integrity: sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==} engines: {node: '>=6'} + get-port-please@3.2.0: + resolution: {integrity: sha512-I9QVvBw5U/hw3RmWpYKRumUeaDgxTPd401x364rLmWBJcOQ753eov1eTgzDqRG9bqFIfDc7gfzcQEWrUri3o1A==} + get-port@5.1.1: resolution: {integrity: sha512-g/Q1aTSDOxFpchXC4i8ZWvxA1lnPqx/JHqcpIw0/LX9T8x/GBbi6YnlN5nhaKIFkT8oFsscUKgDJYxfwfS6QsQ==} engines: {node: '>=8'} @@ -14384,6 +14685,9 @@ packages: resolution: {integrity: sha512-rEDCuqUQ4tbD78TpzsMtt5OIf0cBCSDWSJtUDaF6JsAh+k0v9r++NzxNEG87oDZx9ZwGhD8DaezR2L/yrw0Jdw==} engines: {node: '>=10'} + grammex@3.1.12: + resolution: {integrity: sha512-6ufJOsSA7LcQehIJNCO7HIBykfM7DXQual0Ny780/DEcJIpBlHRvcqEBWGPYd7hrXL2GJ3oJI1MIhaXjWmLQOQ==} + grapheme-splitter@1.0.4: resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} @@ -14396,6 +14700,9 @@ packages: engines: {node: '>=14.0.0'} hasBin: true + graphmatch@1.1.1: + resolution: {integrity: 
sha512-5ykVn/EXM1hF0XCaWh05VbYvEiOL2lY1kBxZtaYsyvjp7cmWOU1XsAdfQBwClraEofXDT197lFbXOEVMHpvQOg==} + graphql@16.6.0: resolution: {integrity: sha512-KPIBPDlW7NxrbT/eh4qPXz5FiFdL5UbaA0XUNz2Rp3Z3hqBSkbj0GVjwFDztsWVauZUWsbKHgMg++sk8UX0bkw==} engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} @@ -14487,6 +14794,9 @@ packages: hast-util-raw@9.1.0: resolution: {integrity: sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==} + hast-util-sanitize@5.0.2: + resolution: {integrity: sha512-3yTWghByc50aGS7JlGhk61SPenfE/p1oaFeNwkOOyrscaOkMGrcW9+Cy/QAIOBpZxP1yqDIzFMR0+Np0i0+usg==} + hast-util-to-estree@2.1.0: resolution: {integrity: sha512-Vwch1etMRmm89xGgz+voWXvVHba2iiMdGMKmaMfYt35rbVtFDq8JNwwAIvi8zHMkO6Gvqo9oTMwJTmzVRfXh4g==} @@ -14521,6 +14831,10 @@ packages: hoist-non-react-statics@3.3.2: resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} + hono@4.11.4: + resolution: {integrity: sha512-U7tt8JsyrxSRKspfhtLET79pU8K+tInj5QZXs1jSugO1Vq5dFj3kmZsRldo29mTBfcjDRVRXrEZ6LS63Cog9ZA==} + engines: {node: '>=16.9.0'} + hono@4.11.8: resolution: {integrity: sha512-eVkB/CYCCei7K2WElZW9yYQFWssG0DhaDhVvr7wy5jJ22K+ck8fWW0EsLpB0sITUTvPnc97+rrbQqIr5iqiy9Q==} engines: {node: '>=16.9.0'} @@ -14575,6 +14889,9 @@ packages: resolution: {integrity: sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==} engines: {node: '>=0.8', npm: '>=1.3.7'} + http-status-codes@2.3.0: + resolution: {integrity: sha512-RJ8XvFvpPM/Dmc5SV+dC4y5PCeOhT3x1Hq0NU3rjGeg5a/CqlhZ7uudknPwZFz4aeAXDcbAyaeP7GAo9lvngtA==} + https-proxy-agent@5.0.1: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} @@ -14906,6 +15223,9 @@ packages: is-promise@4.0.0: resolution: {integrity: sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==} + 
is-property@1.0.2: + resolution: {integrity: sha512-Ks/IoX00TtClbGQr4TWXemAnktAQvYB7HzcCxDGqEZU6oCmb2INHuOoKxbtR+HFkmYWBKv/dOZtGRiAjDhj92g==} + is-reference@3.0.3: resolution: {integrity: sha512-ixkJoqQvAP88E6wLydLGGqCJsrFUnqoH6HnaczB8XmDH1oaWU+xxdptvikTgaEhtZ53Ky6YXiBuUI2WXLMCwjw==} @@ -15584,6 +15904,10 @@ packages: resolution: {integrity: sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==} engines: {node: '>=12'} + lru.min@1.1.4: + resolution: {integrity: sha512-DqC6n3QQ77zdFpCMASA1a3Jlb64Hv2N2DciFGkO/4L9+q/IpIAuRlKOvCXabtRW6cQf8usbmM6BE/TOPysCdIA==} + engines: {bun: '>=1.0.0', deno: '>=1.30.0', node: '>=8.0.0'} + lucide-react@0.229.0: resolution: {integrity: sha512-b0/KSFXhPi++vUbnYEDUgP8Z8Rw9MQpRfBr+dRZNPMT3FD1HrVgMHXhSpkm9ZrrEtuqIfHf/O+tAGmw4WOmIog==} peerDependencies: @@ -15659,6 +15983,11 @@ packages: engines: {node: '>= 20'} hasBin: true + marked@17.0.1: + resolution: {integrity: sha512-boeBdiS0ghpWcSwoNm/jJBwdpFaMnZWRzjA6SkUMYb40SVaN1x7mmfGKp0jvexGcx+7y2La5zRZsYFZI6Qpypg==} + engines: {node: '>= 20'} + hasBin: true + marked@4.2.5: resolution: {integrity: sha512-jPueVhumq7idETHkb203WDD4fMA3yV9emQ5vLwop58lu8bTclMghBWcYAavlDqIEMaisADinV1TooIFCfqOsYQ==} engines: {node: '>= 12'} @@ -16221,9 +16550,17 @@ packages: resolution: {integrity: sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} hasBin: true + mysql2@3.15.3: + resolution: {integrity: sha512-FBrGau0IXmuqg4haEZRBfHNWB5mUARw6hNwPDXXGg0XzVJ50mr/9hb267lvpVMnhZ1FON3qNd4Xfcez1rbFwSg==} + engines: {node: '>= 8.0'} + mz@2.7.0: resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==} + named-placeholders@1.1.6: + resolution: {integrity: sha512-Tz09sEL2EEuv5fFowm419c1+a/jSMiBjI9gHxVLrVdbUkkNUUfjsVYs9pVZu5oCon/kmRh9TfLEObFtkVxmY0w==} + engines: {node: '>=8.0.0'} + nan@2.23.1: resolution: {integrity: 
sha512-r7bBUGKzlqk8oPBDYxt6Z0aEdF1G1rwlMcLk8LCOMbOzf0mG+JUfUzG4fIMWwHWP0iyaLWEQZJmtB7nOHEm/qw==} @@ -16350,6 +16687,28 @@ packages: sass: optional: true + next@15.3.3: + resolution: {integrity: sha512-JqNj29hHNmCLtNvd090SyRbXJiivQ+58XjCcrC50Crb5g5u2zi7Y2YivbsEfzk6AtVI80akdOQbaMZwWB1Hthw==} + engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} + deprecated: This version has a security vulnerability. Please upgrade to a patched version. See https://nextjs.org/blog/CVE-2025-66478 for more details. + hasBin: true + peerDependencies: + '@opentelemetry/api': ^1.1.0 + '@playwright/test': ^1.41.2 + babel-plugin-react-compiler: '*' + react: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + react-dom: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + sass: ^1.3.0 + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@playwright/test': + optional: true + babel-plugin-react-compiler: + optional: true + sass: + optional: true + next@15.4.8: resolution: {integrity: sha512-jwOXTz/bo0Pvlf20FSb6VXVeWRssA2vbvq9SdrOPEg9x8E1B27C2rQtvriAn600o9hH61kjrVRexEffv3JybuA==} engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0} @@ -17495,6 +17854,19 @@ packages: typescript: optional: true + prisma@7.4.2: + resolution: {integrity: sha512-2bP8Ruww3Q95Z2eH4Yqh4KAENRsj/SxbdknIVBfd6DmjPwmpsC4OVFMLOeHt6tM3Amh8ebjvstrUz3V/hOe1dA==} + engines: {node: ^20.19 || ^22.12 || >=24.0} + hasBin: true + peerDependencies: + better-sqlite3: '>=9.0.0' + typescript: 5.5.4 + peerDependenciesMeta: + better-sqlite3: + optional: true + typescript: + optional: true + prismjs@1.29.0: resolution: {integrity: sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==} engines: {node: '>=6'} @@ -17977,6 +18349,9 @@ packages: regex@6.0.1: resolution: {integrity: sha512-uorlqlzAKjKQZ5P+kTJr3eeJGSVroLKoHmquUj4zHWuR+hEyNqlXsSKlYYF5F4NI6nl7tWCs0apKJ0lmfsXAPA==} + regexp-to-ast@0.5.0: + resolution: {integrity: 
sha512-tlbJqcMHnPKI9zSrystikWKwHkBqu2a/Sgw01h3zFjvYrMxEDYHzzoMZnUrbIfpTFEsoRnnviOXNCzFiSc54Qw==} + regexp.prototype.flags@1.4.3: resolution: {integrity: sha512-fjggEOO3slI6Wvgjwflkc4NFRCTZAu5CnNfBd5qOMYhWdn67nJBBu34/TkD++eeFmd8C9r9jfXJ27+nSiRkSUA==} engines: {node: '>= 0.4'} @@ -18003,12 +18378,18 @@ packages: rehype-harden@1.1.5: resolution: {integrity: sha512-JrtBj5BVd/5vf3H3/blyJatXJbzQfRT9pJBmjafbTaPouQCAKxHwRyCc7dle9BXQKxv4z1OzZylz/tNamoiG3A==} + rehype-harden@1.1.8: + resolution: {integrity: sha512-Qn7vR1xrf6fZCrkm9TDWi/AB4ylrHy+jqsNm1EHOAmbARYA6gsnVJBq/sdBh6kmT4NEZxH5vgIjrscefJAOXcw==} + rehype-katex@7.0.1: resolution: {integrity: sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==} rehype-raw@7.0.0: resolution: {integrity: sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==} + rehype-sanitize@6.0.0: + resolution: {integrity: sha512-CsnhKNsyI8Tub6L4sm5ZFsme4puGfc6pYylvXo1AeqaGbjOYyzNv3qZPwvs0oMJ39eryyeOdmxwUIo94IpEhqg==} + remark-frontmatter@4.0.1: resolution: {integrity: sha512-38fJrB0KnmD3E33a5jZC/5+gGAC2WKNiPw1/fdXJvijBlhA7RCsvJklrYJakS0HedninvaCYW8lQGf9C918GfA==} @@ -18037,9 +18418,18 @@ packages: remark-rehype@11.1.1: resolution: {integrity: sha512-g/osARvjkBXb6Wo0XvAeXQohVta8i84ACbenPpoSsxTOQH/Ae0/RGP4WZgnMH5pMLpsj4FG7OHmcIcXxpza8eQ==} + remark-rehype@11.1.2: + resolution: {integrity: sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==} + remark-stringify@11.0.0: resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + remeda@2.33.4: + resolution: {integrity: sha512-ygHswjlc/opg2VrtiYvUOPLjxjtdKvjGz1/plDhkG66hjNjFr1xmfrs2ClNFo/E6TyUFiwYNh53bKV26oBoMGQ==} + + remend@1.2.1: + resolution: {integrity: sha512-4wC12bgXsfKAjF1ewwkNIQz5sqewz/z1xgIgjEMb3r1pEytQ37F0Cm6i+OhbTWEvguJD7lhOUJhK5fSasw9f0w==} + remix-auth-email-link@2.0.2: resolution: {integrity: 
sha512-Lze9c50fsqBpixXQKe37wI2Dm4rlYYkNA6Eskxk8erQ7tbyN8xiFXOgo7Y3Al0SSjzkezw8au3uc2vCLJ8A5mQ==} peerDependencies: @@ -18389,6 +18779,9 @@ packages: resolution: {integrity: sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==} engines: {node: '>= 18'} + seq-queue@0.0.5: + resolution: {integrity: sha512-hr3Wtp/GZIc/6DAGPDcV4/9WoZhjrkXsi5B/07QgX8tsdc6ilr7BFM6PM6rbdAX1kFSDYeZGLipIZZKyQP0O5Q==} + serialize-javascript@6.0.1: resolution: {integrity: sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==} @@ -18661,6 +19054,10 @@ packages: resolution: {integrity: sha512-mkpF+RG402P66VMsnQkWewTRzDBWfu9iLbOfxaW/nAKOS/2A9MheQmcU5cmX0D0At9azrorZwpvcBRNNBozACQ==} hasBin: true + sqlstring@2.3.3: + resolution: {integrity: sha512-qC9iz2FlN7DQl3+wjwn3802RTyjCx7sDvfQEXchwa6CWOx07/WVfh91gBmQ9fahw8snwGEWU3xGzOt4tFyHLxg==} + engines: {node: '>= 0.6'} + sqs-consumer@7.5.0: resolution: {integrity: sha512-aY3akgMjuK1aj4E7ZVAURUUnC8aNgUBES+b4SN+6ccMmJhi37MamWl7g1JbPow8sjIp1fBPz1bXCCDJmtjOTAg==} engines: {node: '>=18.0.0'} @@ -18713,6 +19110,9 @@ packages: resolution: {integrity: sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==} engines: {node: '>= 0.8'} + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + std-env@3.7.0: resolution: {integrity: sha512-JPbdCEQLj1w5GilpiHAx3qJvFndqybBysA3qUOnznweH4QbNYUsW/ea8QzSrnh0vNsezMMw5bcVool8lM0gwzg==} @@ -18740,6 +19140,12 @@ packages: peerDependencies: react: ^18.0.0 || ^19.0.0 + streamdown@2.3.0: + resolution: {integrity: sha512-OqS3by/lt91lSicE8RQP2nTsYI6Q/dQgGP2vcyn9YesCmRHhNjswAuBAZA1z0F4+oBU3II/eV51LqjCqwTb1lw==} + peerDependencies: + react: ^18.0.0 || ^19.0.0 + react-dom: ^18.0.0 || ^19.0.0 + streamsearch@1.1.0: resolution: {integrity: 
sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} engines: {node: '>=10.0.0'} @@ -18980,6 +19386,9 @@ packages: tailwind-merge@3.3.1: resolution: {integrity: sha512-gBXpgUm/3rp1lMZZrM/w7D8GKqshif0zAymAhbCyIt8KMe+0v9DQ7cdYLR4FHH/cKpdTXb+A/tKKU3eolfsI+g==} + tailwind-merge@3.5.0: + resolution: {integrity: sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A==} + tailwind-scrollbar-hide@1.1.7: resolution: {integrity: sha512-X324n9OtpTmOMqEgDUEA/RgLrNfBF/jwJdctaPZDzB3mppxJk7TLIDmOreEDm1Bq4R9LSPu4Epf8VSdovNU+iA==} @@ -19047,21 +19456,22 @@ packages: tar@6.1.13: resolution: {integrity: sha512-jdIBIN6LTIe2jqzay/2vtYLlBHa3JF42ot3h1dW8Q0PaAG4v8rm0cvpVePtau5C6OKXGGcgO9q2AMNSWxiLqKw==} engines: {node: '>=10'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me tar@6.2.1: resolution: {integrity: sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==} engines: {node: '>=10'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me tar@7.4.3: resolution: {integrity: sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==} engines: {node: '>=18'} - deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exhorbitant rates) by contacting i@izs.me + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me tar@7.5.6: resolution: {integrity: sha512-xqUeu2JAIJpXyvskvU3uvQW8PAmHrtXp2KDuMJwQqW8Sqq0CaZBAQ+dKS3RBXVhU4wC5NjAdKrmh84241gO9cA==} engines: {node: '>=18'} + deprecated: Old versions of tar are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. 
Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me tdigest@0.1.2: resolution: {integrity: sha512-+G0LLgjjo9BZX2MfdvPfH+MKLCrxlXSYec5DaPYP1fe6Iyhf0/fSmJ0bFiZ1F8BT6cGXl2LpltQptzjXKWEkKA==} @@ -19464,6 +19874,9 @@ packages: resolution: {integrity: sha512-U4gKCWcKgLcCjQd4Pl8KJdfEKumpyWbzRu75A6FCj6Ctea1PIm58W6Ltw1QXKqHrl2pF9e1raAskf/h6dlrPCA==} hasBin: true + turndown@7.2.2: + resolution: {integrity: sha512-1F7db8BiExOKxjSMU2b7if62D/XOyQyZbPKq/nUwopfgnHlqXHqQ0lvfUTeUIr1lZJzOPFn43dODyMSIfvWRKQ==} + tw-animate-css@1.2.4: resolution: {integrity: sha512-yt+HkJB41NAvOffe4NweJU6fLqAlVx/mBX6XmHRp15kq0JxTtOKaIw8pVSWM1Z+n2nXtyi7cW6C9f0WG/F/QAQ==} @@ -19802,6 +20215,14 @@ packages: typescript: optional: true + valibot@1.2.0: + resolution: {integrity: sha512-mm1rxUsmOxzrwnX5arGS+U4T25RdvpPjPN4yR0u9pUBov9+zGVtO84tif1eY4r6zWxVxu3KzIyknJy3rxfRZZg==} + peerDependencies: + typescript: 5.5.4 + peerDependenciesMeta: + typescript: + optional: true + validate-npm-package-license@3.0.4: resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} @@ -20260,6 +20681,9 @@ packages: yup@1.7.0: resolution: {integrity: sha512-VJce62dBd+JQvoc+fCVq+KZfPHr+hXaxCcVgotfwWvlR0Ja3ffYKaJBT8rptPOSKOGJDCUnW2C2JWpud7aRP6Q==} + zeptomatch@2.1.0: + resolution: {integrity: sha512-KiGErG2J0G82LSpniV0CtIzjlJ10E04j02VOudJsPyPwNZgGnRKQy7I1R7GMyg/QswnE4l7ohSGrQbQbjXPPDA==} + zip-stream@6.0.1: resolution: {integrity: sha512-zK7YHHz4ZXpW89AHXUPbQVGKI7uvkd3hzusTdotCg1UxyaVtg0zFJSTfW/Dq5f7OBBVnq6cZIaC8Ti4hb6dtCA==} engines: {node: '>= 14'} @@ -20327,6 +20751,12 @@ snapshots: '@ai-sdk/provider-utils': 3.0.3(zod@3.25.76) zod: 3.25.76 + '@ai-sdk/anthropic@3.0.54(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@ai-sdk/provider-utils': 4.0.17(zod@3.25.76) + zod: 3.25.76 + '@ai-sdk/gateway@1.0.6(zod@3.25.76)': dependencies: '@ai-sdk/provider': 2.0.0 @@ -20347,6 +20777,13 @@ snapshots: '@vercel/oidc': 3.0.5 zod: 
3.25.76 + '@ai-sdk/gateway@3.0.22(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.9(zod@3.25.76) + '@vercel/oidc': 3.1.0 + zod: 3.25.76 + '@ai-sdk/gateway@3.0.66(zod@3.25.76)': dependencies: '@ai-sdk/provider': 3.0.8 @@ -20444,6 +20881,13 @@ snapshots: eventsource-parser: 3.0.6 zod: 3.25.76 + '@ai-sdk/provider-utils@4.0.17(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.8 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 3.25.76 + '@ai-sdk/provider-utils@4.0.19(zod@3.25.76)': dependencies: '@ai-sdk/provider': 3.0.8 @@ -20451,6 +20895,13 @@ snapshots: eventsource-parser: 3.0.6 zod: 3.25.76 + '@ai-sdk/provider-utils@4.0.9(zod@3.25.76)': + dependencies: + '@ai-sdk/provider': 3.0.5 + '@standard-schema/spec': 1.1.0 + eventsource-parser: 3.0.6 + zod: 3.25.76 + '@ai-sdk/provider@0.0.26': dependencies: json-schema: 0.4.0 @@ -20475,6 +20926,10 @@ snapshots: dependencies: json-schema: 0.4.0 + '@ai-sdk/provider@3.0.5': + dependencies: + json-schema: 0.4.0 + '@ai-sdk/provider@3.0.8': dependencies: json-schema: 0.4.0 @@ -20509,6 +20964,16 @@ snapshots: optionalDependencies: zod: 3.25.76 + '@ai-sdk/react@3.0.51(react@19.1.0)(zod@3.25.76)': + dependencies: + '@ai-sdk/provider-utils': 4.0.9(zod@3.25.76) + ai: 6.0.49(zod@3.25.76) + react: 19.1.0 + swr: 2.2.5(react@19.1.0) + throttleit: 2.1.0 + transitivePeerDependencies: + - zod + '@ai-sdk/ui-utils@1.0.0(zod@3.25.76)': dependencies: '@ai-sdk/provider': 1.0.0 @@ -22987,12 +23452,23 @@ snapshots: human-id: 1.0.2 prettier: 2.8.8 + '@chevrotain/cst-dts-gen@10.5.0': + dependencies: + '@chevrotain/gast': 10.5.0 + '@chevrotain/types': 10.5.0 + lodash: 4.17.23 + '@chevrotain/cst-dts-gen@11.0.3': dependencies: '@chevrotain/gast': 11.0.3 '@chevrotain/types': 11.0.3 lodash-es: 4.17.21 + '@chevrotain/gast@10.5.0': + dependencies: + '@chevrotain/types': 10.5.0 + lodash: 4.17.23 + '@chevrotain/gast@11.0.3': dependencies: '@chevrotain/types': 11.0.3 @@ -23000,8 +23476,12 @@ 
snapshots: '@chevrotain/regexp-to-ast@11.0.3': {} + '@chevrotain/types@10.5.0': {} + '@chevrotain/types@11.0.3': {} + '@chevrotain/utils@10.5.0': {} + '@chevrotain/utils@11.0.3': {} '@clack/core@0.5.0': @@ -23212,6 +23692,16 @@ snapshots: optionalDependencies: '@rollup/rollup-darwin-arm64': 4.53.2 + '@electric-sql/pglite-socket@0.0.20(@electric-sql/pglite@0.3.15)': + dependencies: + '@electric-sql/pglite': 0.3.15 + + '@electric-sql/pglite-tools@0.2.20(@electric-sql/pglite@0.3.15)': + dependencies: + '@electric-sql/pglite': 0.3.15 + + '@electric-sql/pglite@0.3.15': {} + '@electric-sql/react@0.3.5(react@18.2.0)': dependencies: '@electric-sql/client': 0.4.0 @@ -23988,6 +24478,10 @@ snapshots: dependencies: hono: 4.5.11 + '@hono/node-server@1.19.9(hono@4.11.4)': + dependencies: + hono: 4.11.4 + '@hono/node-server@1.19.9(hono@4.11.8)': dependencies: hono: 4.11.8 @@ -24472,6 +24966,8 @@ snapshots: '@microsoft/fetch-event-source@2.0.1': {} + '@mixmark-io/domino@2.2.0': {} + '@modelcontextprotocol/sdk@1.25.2(hono@4.11.8)(supports-color@10.0.0)(zod@3.25.76)': dependencies: '@hono/node-server': 1.19.9(hono@4.11.8) @@ -24516,6 +25012,11 @@ snapshots: transitivePeerDependencies: - supports-color + '@mrleebo/prisma-ast@0.13.1': + dependencies: + chevrotain: 10.5.0 + lilconfig: 2.1.0 + '@msgpack/msgpack@3.0.0-beta2': {} '@neondatabase/serverless@0.9.5': @@ -24535,6 +25036,8 @@ snapshots: '@next/env@15.2.4': {} + '@next/env@15.3.3': {} + '@next/env@15.4.8': {} '@next/env@15.5.6': {} @@ -24548,6 +25051,9 @@ snapshots: '@next/swc-darwin-arm64@15.2.4': optional: true + '@next/swc-darwin-arm64@15.3.3': + optional: true + '@next/swc-darwin-arm64@15.4.8': optional: true @@ -24563,6 +25069,9 @@ snapshots: '@next/swc-darwin-x64@15.2.4': optional: true + '@next/swc-darwin-x64@15.3.3': + optional: true + '@next/swc-darwin-x64@15.4.8': optional: true @@ -24578,6 +25087,9 @@ snapshots: '@next/swc-linux-arm64-gnu@15.2.4': optional: true + '@next/swc-linux-arm64-gnu@15.3.3': + optional: true + 
'@next/swc-linux-arm64-gnu@15.4.8': optional: true @@ -24593,6 +25105,9 @@ snapshots: '@next/swc-linux-arm64-musl@15.2.4': optional: true + '@next/swc-linux-arm64-musl@15.3.3': + optional: true + '@next/swc-linux-arm64-musl@15.4.8': optional: true @@ -24608,6 +25123,9 @@ snapshots: '@next/swc-linux-x64-gnu@15.2.4': optional: true + '@next/swc-linux-x64-gnu@15.3.3': + optional: true + '@next/swc-linux-x64-gnu@15.4.8': optional: true @@ -24623,6 +25141,9 @@ snapshots: '@next/swc-linux-x64-musl@15.2.4': optional: true + '@next/swc-linux-x64-musl@15.3.3': + optional: true + '@next/swc-linux-x64-musl@15.4.8': optional: true @@ -24638,6 +25159,9 @@ snapshots: '@next/swc-win32-arm64-msvc@15.2.4': optional: true + '@next/swc-win32-arm64-msvc@15.3.3': + optional: true + '@next/swc-win32-arm64-msvc@15.4.8': optional: true @@ -24659,6 +25183,9 @@ snapshots: '@next/swc-win32-x64-msvc@15.2.4': optional: true + '@next/swc-win32-x64-msvc@15.3.3': + optional: true + '@next/swc-win32-x64-msvc@15.4.8': optional: true @@ -25548,8 +26075,18 @@ snapshots: transitivePeerDependencies: - pg-native + '@prisma/adapter-pg@7.4.2': + dependencies: + '@prisma/driver-adapter-utils': 7.4.2 + pg: 8.16.3 + postgres-array: 3.0.4 + transitivePeerDependencies: + - pg-native + '@prisma/client-runtime-utils@6.20.0-integration-next.8': {} + '@prisma/client-runtime-utils@7.4.2': {} + '@prisma/client@4.9.0(prisma@6.14.0(magicast@0.3.5)(typescript@5.5.4))': dependencies: '@prisma/engines-version': 4.9.0-42.ceb5c99003b99c9ee2c1d2e618e359c14aef2ea5 @@ -25578,6 +26115,13 @@ snapshots: prisma: 6.20.0-integration-next.8(@types/react@19.2.14)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) typescript: 5.5.4 + '@prisma/client@7.4.2(prisma@7.4.2(@types/react@19.2.14)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4))(typescript@5.5.4)': + dependencies: + '@prisma/client-runtime-utils': 7.4.2 + optionalDependencies: + prisma: 
7.4.2(@types/react@19.2.14)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4) + typescript: 5.5.4 + '@prisma/config@6.14.0(magicast@0.3.5)': dependencies: c12: 3.1.0(magicast@0.3.5) @@ -25614,6 +26158,15 @@ snapshots: transitivePeerDependencies: - magicast + '@prisma/config@7.4.2(magicast@0.3.5)': + dependencies: + c12: 3.1.0(magicast@0.3.5) + deepmerge-ts: 7.1.5 + effect: 3.18.4 + empathic: 2.0.0 + transitivePeerDependencies: + - magicast + '@prisma/debug@4.16.2': dependencies: '@types/debug': 4.1.8 @@ -25630,6 +26183,32 @@ snapshots: '@prisma/debug@6.20.0-integration-next.8': {} + '@prisma/debug@7.2.0': {} + + '@prisma/debug@7.4.2': {} + + '@prisma/dev@0.20.0(typescript@5.5.4)': + dependencies: + '@electric-sql/pglite': 0.3.15 + '@electric-sql/pglite-socket': 0.0.20(@electric-sql/pglite@0.3.15) + '@electric-sql/pglite-tools': 0.2.20(@electric-sql/pglite@0.3.15) + '@hono/node-server': 1.19.9(hono@4.11.4) + '@mrleebo/prisma-ast': 0.13.1 + '@prisma/get-platform': 7.2.0 + '@prisma/query-plan-executor': 7.2.0 + foreground-child: 3.3.1 + get-port-please: 3.2.0 + hono: 4.11.4 + http-status-codes: 2.3.0 + pathe: 2.0.3 + proper-lockfile: 4.1.2 + remeda: 2.33.4 + std-env: 3.10.0 + valibot: 1.2.0(typescript@5.5.4) + zeptomatch: 2.1.0 + transitivePeerDependencies: + - typescript + '@prisma/driver-adapter-utils@6.16.0': dependencies: '@prisma/debug': 6.16.0 @@ -25638,6 +26217,10 @@ snapshots: dependencies: '@prisma/debug': 6.20.0-integration-next.8 + '@prisma/driver-adapter-utils@7.4.2': + dependencies: + '@prisma/debug': 7.4.2 + '@prisma/engines-version@4.9.0-42.ceb5c99003b99c9ee2c1d2e618e359c14aef2ea5': {} '@prisma/engines-version@6.14.0-25.717184b7b35ea05dfa71a3236b7af656013e1e49': {} @@ -25648,6 +26231,8 @@ snapshots: '@prisma/engines-version@6.20.0-11.next-80ee0a44bf5668992b0c909c946a755b86b56c95': {} + '@prisma/engines-version@7.5.0-10.94a226be1cf2967af2541cca5529f0f7ba866919': {} + '@prisma/engines@6.14.0': 
dependencies: '@prisma/debug': 6.14.0 @@ -25676,6 +26261,13 @@ snapshots: '@prisma/fetch-engine': 6.20.0-integration-next.8 '@prisma/get-platform': 6.20.0-integration-next.8 + '@prisma/engines@7.4.2': + dependencies: + '@prisma/debug': 7.4.2 + '@prisma/engines-version': 7.5.0-10.94a226be1cf2967af2541cca5529f0f7ba866919 + '@prisma/fetch-engine': 7.4.2 + '@prisma/get-platform': 7.4.2 + '@prisma/fetch-engine@6.14.0': dependencies: '@prisma/debug': 6.14.0 @@ -25700,6 +26292,12 @@ snapshots: '@prisma/engines-version': 6.20.0-11.next-80ee0a44bf5668992b0c909c946a755b86b56c95 '@prisma/get-platform': 6.20.0-integration-next.8 + '@prisma/fetch-engine@7.4.2': + dependencies: + '@prisma/debug': 7.4.2 + '@prisma/engines-version': 7.5.0-10.94a226be1cf2967af2541cca5529f0f7ba866919 + '@prisma/get-platform': 7.4.2 + '@prisma/generator-helper@4.16.2': dependencies: '@prisma/debug': 4.16.2 @@ -25725,6 +26323,14 @@ snapshots: dependencies: '@prisma/debug': 6.20.0-integration-next.8 + '@prisma/get-platform@7.2.0': + dependencies: + '@prisma/debug': 7.2.0 + + '@prisma/get-platform@7.4.2': + dependencies: + '@prisma/debug': 7.4.2 + '@prisma/instrumentation@6.11.1(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -25739,12 +26345,20 @@ snapshots: transitivePeerDependencies: - supports-color + '@prisma/query-plan-executor@7.2.0': {} + '@prisma/studio-core-licensed@0.6.0(@types/react@19.2.14)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': dependencies: '@types/react': 19.2.14 react: 19.1.0 react-dom: 19.1.0(react@19.1.0) + '@prisma/studio-core@0.13.1(@types/react@19.2.14)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)': + dependencies: + '@types/react': 19.2.14 + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + '@protobuf-ts/runtime@2.11.1': {} '@protobufjs/aspromise@1.1.2': {} @@ -31323,6 +31937,10 @@ snapshots: dependencies: '@types/react': 19.0.12 + '@types/react-dom@19.0.4(@types/react@19.2.14)': + dependencies: + '@types/react': 19.2.14 + '@types/react@18.2.48': 
dependencies: '@types/prop-types': 15.7.5 @@ -31446,6 +32064,8 @@ snapshots: '@types/trusted-types@2.0.7': optional: true + '@types/turndown@5.0.6': {} + '@types/unist@2.0.6': {} '@types/unist@3.0.3': {} @@ -31648,7 +32268,7 @@ snapshots: chalk: 4.1.2 css-what: 5.1.0 cssesc: 3.0.0 - csstype: 3.2.0 + csstype: 3.2.3 deep-object-diff: 1.1.9 deepmerge: 4.3.1 media-query-parser: 2.0.2 @@ -32140,6 +32760,14 @@ snapshots: '@opentelemetry/api': 1.9.0 zod: 3.25.76 + ai@6.0.49(zod@3.25.76): + dependencies: + '@ai-sdk/gateway': 3.0.22(zod@3.25.76) + '@ai-sdk/provider': 3.0.5 + '@ai-sdk/provider-utils': 4.0.9(zod@3.25.76) + '@opentelemetry/api': 1.9.0 + zod: 3.25.76 + ajv-formats@2.1.1(ajv@8.17.1): optionalDependencies: ajv: 8.17.1 @@ -32423,6 +33051,8 @@ snapshots: aws-sign2@0.7.0: {} + aws-ssl-profiles@1.1.2: {} + aws4@1.12.0: {} aws4fetch@1.0.18: {} @@ -32844,6 +33474,15 @@ snapshots: chevrotain: 11.0.3 lodash-es: 4.17.21 + chevrotain@10.5.0: + dependencies: + '@chevrotain/cst-dts-gen': 10.5.0 + '@chevrotain/gast': 10.5.0 + '@chevrotain/types': 10.5.0 + '@chevrotain/utils': 10.5.0 + lodash: 4.17.23 + regexp-to-ast: 0.5.0 + chevrotain@11.0.3: dependencies: '@chevrotain/cst-dts-gen': 11.0.3 @@ -33747,7 +34386,7 @@ snapshots: dom-helpers@5.2.1: dependencies: '@babel/runtime': 7.28.4 - csstype: 3.2.0 + csstype: 3.2.3 dom-serializer@2.0.0: dependencies: @@ -35161,6 +35800,11 @@ snapshots: cross-spawn: 7.0.6 signal-exit: 4.1.0 + foreground-child@3.3.1: + dependencies: + cross-spawn: 7.0.6 + signal-exit: 4.1.0 + forever-agent@0.6.1: {} form-data-encoder@1.7.2: {} @@ -35285,6 +35929,10 @@ snapshots: functions-have-names@1.2.3: {} + generate-function@2.3.1: + dependencies: + is-property: 1.0.2 + generic-names@4.0.0: dependencies: loader-utils: 3.2.1 @@ -35316,6 +35964,8 @@ snapshots: get-nonce@1.0.1: {} + get-port-please@3.2.0: {} + get-port@5.1.1: {} get-port@7.1.0: {} @@ -35516,6 +36166,8 @@ snapshots: chalk: 4.1.2 tinygradient: 1.1.5 + grammex@3.1.12: {} + 
grapheme-splitter@1.0.4: {} graphile-config@0.0.1-beta.8: @@ -35548,6 +36200,8 @@ snapshots: - supports-color - typescript + graphmatch@1.1.1: {} + graphql@16.6.0: {} gunzip-maybe@1.4.2: @@ -35661,6 +36315,12 @@ snapshots: web-namespaces: 2.0.1 zwitch: 2.0.4 + hast-util-sanitize@5.0.2: + dependencies: + '@types/hast': 3.0.4 + '@ungap/structured-clone': 1.3.0 + unist-util-position: 5.0.0 + hast-util-to-estree@2.1.0: dependencies: '@types/estree': 1.0.8 @@ -35754,6 +36414,8 @@ snapshots: dependencies: react-is: 16.13.1 + hono@4.11.4: {} + hono@4.11.8: {} hono@4.5.11: {} @@ -35818,6 +36480,8 @@ snapshots: jsprim: 1.4.2 sshpk: 1.18.0 + http-status-codes@2.3.0: {} + https-proxy-agent@5.0.1: dependencies: agent-base: 6.0.2 @@ -36108,6 +36772,8 @@ snapshots: is-promise@4.0.0: {} + is-property@1.0.2: {} + is-reference@3.0.3: dependencies: '@types/estree': 1.0.8 @@ -36702,6 +37368,8 @@ snapshots: lru-cache@7.18.3: {} + lru.min@1.1.4: {} + lucide-react@0.229.0(react@18.2.0): dependencies: react: 18.2.0 @@ -36774,6 +37442,8 @@ snapshots: marked@16.4.1: {} + marked@17.0.1: {} + marked@4.2.5: {} marked@7.0.4: {} @@ -37738,12 +38408,28 @@ snapshots: mustache@4.2.0: {} + mysql2@3.15.3: + dependencies: + aws-ssl-profiles: 1.1.2 + denque: 2.1.0 + generate-function: 2.3.1 + iconv-lite: 0.7.2 + long: 5.2.3 + lru.min: 1.1.4 + named-placeholders: 1.1.6 + seq-queue: 0.0.5 + sqlstring: 2.3.3 + mz@2.7.0: dependencies: any-promise: 1.3.0 object-assign: 4.1.1 thenify-all: 1.6.0 + named-placeholders@1.1.6: + dependencies: + lru.min: 1.1.4 + nan@2.23.1: optional: true @@ -37751,7 +38437,7 @@ snapshots: dependencies: '@jridgewell/sourcemap-codec': 1.5.5 css-tree: 1.1.3 - csstype: 3.2.0 + csstype: 3.2.3 fastest-stable-stringify: 2.0.2 inline-style-prefixer: 7.0.1 react: 18.2.0 @@ -37877,6 +38563,33 @@ snapshots: - '@babel/core' - babel-plugin-macros + next@15.3.3(@opentelemetry/api@1.9.0)(@playwright/test@1.37.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + dependencies: + '@next/env': 
15.3.3 + '@swc/counter': 0.1.3 + '@swc/helpers': 0.5.15 + busboy: 1.6.0 + caniuse-lite: 1.0.30001754 + postcss: 8.4.31 + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + styled-jsx: 5.1.6(react@19.1.0) + optionalDependencies: + '@next/swc-darwin-arm64': 15.3.3 + '@next/swc-darwin-x64': 15.3.3 + '@next/swc-linux-arm64-gnu': 15.3.3 + '@next/swc-linux-arm64-musl': 15.3.3 + '@next/swc-linux-x64-gnu': 15.3.3 + '@next/swc-linux-x64-musl': 15.3.3 + '@next/swc-win32-arm64-msvc': 15.3.3 + '@next/swc-win32-x64-msvc': 15.3.3 + '@opentelemetry/api': 1.9.0 + '@playwright/test': 1.37.0 + sharp: 0.34.5 + transitivePeerDependencies: + - '@babel/core' + - babel-plugin-macros + next@15.4.8(@opentelemetry/api@1.9.0)(@playwright/test@1.37.0)(react-dom@19.0.0(react@19.0.0))(react@19.0.0): dependencies: '@next/env': 15.4.8 @@ -39082,6 +39795,23 @@ snapshots: - react - react-dom + prisma@7.4.2(@types/react@19.2.14)(better-sqlite3@11.10.0)(magicast@0.3.5)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(typescript@5.5.4): + dependencies: + '@prisma/config': 7.4.2(magicast@0.3.5) + '@prisma/dev': 0.20.0(typescript@5.5.4) + '@prisma/engines': 7.4.2 + '@prisma/studio-core': 0.13.1(@types/react@19.2.14)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) + mysql2: 3.15.3 + postgres: 3.4.7 + optionalDependencies: + better-sqlite3: 11.10.0 + typescript: 5.5.4 + transitivePeerDependencies: + - '@types/react' + - magicast + - react + - react-dom + prismjs@1.29.0: {} prismjs@1.30.0: {} @@ -39898,6 +40628,8 @@ snapshots: dependencies: regex-utilities: 2.3.0 + regexp-to-ast@0.5.0: {} + regexp.prototype.flags@1.4.3: dependencies: call-bind: 1.0.8 @@ -39925,6 +40657,10 @@ snapshots: rehype-harden@1.1.5: {} + rehype-harden@1.1.8: + dependencies: + unist-util-visit: 5.0.0 + rehype-katex@7.0.1: dependencies: '@types/hast': 3.0.4 @@ -39941,6 +40677,11 @@ snapshots: hast-util-raw: 9.1.0 vfile: 6.0.3 + rehype-sanitize@6.0.0: + dependencies: + '@types/hast': 3.0.4 + hast-util-sanitize: 5.0.2 + 
remark-frontmatter@4.0.1: dependencies: '@types/mdast': 3.0.10 @@ -40014,12 +40755,24 @@ snapshots: unified: 11.0.5 vfile: 6.0.3 + remark-rehype@11.1.2: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + mdast-util-to-hast: 13.2.0 + unified: 11.0.5 + vfile: 6.0.3 + remark-stringify@11.0.0: dependencies: '@types/mdast': 4.0.4 mdast-util-to-markdown: 2.1.2 unified: 11.0.5 + remeda@2.33.4: {} + + remend@1.2.1: {} + remix-auth-email-link@2.0.2(@remix-run/server-runtime@2.1.0(typescript@5.5.4))(remix-auth@3.6.0(@remix-run/react@2.1.0(react-dom@18.2.0(react@18.2.0))(react@18.2.0)(typescript@5.5.4))(@remix-run/server-runtime@2.1.0(typescript@5.5.4))): dependencies: '@remix-run/server-runtime': 2.1.0(typescript@5.5.4) @@ -40425,6 +41178,8 @@ snapshots: transitivePeerDependencies: - supports-color + seq-queue@0.0.5: {} + serialize-javascript@6.0.1: dependencies: randombytes: 2.1.0 @@ -40820,6 +41575,8 @@ snapshots: argparse: 2.0.1 nearley: 2.20.1 + sqlstring@2.3.3: {} + sqs-consumer@7.5.0(@aws-sdk/client-sqs@3.454.0): dependencies: '@aws-sdk/client-sqs': 3.454.0 @@ -40885,6 +41642,8 @@ snapshots: statuses@2.0.2: {} + std-env@3.10.0: {} + std-env@3.7.0: {} std-env@3.8.1: {} @@ -40941,6 +41700,28 @@ snapshots: - '@types/react' - supports-color + streamdown@2.3.0(react-dom@19.1.0(react@19.1.0))(react@19.1.0): + dependencies: + clsx: 2.1.1 + hast-util-to-jsx-runtime: 2.3.6 + html-url-attributes: 3.0.1 + marked: 17.0.1 + react: 19.1.0 + react-dom: 19.1.0(react@19.1.0) + rehype-harden: 1.1.8 + rehype-raw: 7.0.0 + rehype-sanitize: 6.0.0 + remark-gfm: 4.0.1 + remark-parse: 11.0.0 + remark-rehype: 11.1.2 + remend: 1.2.1 + tailwind-merge: 3.5.0 + unified: 11.0.5 + unist-util-visit: 5.0.0 + unist-util-visit-parents: 6.0.1 + transitivePeerDependencies: + - supports-color + streamsearch@1.1.0: {} streamx@2.22.0: @@ -41177,6 +41958,12 @@ snapshots: react: 19.0.0 use-sync-external-store: 1.2.2(react@19.0.0) + swr@2.2.5(react@19.1.0): + dependencies: + client-only: 0.0.1 + 
react: 19.1.0 + use-sync-external-store: 1.2.2(react@19.1.0) + sync-content@2.0.1: dependencies: glob: 11.0.0 @@ -41214,6 +42001,8 @@ snapshots: tailwind-merge@3.3.1: {} + tailwind-merge@3.5.0: {} + tailwind-scrollbar-hide@1.1.7: {} tailwind-scrollbar@3.0.1(tailwindcss@3.4.1): @@ -41801,6 +42590,10 @@ snapshots: turbo-windows-64: 1.10.3 turbo-windows-arm64: 1.10.3 + turndown@7.2.2: + dependencies: + '@mixmark-io/domino': 2.2.0 + tw-animate-css@1.2.4: {} tweetnacl@0.14.5: {} @@ -42136,6 +42929,10 @@ snapshots: dependencies: react: 19.0.0 + use-sync-external-store@1.2.2(react@19.1.0): + dependencies: + react: 19.1.0 + util-deprecate@1.0.2: {} util@0.12.5: @@ -42171,6 +42968,10 @@ snapshots: optionalDependencies: typescript: 5.5.4 + valibot@1.2.0(typescript@5.5.4): + optionalDependencies: + typescript: 5.5.4 + validate-npm-package-license@3.0.4: dependencies: spdx-correct: 3.1.1 @@ -42324,11 +43125,11 @@ snapshots: '@vitest/spy': 3.1.4 '@vitest/utils': 3.1.4 chai: 5.2.0 - debug: 4.4.1 + debug: 4.4.3(supports-color@10.0.0) expect-type: 1.2.1 magic-string: 0.30.21 pathe: 2.0.3 - std-env: 3.9.0 + std-env: 3.10.0 tinybench: 2.9.0 tinyexec: 0.3.2 tinyglobby: 0.2.13 @@ -42689,6 +43490,11 @@ snapshots: toposort: 2.0.2 type-fest: 2.19.0 + zeptomatch@2.1.0: + dependencies: + grammex: 3.1.12 + graphmatch: 1.1.1 + zip-stream@6.0.1: dependencies: archiver-utils: 5.0.2 diff --git a/references/ai-chat/.gitignore b/references/ai-chat/.gitignore new file mode 100644 index 00000000000..30838110ecc --- /dev/null +++ b/references/ai-chat/.gitignore @@ -0,0 +1 @@ +lib/generated/ diff --git a/references/ai-chat/DEMO-SHORTHAND.md b/references/ai-chat/DEMO-SHORTHAND.md new file mode 100644 index 00000000000..ff846401a65 --- /dev/null +++ b/references/ai-chat/DEMO-SHORTHAND.md @@ -0,0 +1,51 @@ +# Demo Cheat Sheet + +## Pitch +- Started as workflow engine, now people building chat agents +- Deep AI SDK useChat integration +- One chat = one persistent isolated execution environment +- Two-way 
communication + +## 1. Preloading +- Click New Chat, DON'T type anything +- Flip to dashboard — run already executing +- "waiting for first message" span +- Zero cold start + +## 2. First message — PostHog query +- "What are the top events on our PostHog instance this week?" +- Watch posthogQuery tool call +- Real data, real HogQL +- Show trace: onTurnStart → run → tool call → response +- Run stays alive after turn + +## 3. Follow-up — incremental +- "Which of those are custom events vs autocapture?" +- Only new message sent, not full history +- Backend has context in memory +- Same execution environment + +## 4. Suspend/resume +- 60s idle → snapshot → suspend → zero compute +- Next message → restore → continue +- Same run, same state + +## 5. Tool subtasks +- "Can you research what's new with PostHog lately?" +- deepResearch = separate task, own container +- Streams progress back to chat +- Show trace: triggerAndSubscribe → child run +- Stop cancels child automatically + +## 6. Code +- All regions collapsed — show the skeleton +- idleTimeoutInSeconds, clientDataSchema +- Hooks: onPreload, onTurnStart, onTurnComplete, run +- Expand run: just return streamText() +- Expand onTurnComplete: background self-review, chat.inject() + +## Wrap +- One chat, one persistent run +- Lifecycle hooks, streaming, subtasks, background injection +- Snapshot/restore, full observability +- Available now diff --git a/references/ai-chat/DEMO.md b/references/ai-chat/DEMO.md new file mode 100644 index 00000000000..5cdb560317d --- /dev/null +++ b/references/ai-chat/DEMO.md @@ -0,0 +1,96 @@ +# AI Chat Demo Script (5-7 min) + +**Setup:** Three windows ready — ai-chat app (localhost:3000), Trigger.dev dashboard, VS Code with chat.ts open (all regions collapsed). + +**Audience:** PostHog event + +**Pitch:** Trigger.dev started as a workflow engine for async background tasks, but more and more people are using us to build full chat agents. 
We've built a deep integration with the AI SDK's useChat hook that connects a single chat to a single persisted, isolated, fully customizable execution environment with two-way communication. + +--- + +## 1. New chat — preloading (1 min) + +**Open localhost:3000. Click "New Chat".** + +> I haven't typed anything yet. But flip to the dashboard — + +**Switch to dashboard. Show the run that just started.** + +> There's already a run executing. This is preloading. When the user opens the chat page, the frontend calls `transport.preload()` which triggers the task immediately. It loaded the user from the DB, resolved the system prompt, created the chat record — all before the first keystroke. Imagine this in something like PostHog's AI product assistant — when a user opens the chat, you want the agent ready instantly, not cold-starting while they wait. + +**Point to the "waiting for first message" span.** + +--- + +## 2. First message + live analytics query (1.5 min) + +**Switch back to chat. Type: "What are the top events on our PostHog instance this week?"** + +> Now the first turn starts — and watch, it's going to call the posthogQuery tool. This tool writes a HogQL query and runs it against our actual PostHog instance — this is our real Trigger.dev analytics data. + +**Watch the tool call + results stream back.** + +> It wrote the query, executed it, and summarized the results — all in one turn. + +**Switch to dashboard, show turn 1 span with the tool call.** + +> Here's the lifecycle — onTurnStart persisted the message, run() called streamText, the LLM decided to use the posthogQuery tool, got the results, and generated a response. After the turn completes, the run doesn't end — it waits for the next message. Same process, same memory. + +--- + +## 3. Follow-up — incremental sends + persistent state (45s) + +**Switch back to chat. Send: "How does that compare to last week?" 
or "Which of those are custom events vs autocapture?"** + +**Switch to dashboard, show turn 2.** + +> Turn 2 — the frontend only sent the new user message, not the full conversation. The backend already has the accumulated context. It knows what "those" refers to because it's the same execution environment. For a product analytics assistant where users iteratively drill into their data, this is huge — no context lost between turns. + +--- + +## 4. Idle, suspend, resume (30s) + +> After 60 seconds of no messages, the run snapshots its state and suspends. Zero compute while the user is away. When they come back — maybe they went to check their PostHog dashboard based on what the agent told them and came back with a follow-up — we restore from the snapshot and continue. Same run, same state. + +**Point to the "suspended" span in the trace if visible.** + +--- + +## 5. Tool subtasks (1 min) + +**Switch back to chat. Send: "Can you research what's new with PostHog lately?"** + +> Now it's using the deepResearch tool — this one is different. It's a separate Trigger.dev task running in its own container, fetching multiple URLs and streaming progress back to the chat in real time. You could have tools for querying PostHog, tools for checking feature flags, tools for pulling session recordings — and the heavy ones run as subtasks with their own retries and traces. + +**Show the trace — triggerAndSubscribe span with child run nested inside.** + +> The parent subscribes to the child via realtime. If the user hits stop, the child gets cancelled automatically. + +--- + +## 6. The code (1.5 min) + +**Switch to VS Code with chat.ts, all regions collapsed.** + +> This is the whole thing — one file. A chat.task with lifecycle hooks and a run function. 
+ +Point out the collapsed view: + +- `idleTimeoutInSeconds`, `clientDataSchema` — typed metadata from the frontend +- `onPreload` — that's what fired before the first message +- `onTurnStart`, `onTurnComplete` — persistence hooks +- `run` — just `return streamText()`. The SDK handles everything else. + +**Expand the run region.** + +> Messages come in already converted. You return streamText. The posthogQuery tool is just a plain AI SDK tool that calls the PostHog API — deepResearch is a subtask wrapped with ai.tool. Mix and match. + +**Expand onTurnComplete if time.** + +> After every turn we defer a background call to gpt-4o-mini that reviews the response with generateObject. If it finds improvements, chat.inject adds a system message before the next LLM call. The agent gets coaching between turns — and it doesn't block the user. + +--- + +## 7. Wrap up (15s) + +> One chat, one persistent run. Lifecycle hooks, streaming, tool subtasks, background self-improvement — all on Trigger.dev's infrastructure with snapshot/restore and full observability. This is available now in the SDK. diff --git a/references/ai-chat/README.md b/references/ai-chat/README.md new file mode 100644 index 00000000000..39a6038f8c8 --- /dev/null +++ b/references/ai-chat/README.md @@ -0,0 +1,62 @@ +# AI Chat Reference App + +A multi-turn chat app built with the AI SDK's `useChat` hook and Trigger.dev's `chat.task`. Conversations run as durable Trigger.dev tasks with realtime streaming, automatic message accumulation, and persistence across page refreshes. + +## Data Models + +### Chat + +The conversation itself — your application data. + +| Column | Description | +| ---------- | ---------------------------------------- | +| `id` | Unique chat ID (generated on the client) | +| `title` | Display title for the sidebar | +| `messages` | Full `UIMessage[]` history (JSON) | + +A Chat lives forever (until the user deletes it). It is independent of any particular Trigger.dev run. 
+ +### ChatSession + +The transport's connection state for a chat — what the frontend needs to reconnect to the same Trigger.dev run after a page refresh. + +| Column | Description | +| ------------------- | --------------------------------------------------------------------------- | +| `id` | Same as the chat ID (1:1 relationship) | +| `runId` | The Trigger.dev run handling this conversation | +| `publicAccessToken` | Scoped token for reading the run's stream and sending input stream messages | +| `lastEventId` | Stream position — used to resume without replaying old events | + +A Chat can outlive many ChatSessions. When the run ends (turn timeout, max turns reached, crash), the ChatSession is gone but the Chat and its messages remain. The next message from the user starts a fresh run and creates a new ChatSession for the same Chat. + +**Think of it as: Chat = the conversation, ChatSession = the live connection to the run handling it.** + +## Lifecycle Hooks + +Persistence is handled server-side in the Trigger.dev task via three hooks: + +- **`onChatStart`** — Creates the Chat and ChatSession records when a new conversation starts (turn 0). +- **`onTurnStart`** — Saves messages and updates the session _before_ streaming begins, so a mid-stream page refresh still shows the user's message. +- **`onTurnComplete`** — Saves the assistant's response and the `lastEventId` for stream resumption. 
+
+## Setup
+
+```bash
+# From the repo root
+pnpm run docker # Start PostgreSQL, Redis, Electric
+pnpm run db:migrate # Run webapp migrations
+pnpm run db:seed # Seed the database
+
+# Set up the reference app's database
+cd references/ai-chat
+cp .env.example .env # Edit DATABASE_URL if needed
+npx prisma migrate deploy
+
+# Build and run
+pnpm run build --filter trigger.dev --filter @trigger.dev/sdk
+pnpm run dev --filter webapp # In one terminal
+cd references/ai-chat && pnpm exec trigger dev # In another
+cd references/ai-chat && pnpm run dev # In another
+```
+
+Open http://localhost:3000 to use the chat app.
diff --git a/references/ai-chat/next-env.d.ts b/references/ai-chat/next-env.d.ts
new file mode 100644
index 00000000000..1b3be0840f3
--- /dev/null
+++ b/references/ai-chat/next-env.d.ts
@@ -0,0 +1,5 @@
+/// <reference types="next" />
+/// <reference types="next/image-types/global" />
+
+// NOTE: This file should not be edited
+// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
diff --git a/references/ai-chat/next.config.ts b/references/ai-chat/next.config.ts
new file mode 100644
index 00000000000..ca6c9392a18
--- /dev/null
+++ b/references/ai-chat/next.config.ts
@@ -0,0 +1,7 @@
+import type { NextConfig } from "next";
+
+const nextConfig: NextConfig = {
+  devIndicators: false,
+};
+
+export default nextConfig;
diff --git a/references/ai-chat/package.json b/references/ai-chat/package.json
new file mode 100644
index 00000000000..8c030cac577
--- /dev/null
+++ b/references/ai-chat/package.json
@@ -0,0 +1,42 @@
+{
+  "name": "references-ai-chat",
+  "version": "0.1.0",
+  "private": true,
+  "scripts": {
+    "dev": "next dev --turbopack",
+    "build": "next build",
+    "start": "next start",
+    "dev:trigger": "trigger dev",
+    "db:migrate": "prisma migrate dev",
+    "db:push": "prisma db push",
+    "db:generate": "prisma generate"
+  },
+  "dependencies": {
+    "@ai-sdk/anthropic": "^3.0.0",
+    "@ai-sdk/openai": "^3.0.0",
+    "@ai-sdk/react": "^3.0.0",
+    "@prisma/adapter-pg": "^7.4.2",
+    "@prisma/client": "^7.4.2",
+ "@trigger.dev/sdk": "workspace:*", + "ai": "^6.0.0", + "next": "15.3.3", + "pg": "^8.16.3", + "react": "^19.0.0", + "react-dom": "^19.0.0", + "streamdown": "^2.3.0", + "turndown": "^7.2.2", + "zod": "3.25.76" + }, + "devDependencies": { + "@tailwindcss/postcss": "^4", + "@trigger.dev/build": "workspace:*", + "@types/node": "^22", + "@types/react": "^19", + "@types/react-dom": "^19", + "@types/turndown": "^5.0.6", + "tailwindcss": "^4", + "prisma": "^7.4.2", + "trigger.dev": "workspace:*", + "typescript": "^5" + } +} \ No newline at end of file diff --git a/references/ai-chat/postcss.config.mjs b/references/ai-chat/postcss.config.mjs new file mode 100644 index 00000000000..79bcf135dc4 --- /dev/null +++ b/references/ai-chat/postcss.config.mjs @@ -0,0 +1,8 @@ +/** @type {import('postcss-load-config').Config} */ +const config = { + plugins: { + "@tailwindcss/postcss": {}, + }, +}; + +export default config; diff --git a/references/ai-chat/prisma.config.ts b/references/ai-chat/prisma.config.ts new file mode 100644 index 00000000000..d73df7b3168 --- /dev/null +++ b/references/ai-chat/prisma.config.ts @@ -0,0 +1,12 @@ +import "dotenv/config"; +import { defineConfig, env } from "prisma/config"; + +export default defineConfig({ + schema: "prisma/schema.prisma", + migrations: { + path: "prisma/migrations", + }, + datasource: { + url: env("DATABASE_URL"), + }, +}); diff --git a/references/ai-chat/prisma/migrations/20260305112427_init/migration.sql b/references/ai-chat/prisma/migrations/20260305112427_init/migration.sql new file mode 100644 index 00000000000..951cd33d94e --- /dev/null +++ b/references/ai-chat/prisma/migrations/20260305112427_init/migration.sql @@ -0,0 +1,20 @@ +-- CreateTable +CREATE TABLE "Chat" ( + "id" TEXT NOT NULL, + "title" TEXT NOT NULL, + "messages" JSONB NOT NULL DEFAULT '[]', + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "Chat_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE 
TABLE "ChatSession" ( + "id" TEXT NOT NULL, + "runId" TEXT NOT NULL, + "publicAccessToken" TEXT NOT NULL, + "lastEventId" TEXT, + + CONSTRAINT "ChatSession_pkey" PRIMARY KEY ("id") +); diff --git a/references/ai-chat/prisma/migrations/20260306165319_add_user_model/migration.sql b/references/ai-chat/prisma/migrations/20260306165319_add_user_model/migration.sql new file mode 100644 index 00000000000..4a1bca35872 --- /dev/null +++ b/references/ai-chat/prisma/migrations/20260306165319_add_user_model/migration.sql @@ -0,0 +1,18 @@ +-- AlterTable +ALTER TABLE "Chat" ADD COLUMN "userId" TEXT; + +-- CreateTable +CREATE TABLE "User" ( + "id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "plan" TEXT NOT NULL DEFAULT 'free', + "preferredModel" TEXT, + "messageCount" INTEGER NOT NULL DEFAULT 0, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "User_pkey" PRIMARY KEY ("id") +); + +-- AddForeignKey +ALTER TABLE "Chat" ADD CONSTRAINT "Chat_userId_fkey" FOREIGN KEY ("userId") REFERENCES "User"("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/references/ai-chat/prisma/migrations/20260327180000_remove_user_tool/migration.sql b/references/ai-chat/prisma/migrations/20260327180000_remove_user_tool/migration.sql new file mode 100644 index 00000000000..c7a35afc8f2 --- /dev/null +++ b/references/ai-chat/prisma/migrations/20260327180000_remove_user_tool/migration.sql @@ -0,0 +1,2 @@ +-- DropTable +DROP TABLE IF EXISTS "UserTool"; diff --git a/references/ai-chat/prisma/migrations/migration_lock.toml b/references/ai-chat/prisma/migrations/migration_lock.toml new file mode 100644 index 00000000000..044d57cdb0d --- /dev/null +++ b/references/ai-chat/prisma/migrations/migration_lock.toml @@ -0,0 +1,3 @@ +# Please do not edit this file manually +# It should be added in your version-control system (e.g., Git) +provider = "postgresql" diff --git a/references/ai-chat/prisma/schema.prisma b/references/ai-chat/prisma/schema.prisma 
new file mode 100644 index 00000000000..cbdbb90e512 --- /dev/null +++ b/references/ai-chat/prisma/schema.prisma @@ -0,0 +1,37 @@ +generator client { + provider = "prisma-client" + output = "../lib/generated/prisma" +} + +datasource db { + provider = "postgresql" +} + +model User { + id String @id + name String + plan String @default("free") // "free" | "pro" + preferredModel String? + messageCount Int @default(0) + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + chats Chat[] +} + +model Chat { + id String @id + title String + model String @default("gpt-4o-mini") + messages Json @default("[]") + userId String? + user User? @relation(fields: [userId], references: [id]) + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt +} + +model ChatSession { + id String @id // chatId + runId String + publicAccessToken String + lastEventId String? +} diff --git a/references/ai-chat/src/app/actions.ts b/references/ai-chat/src/app/actions.ts new file mode 100644 index 00000000000..355d0c93fba --- /dev/null +++ b/references/ai-chat/src/app/actions.ts @@ -0,0 +1,116 @@ +"use server"; + +import { auth } from "@trigger.dev/sdk"; +import type { ResolveChatAccessTokenParams } from "@trigger.dev/sdk/chat"; +import type { aiChat, aiChatRaw, aiChatSession } from "@/trigger/chat"; +import type { ChatUiMessage } from "@/lib/chat-tools"; +import { prisma } from "@/lib/prisma"; + +/** Short-lived PATs for local testing of expiry + `renewRunAccessToken` (not for production). */ +const CHAT_EXAMPLE_PAT_TTL = "1h" as const; + +export type ChatReferenceTaskId = "ai-chat" | "ai-chat-raw" | "ai-chat-session"; + +function isChatReferenceTaskId(id: string): id is ChatReferenceTaskId { + return id === "ai-chat" || id === "ai-chat-raw" || id === "ai-chat-session"; +} + +/** Keeps compile-time alignment with exported chat tasks. 
 */
+type TaskIdentifierForChat =
+  | (typeof aiChat)["id"]
+  | (typeof aiChatRaw)["id"]
+  | (typeof aiChatSession)["id"];
+
+export async function getChatToken(
+  input: ResolveChatAccessTokenParams & { taskId?: string }
+): Promise<string> {
+  const id = input.taskId ?? "ai-chat";
+  const task: TaskIdentifierForChat = !isChatReferenceTaskId(id) ? "ai-chat" : id;
+  return auth.createTriggerPublicToken(task, { expirationTime: CHAT_EXAMPLE_PAT_TTL });
+}
+
+/**
+ * Mint a fresh run-scoped PAT for an existing chat run (same scopes as the task’s turn token).
+ * Used by TriggerChatTransport when the stored PAT expires (401 on realtime / input stream).
+ * Persists `publicAccessToken` (and `runId`) on `ChatSession` for this `chatId`.
+ * Requires TRIGGER_SECRET_KEY (or configured secret) in the server environment.
+ */
+export async function renewRunAccessTokenForChat(
+  chatId: string,
+  runId: string
+): Promise<string | undefined> {
+  try {
+    const token = await auth.createPublicToken({
+      scopes: {
+        read: { runs: runId },
+        write: { inputStreams: runId },
+      },
+      expirationTime: CHAT_EXAMPLE_PAT_TTL,
+    });
+
+    if (typeof token !== "string" || token.length === 0) {
+      return undefined;
+    }
+
+    await prisma.chatSession.upsert({
+      where: { id: chatId },
+      create: { id: chatId, runId, publicAccessToken: token },
+      update: { runId, publicAccessToken: token },
+    });
+
+    return token;
+  } catch {
+    return undefined;
+  }
+}
+
+export async function getChatList() {
+  const chats = await prisma.chat.findMany({
+    select: { id: true, title: true, model: true, createdAt: true, updatedAt: true },
+    orderBy: { updatedAt: "desc" },
+  });
+  return chats.map((c) => ({
+    id: c.id,
+    title: c.title,
+    model: c.model,
+    createdAt: c.createdAt.getTime(),
+    updatedAt: c.updatedAt.getTime(),
+  }));
+}
+
+export async function getChatMessages(chatId: string): Promise<ChatUiMessage[]> {
+  const found = await prisma.chat.findUnique({ where: { id: chatId } });
+  if (!found) return [];
+  return found.messages as unknown as ChatUiMessage[];
+}
+
+export async function deleteChat(chatId: string) {
+  await prisma.chat.delete({ where: { id: chatId } }).catch(() => { });
+  await prisma.chatSession.delete({ where: { id: chatId } }).catch(() => { });
+}
+
+export async function updateChatTitle(chatId: string, title: string) {
+  await prisma.chat.update({ where: { id: chatId }, data: { title } }).catch(() => { });
+}
+
+export async function updateSessionLastEventId(chatId: string, lastEventId: string) {
+  await prisma.chatSession.update({ where: { id: chatId }, data: { lastEventId } }).catch(() => { });
+}
+
+export async function deleteSessionAction(chatId: string) {
+  await prisma.chatSession.delete({ where: { id: chatId } }).catch(() => { });
+}
+
+export async function getAllSessions() {
+  const sessions = await prisma.chatSession.findMany();
+  const result: Record<string, { runId: string; publicAccessToken: string; lastEventId?: string }> =
+    {};
+  for (const s of sessions) {
+    result[s.id] = {
+      runId: s.runId,
+      publicAccessToken: s.publicAccessToken,
+      lastEventId: s.lastEventId ?? undefined,
+    };
+  }
+  return result;
+}
diff --git a/references/ai-chat/src/app/globals.css b/references/ai-chat/src/app/globals.css
new file mode 100644
index 00000000000..92c4b9a7860
--- /dev/null
+++ b/references/ai-chat/src/app/globals.css
@@ -0,0 +1,2 @@
+@import "tailwindcss";
+@source "../../../node_modules/streamdown/dist/*.js";
diff --git a/references/ai-chat/src/app/layout.tsx b/references/ai-chat/src/app/layout.tsx
new file mode 100644
index 00000000000..544dd9142d8
--- /dev/null
+++ b/references/ai-chat/src/app/layout.tsx
@@ -0,0 +1,16 @@
+import type { Metadata } from "next";
+import "./globals.css";
+import "streamdown/styles.css";
+
+export const metadata: Metadata = {
+  title: "AI Chat — Trigger.dev",
+  description: "AI SDK useChat powered by Trigger.dev durable tasks",
+};
+
+export default function RootLayout({ children }: { children: React.ReactNode }) {
+  return (
+    <html lang="en">
+      <body>{children}</body>
+    </html>
+  );
+}
diff --git a/references/ai-chat/src/app/page.tsx b/references/ai-chat/src/app/page.tsx
new file mode 100644 index 00000000000..37ead39c50a --- /dev/null +++ b/references/ai-chat/src/app/page.tsx @@ -0,0 +1,59 @@ +"use client"; + +import type { ChatUiMessage } from "@/lib/chat-tools"; +import { useEffect, useState } from "react"; +import { ChatApp } from "@/components/chat-app"; +import { getChatList, getChatMessages, getAllSessions } from "@/app/actions"; + +type ChatMeta = { + id: string; + title: string; + model: string; + createdAt: number; + updatedAt: number; +}; + +export default function Home() { + const [chatList, setChatList] = useState([]); + const [activeChatId, setActiveChatId] = useState(null); + const [initialMessages, setInitialMessages] = useState([]); + const [initialSessions, setInitialSessions] = useState< + Record + >({}); + const [loaded, setLoaded] = useState(false); + const [taskMode, setTaskMode] = useState("ai-chat"); + + useEffect(() => { + async function load() { + const [list, sessions] = await Promise.all([getChatList(), getAllSessions()]); + setChatList(list); + setInitialSessions(sessions); + + let firstChatId: string | null = null; + let firstMessages: ChatUiMessage[] = []; + if (list.length > 0) { + firstChatId = list[0]!.id; + firstMessages = await getChatMessages(firstChatId); + } + + setActiveChatId(firstChatId); + setInitialMessages(firstMessages); + setLoaded(true); + } + load(); + }, []); + + if (!loaded) return null; + + return ( + + ); +} diff --git a/references/ai-chat/src/components/chat-app.tsx b/references/ai-chat/src/components/chat-app.tsx new file mode 100644 index 00000000000..7b453463f35 --- /dev/null +++ b/references/ai-chat/src/components/chat-app.tsx @@ -0,0 +1,198 @@ +"use client"; + +import { generateId } from "ai"; +import { useTriggerChatTransport } from "@trigger.dev/sdk/chat/react"; +import type { ChatUiMessage } from "@/lib/chat-tools"; +import { useCallback, useEffect, useState } from "react"; +import { Chat } from "@/components/chat"; +import { ChatSidebar } from 
"@/components/chat-sidebar"; +import { DEFAULT_MODEL } from "@/lib/models"; +import { + getChatToken, + getChatList, + getChatMessages, + deleteChat as deleteChatAction, + updateChatTitle, + deleteSessionAction, + renewRunAccessTokenForChat, +} from "@/app/actions"; + +type ChatMeta = { + id: string; + title: string; + model: string; + createdAt: number; + updatedAt: number; +}; + +type SessionInfo = { + runId: string; + publicAccessToken: string; + lastEventId?: string; +}; + +type ChatAppProps = { + taskMode: string; + onTaskModeChange: (mode: string) => void; + initialChatList: ChatMeta[]; + initialActiveChatId: string | null; + initialMessages: ChatUiMessage[]; + initialSessions: Record; +}; + +export function ChatApp({ + taskMode, + onTaskModeChange, + initialChatList, + initialActiveChatId, + initialMessages, + initialSessions, +}: ChatAppProps) { + const [chatList, setChatList] = useState(initialChatList); + const [activeChatId, setActiveChatId] = useState(initialActiveChatId); + const [messages, setMessages] = useState(initialMessages); + const [sessions, setSessions] = useState>(initialSessions); + + // Model for new chats (before first message is sent) + const [newChatModel, setNewChatModel] = useState(DEFAULT_MODEL); + const [preloadEnabled, setPreloadEnabled] = useState(true); + const [idleTimeoutInSeconds, setIdleTimeoutInSeconds] = useState(60); + + const handleSessionChange = useCallback((chatId: string, session: SessionInfo | null) => { + if (session) { + setSessions((prev) => ({ ...prev, [chatId]: session })); + } else { + setSessions((prev) => { + const next = { ...prev }; + delete next[chatId]; + return next; + }); + deleteSessionAction(chatId); + } + }, []); + + const transport = useTriggerChatTransport({ + task: taskMode, + accessToken: (params) => getChatToken({ ...params, taskId: taskMode }), + renewRunAccessToken: ({ chatId, runId }) => renewRunAccessTokenForChat(chatId, runId), + baseURL: process.env.NEXT_PUBLIC_TRIGGER_API_URL, + sessions: 
initialSessions, + onSessionChange: handleSessionChange, + clientData: { userId: "user_123" }, + triggerOptions: { + tags: ["user:user_123"], + }, + }); + + // Load messages when active chat changes + useEffect(() => { + if (!activeChatId) { + setMessages([]); + return; + } + // Don't reload if we already have the initial messages for the initial chat + if (activeChatId === initialActiveChatId && messages === initialMessages) { + return; + } + getChatMessages(activeChatId).then(setMessages); + }, [activeChatId]); + + function handleNewChat() { + const id = generateId(); + setActiveChatId(id); + setMessages([]); + setNewChatModel(DEFAULT_MODEL); + if (preloadEnabled) { + // Eagerly start the run — onPreload fires immediately for initialization + transport.preload(id, { idleTimeoutInSeconds }); + } + } + + function handleSelectChat(id: string) { + setActiveChatId(id); + } + + async function handleDeleteChat(id: string) { + await deleteChatAction(id); + const list = await getChatList(); + setChatList(list); + if (activeChatId === id) { + if (list.length > 0) { + setActiveChatId(list[0]!.id); + } else { + setActiveChatId(null); + } + } + } + + const handleFirstMessage = useCallback(async (chatId: string, text: string) => { + const title = text.slice(0, 40).trim() || "New chat"; + await updateChatTitle(chatId, title); + const list = await getChatList(); + setChatList(list); + }, []); + + const handleMessagesChange = useCallback(async (_chatId: string, _messages: ChatUiMessage[]) => { + // Messages are persisted server-side via onTurnComplete. + // Refresh the chat list to update timestamps. + const list = await getChatList(); + setChatList(list); + }, []); + + // Determine the model for the active chat + const activeChatMeta = chatList.find((c) => c.id === activeChatId); + const isNewChat = activeChatId != null && !activeChatMeta; + const activeModel = isNewChat ? newChatModel : activeChatMeta?.model ?? 
DEFAULT_MODEL; + + // Get session for the active chat + const activeSession = activeChatId ? sessions[activeChatId] : undefined; + + return ( +
+ +
+ {activeChatId ? ( + 0} + model={activeModel} + isNewChat={isNewChat} + onModelChange={isNewChat ? setNewChatModel : undefined} + session={activeSession} + dashboardUrl={process.env.NEXT_PUBLIC_TRIGGER_DASHBOARD_URL} + onFirstMessage={handleFirstMessage} + onMessagesChange={handleMessagesChange} + /> + ) : ( +
+
+

No conversation selected

+ +
+
+ )} +
+
+ ); +} diff --git a/references/ai-chat/src/components/chat-sidebar.tsx b/references/ai-chat/src/components/chat-sidebar.tsx new file mode 100644 index 00000000000..198d9afde37 --- /dev/null +++ b/references/ai-chat/src/components/chat-sidebar.tsx @@ -0,0 +1,130 @@ +"use client"; + +type ChatMeta = { + id: string; + title: string; + createdAt: number; + updatedAt: number; +}; + +function timeAgo(ts: number): string { + const seconds = Math.floor((Date.now() - ts) / 1000); + if (seconds < 60) return "just now"; + const minutes = Math.floor(seconds / 60); + if (minutes < 60) return `${minutes}m ago`; + const hours = Math.floor(minutes / 60); + if (hours < 24) return `${hours}h ago`; + const days = Math.floor(hours / 24); + return `${days}d ago`; +} + +type ChatSidebarProps = { + chats: ChatMeta[]; + activeChatId: string | null; + onSelectChat: (id: string) => void; + onNewChat: () => void; + onDeleteChat: (id: string) => void; + preloadEnabled: boolean; + onPreloadChange: (enabled: boolean) => void; + idleTimeoutInSeconds: number; + onIdleTimeoutChange: (seconds: number) => void; + taskMode: string; + onTaskModeChange: (mode: string) => void; +}; + +export function ChatSidebar({ + chats, + activeChatId, + onSelectChat, + onNewChat, + onDeleteChat, + preloadEnabled, + onPreloadChange, + idleTimeoutInSeconds, + onIdleTimeoutChange, + taskMode, + onTaskModeChange, +}: ChatSidebarProps) { + const sorted = [...chats].sort((a, b) => b.updatedAt - a.updatedAt); + + return ( +
+
+ +
+ +
+ {sorted.length === 0 && ( +

No conversations yet

+ )} + + {sorted.map((chat) => ( + + ))} +
+ +
+ +
+ Idle timeout + onIdleTimeoutChange(Number(e.target.value))} + className="w-16 rounded border border-gray-300 px-1.5 py-0.5 text-xs text-gray-600 outline-none focus:border-blue-500" + /> + s +
+
+ Task + +
+
+
+ ); +} diff --git a/references/ai-chat/src/components/chat.tsx b/references/ai-chat/src/components/chat.tsx new file mode 100644 index 00000000000..f3b700abf43 --- /dev/null +++ b/references/ai-chat/src/components/chat.tsx @@ -0,0 +1,701 @@ +"use client"; + +import { useChat } from "@ai-sdk/react"; +import type { ChatUiMessage } from "@/lib/chat-tools"; +import type { TriggerChatTransport } from "@trigger.dev/sdk/chat"; +import type { CompactionChunkData } from "@trigger.dev/sdk/ai"; +import { usePendingMessages } from "@trigger.dev/sdk/chat/react"; +import { useEffect, useRef, useState } from "react"; +import { Streamdown } from "streamdown"; +import { MODEL_OPTIONS } from "@/lib/models"; + +function ToolInvocation({ part }: { part: any }) { + const [expanded, setExpanded] = useState(false); + const toolName = part.type.startsWith("tool-") ? part.type.slice(5) : "tool"; + const state = part.state ?? "input-available"; + const args = part.input; + const result = part.output; + + const isLoading = state === "input-streaming" || state === "input-available"; + const isError = state === "output-error"; + + return ( +
+ + + {expanded && ( +
+ {args && Object.keys(args).length > 0 && ( +
+
Input
+
+                {JSON.stringify(args, null, 2)}
+              
+
+ )} + {state === "output-available" && result !== undefined && ( +
+
Output
+
+                {JSON.stringify(result, null, 2)}
+              
+
+ )} + {isError && result !== undefined && ( +
+
Error
+
+                {typeof result === "string" ? result : JSON.stringify(result, null, 2)}
+              
+
+ )} +
+ )} +
+ ); +} + +function ResearchProgress({ part }: { part: any }) { + const data = part.data as { + status: "fetching" | "done"; + query: string; + current: number; + total: number; + currentUrl?: string; + completedUrls: string[]; + }; + + const isDone = data.status === "done"; + + return ( +
+
+ {isDone ? ( + + ) : ( + + )} + + {isDone + ? `Research complete — ${data.total} sources fetched` + : `Researching "${data.query}" (${data.current}/${data.total})`} + +
+ {data.currentUrl && !isDone && ( +
Fetching {data.currentUrl}
+ )} + {data.completedUrls.length > 0 && ( +
+ {data.completedUrls.map((url, i) => ( +
+ ✓ {url} +
+ ))} +
+ )} +
+ ); +} + +type TtfbEntry = { turn: number; ttfbMs: number }; + +function DebugPanel({ + chatId, + model, + status, + session, + dashboardUrl, + messageCount, + ttfbHistory, +}: { + chatId: string; + model: string; + status: string; + session?: { runId: string; publicAccessToken: string; lastEventId?: string }; + dashboardUrl?: string; + messageCount: number; + ttfbHistory: TtfbEntry[]; +}) { + const [open, setOpen] = useState(false); + + const runUrl = + session?.runId && dashboardUrl ? `${dashboardUrl}/runs/${session.runId}` : undefined; + + const latestTtfb = ttfbHistory.length > 0 ? ttfbHistory[ttfbHistory.length - 1]! : undefined; + const avgTtfb = + ttfbHistory.length > 0 + ? Math.round(ttfbHistory.reduce((sum, e) => sum + e.ttfbMs, 0) / ttfbHistory.length) + : undefined; + + return ( +
+ + + {open && ( +
+ + + + + {session ? ( + <> + + + + ) : ( + + )} + {ttfbHistory.length > 0 && ( + <> +
+ TTFB + {avgTtfb !== undefined && ( + avg {avgTtfb.toLocaleString()}ms + )} +
+ {ttfbHistory.map((entry) => ( +
+ Turn {entry.turn} + {entry.ttfbMs.toLocaleString()}ms +
+ ))} + + )} +
+ )} +
+ ); +} + +function Row({ + label, + value, + mono, + link, +}: { + label: string; + value: string; + mono?: boolean; + link?: string; +}) { + return ( +
+ {label} + {link ? ( + + {value} + + ) : ( + {value} + )} +
+ ); +} + +type ChatProps = { + chatId: string; + initialMessages: ChatUiMessage[]; + transport: TriggerChatTransport; + resume?: boolean; + model: string; + isNewChat: boolean; + onModelChange?: (model: string) => void; + session?: { runId: string; publicAccessToken: string; lastEventId?: string }; + dashboardUrl?: string; + onFirstMessage?: (chatId: string, text: string) => void; + onMessagesChange?: (chatId: string, messages: ChatUiMessage[]) => void; +}; + +export function Chat({ + chatId, + initialMessages, + transport, + resume: resumeProp, + model, + isNewChat, + onModelChange, + session, + dashboardUrl, + onFirstMessage, + onMessagesChange, +}: ChatProps) { + const [input, setInput] = useState(""); + const hasCalledFirstMessage = useRef(false); + + // TTFB tracking + const sendTimestamp = useRef(null); + const turnCounter = useRef(0); + const [ttfbHistory, setTtfbHistory] = useState([]); + + const { messages, setMessages, sendMessage, stop, status, error } = useChat({ + id: chatId, + messages: initialMessages, + transport, + resume: resumeProp, + }); + + // Notify parent of first user message (for chat metadata creation) + useEffect(() => { + if (hasCalledFirstMessage.current) return; + const firstUser = messages.find((m) => m.role === "user"); + if (firstUser) { + hasCalledFirstMessage.current = true; + const text = firstUser.parts + .filter((p: any) => p.type === "text") + .map((p: any) => p.text) + .join(" "); + onFirstMessage?.(chatId, text); + } + }, [messages, chatId, onFirstMessage]); + + // TTFB detection: record when first assistant content appears after send + useEffect(() => { + if (status !== "streaming") return; + if (sendTimestamp.current === null) return; + const lastMsg = messages[messages.length - 1]; + if (lastMsg?.role === "assistant") { + const ttfbMs = Date.now() - sendTimestamp.current; + const turn = turnCounter.current; + sendTimestamp.current = null; + setTtfbHistory((prev) => [...prev, { turn, ttfbMs }]); + } + }, [status, 
messages]); + + // Pending messages — handles steering messages during streaming + const pending = usePendingMessages({ + transport, + chatId, + status, + messages, + setMessages, + sendMessage, + metadata: { model }, + }); + + // Expose test helpers for automated testing via Chrome DevTools. + // All actions go through refs so closures always call the latest version. + const stateRef = useRef({ status, messages, pending: pending.pending }); + stateRef.current = { status, messages, pending: pending.pending }; + + const actionsRef = useRef({ + steer: pending.steer, + queue: pending.queue, + promote: pending.promoteToSteering, + send: (text: string) => { + turnCounter.current++; + sendTimestamp.current = Date.now(); + sendMessage({ text }, { metadata: { model } }); + }, + stop, + }); + actionsRef.current = { + steer: pending.steer, + queue: pending.queue, + promote: pending.promoteToSteering, + send: (text: string) => { + turnCounter.current++; + sendTimestamp.current = Date.now(); + sendMessage({ text }, { metadata: { model } }); + }, + stop, + }; + + useEffect(() => { + (window as any).__chat = { + get status() { + return stateRef.current.status; + }, + get messages() { + return stateRef.current.messages; + }, + get pending() { + return stateRef.current.pending; + }, + get runId() { + return transport.getSession(chatId)?.runId ?? session?.runId ?? 
null; + }, + chatId, + steer: (text: string) => actionsRef.current.steer(text), + queue: (text: string) => actionsRef.current.queue(text), + promote: (id: string) => actionsRef.current.promote(id), + send: (text: string) => actionsRef.current.send(text), + stop: () => actionsRef.current.stop(), + // Wait for a tool call to appear, then steer + steerOnToolCall: (text: string) => + new Promise((resolve) => { + const check = setInterval(() => { + const { messages: msgs } = stateRef.current; + const lastMsg = msgs[msgs.length - 1]; + const hasTool = + lastMsg?.role === "assistant" && + lastMsg.parts?.some( + (p: any) => p.type?.startsWith("tool-") + ); + if (hasTool) { + clearInterval(check); + console.log( + "[__chat] steerOnToolCall: tool detected, steering now. status:", + stateRef.current.status + ); + actionsRef.current.steer(text); + resolve(); + } + }, 200); + }), + // Wait for status to become a value + waitForStatus: (target: string) => + new Promise((resolve) => { + const check = setInterval(() => { + if (stateRef.current.status === target) { + clearInterval(check); + resolve(); + } + }, 100); + }), + steerAfterDelay: (text: string, ms: number) => + new Promise((r) => + setTimeout(() => { + actionsRef.current.steer(text); + r(); + }, ms) + ), + queueAfterDelay: (text: string, ms: number) => + new Promise((r) => + setTimeout(() => { + actionsRef.current.queue(text); + r(); + }, ms) + ), + }; + return () => { + delete (window as any).__chat; + }; + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [chatId]); + + // Persist messages when a turn completes + const prevStatus = useRef(status); + useEffect(() => { + const turnCompleted = prevStatus.current === "streaming" && status === "ready"; + prevStatus.current = status; + if (!turnCompleted) return; + if (messages.length > 0) { + onMessagesChange?.(chatId, messages); + } + }, [status, messages, chatId, onMessagesChange]); + + return ( +
+ {/* Model selector for new chats */} + {isNewChat && messages.length === 0 && onModelChange && ( +
+ Model: + +
+ )} + + {/* Model badge for existing chats */} + {(!isNewChat || messages.length > 0) && ( +
+ + {model} + +
+ )} + + {/* Messages */} +
+ {messages.length === 0 && ( +

+ Send a message to start chatting. +

+ )} + + {messages.map((message, messageIndex) => ( +
+
+
+ {message.parts.map((part, i) => { + if (part.type === "text") { + if (message.role === "assistant") { + return ( + + {part.text} + + ); + } + return {part.text}; + } + + if (part.type === "reasoning") { + return ( +
+ + Thinking... + +
+ {part.text} +
+
+ ); + } + + // Transient status parts — hide from rendered output + if ( + part.type === "data-turn-status" || + part.type === "data-background-context-injected" + ) { + return null; + } + + if (part.type === "data-research-progress") { + return ; + } + + if (part.type === "data-compaction") { + const data = (part as any).data as CompactionChunkData; + return ( +
+ {data.status === "compacting" ? "⏳" : "✂️"} + + {data.status === "compacting" + ? `Compacting conversation${ + data.totalTokens + ? ` (${data.totalTokens.toLocaleString()} tokens)` + : "" + }...` + : "Conversation compacted"} + +
+ ); + } + + if (part.type.startsWith("tool-")) { + return ; + } + + if (pending.isInjectionPoint(part)) { + const injectedMsgs = pending.getInjectedMessages(part); + if (injectedMsgs.length === 0) return null; + return ( +
+
+ {injectedMsgs.map((m) => ( +
+ {m.text} +
+ ))} +
+ injected mid-response +
+
+
+ ); + } + + if (part.type.startsWith("data-")) { + return ( +
+ {part.type} +
+                          {JSON.stringify((part as any).data, null, 2)}
+                        
+
+ ); + } + + return null; + })} +
+
+
+ ))} + + {status === "streaming" && messages[messages.length - 1]?.role !== "assistant" && ( +
+
+ Thinking... +
+
+ )} + + {pending.pending.map((msg) => ( +
+
+
+ {msg.text} +
+
+ + {msg.mode === "steering" + ? "Steering — waiting for injection point" + : "Queued for next turn"} + + {msg.mode === "queued" && status === "streaming" && ( + + )} +
+
+
+ ))} +
+ + {error && ( +
+ {error.message} +
+ )} + + {/* Debug panel */} + + + { + e.preventDefault(); + if (!input.trim()) return; + if (status !== "streaming") { + turnCounter.current++; + sendTimestamp.current = Date.now(); + } + pending.steer(input); + setInput(""); + }} + className="shrink-0 border-t border-gray-200 bg-white p-4" + > +
+ setInput(e.target.value)} + placeholder="Type a message..." + className="flex-1 rounded-lg border border-gray-300 px-3 py-2 text-sm outline-none focus:border-blue-500 focus:ring-1 focus:ring-blue-500" + /> + + {status === "streaming" && ( + + )} + {status === "streaming" && ( + + )} +
+ +
+ ); +} diff --git a/references/ai-chat/src/lib/chat-tools.ts b/references/ai-chat/src/lib/chat-tools.ts new file mode 100644 index 00000000000..c1ee18b9f2c --- /dev/null +++ b/references/ai-chat/src/lib/chat-tools.ts @@ -0,0 +1,240 @@ +import { ai, chat } from "@trigger.dev/sdk/ai"; +import { schemaTask } from "@trigger.dev/sdk"; +import { tool, generateId } from "ai"; +import type { InferUITools, UIDataTypes, UIMessage } from "ai"; +import { z } from "zod"; +import os from "node:os"; +import TurndownService from "turndown"; + +const turndown = new TurndownService(); + +// Silence TS errors for Bun/Deno global checks +declare const Bun: unknown; +declare const Deno: unknown; + +export const inspectEnvironment = tool({ + description: + "Inspect the current execution environment. Returns runtime info (Node.js/Bun/Deno version), " + + "OS details, CPU architecture, memory usage, environment variables, and platform metadata.", + inputSchema: z.object({}), + execute: async () => { + const memUsage = process.memoryUsage(); + + return { + runtime: { + name: typeof Bun !== "undefined" ? "bun" : typeof Deno !== "undefined" ? "deno" : "node", + version: process.version, + versions: { + v8: process.versions.v8, + openssl: process.versions.openssl, + modules: process.versions.modules, + }, + }, + os: { + platform: process.platform, + arch: process.arch, + release: os.release(), + type: os.type(), + hostname: os.hostname(), + uptime: `${Math.floor(os.uptime())}s`, + }, + cpus: { + count: os.cpus().length, + model: os.cpus()[0]?.model, + }, + memory: { + total: `${Math.round(os.totalmem() / 1024 / 1024)}MB`, + free: `${Math.round(os.freemem() / 1024 / 1024)}MB`, + process: { + rss: `${Math.round(memUsage.rss / 1024 / 1024)}MB`, + heapUsed: `${Math.round(memUsage.heapUsed / 1024 / 1024)}MB`, + heapTotal: `${Math.round(memUsage.heapTotal / 1024 / 1024)}MB`, + }, + }, + env: { + NODE_ENV: process.env.NODE_ENV, + TZ: process.env.TZ ?? 
Intl.DateTimeFormat().resolvedOptions().timeZone, + LANG: process.env.LANG, + }, + process: { + pid: process.pid, + cwd: process.cwd(), + execPath: process.execPath, + argv: process.argv.slice(0, 3), + }, + }; + }, +}); + +export const webFetch = tool({ + description: + "Fetch a URL and return the response as text. " + + "Use this to retrieve web pages, APIs, or any HTTP resource.", + inputSchema: z.object({ + url: z.string().url().describe("The URL to fetch"), + }), + execute: async ({ url }) => { + const latency = Number(process.env.WEBFETCH_LATENCY_MS); + if (latency > 0) { + await new Promise((r) => setTimeout(r, latency)); + } + + const response = await fetch(url); + let text = await response.text(); + const contentType = response.headers.get("content-type") ?? ""; + + if (contentType.includes("html")) { + text = turndown.turndown(text); + } + + return { + status: response.status, + contentType, + body: text.slice(0, 2000), + truncated: text.length > 2000, + }; + }, +}); + +const deepResearchTask = schemaTask({ + id: "deep-research", + description: + "Research a topic by fetching multiple URLs and synthesizing the results. 
" + + "Streams progress updates to the chat as it works.", + schema: z.object({ + query: z.string().describe("The research query or topic"), + urls: z.array(z.string().url()).describe("URLs to fetch and analyze"), + }), + run: async ({ query, urls }) => { + const partId = generateId(); + const results: { url: string; status: number; snippet: string }[] = []; + + for (let i = 0; i < urls.length; i++) { + const url = urls[i]!; + + const { waitUntilComplete } = chat.stream.writer({ + target: "root", + execute: ({ write }) => { + write({ + type: "data-research-progress", + id: partId, + data: { + status: "fetching" as const, + query, + current: i + 1, + total: urls.length, + currentUrl: url, + completedUrls: results.map((r) => r.url), + }, + }); + }, + }); + await waitUntilComplete(); + + try { + const response = await fetch(url); + let text = await response.text(); + const contentType = response.headers.get("content-type") ?? ""; + + if (contentType.includes("html")) { + text = turndown.turndown(text); + } + + results.push({ + url, + status: response.status, + snippet: text.slice(0, 500), + }); + } catch (err) { + results.push({ + url, + status: 0, + snippet: `Error: ${err instanceof Error ? err.message : String(err)}`, + }); + } + } + + const { waitUntilComplete: waitForDone } = chat.stream.writer({ + target: "root", + execute: ({ write }) => { + write({ + type: "data-research-progress", + id: partId, + data: { + status: "done" as const, + query, + current: urls.length, + total: urls.length, + completedUrls: results.map((r) => r.url), + }, + }); + }, + }); + await waitForDone(); + + return { query, results }; + }, +}); + +/** Task-backed tool: AI SDK `tool()` for shape/types; `ai.toolExecute` for Trigger subtask + metadata. */ +export const deepResearch = tool({ + description: deepResearchTask.description ?? 
"", + inputSchema: deepResearchTask.schema!, + execute: ai.toolExecute(deepResearchTask), +}); + +const POSTHOG_API_KEY = process.env.POSTHOG_API_KEY; +const POSTHOG_PROJECT_ID = process.env.POSTHOG_PROJECT_ID; +const POSTHOG_HOST = process.env.POSTHOG_HOST ?? "https://eu.posthog.com"; + +export const posthogQuery = tool({ + description: + "Query PostHog analytics using HogQL. Use this to answer questions about events, " + + "pageviews, user activity, feature flag usage, or any product analytics question. " + + "Write a HogQL query (SQL-like syntax over PostHog events).", + inputSchema: z.object({ + query: z + .string() + .describe( + "HogQL query, e.g. SELECT event, count() FROM events WHERE timestamp > now() - interval 1 day GROUP BY event ORDER BY count() DESC LIMIT 10" + ), + }), + execute: async ({ query }) => { + if (!POSTHOG_API_KEY || !POSTHOG_PROJECT_ID) { + return { error: "PostHog not configured. Set POSTHOG_API_KEY and POSTHOG_PROJECT_ID." }; + } + const response = await fetch(`${POSTHOG_HOST}/api/projects/${POSTHOG_PROJECT_ID}/query/`, { + method: "POST", + headers: { + Authorization: `Bearer ${POSTHOG_API_KEY}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ query: { kind: "HogQLQuery", query } }), + }); + + if (!response.ok) { + const text = await response.text(); + return { error: `PostHog API error ${response.status}: ${text.slice(0, 500)}` }; + } + + const data = await response.json(); + return { + columns: data.columns, + results: data.results?.slice(0, 50), + rowCount: data.results?.length ?? 0, + }; + }, +}); + +/** Tool set passed to `streamText` for the main `chat.task` run (includes PostHog). 
*/ +export const chatTools = { + inspectEnvironment, + webFetch, + deepResearch, + posthogQuery, +}; + +type ChatToolSet = typeof chatTools; + +export type ChatUiTools = InferUITools; +export type ChatUiMessage = UIMessage; diff --git a/references/ai-chat/src/lib/models.ts b/references/ai-chat/src/lib/models.ts new file mode 100644 index 00000000000..77c2ea1621d --- /dev/null +++ b/references/ai-chat/src/lib/models.ts @@ -0,0 +1,10 @@ +export const MODEL_OPTIONS = [ + "gpt-4o-mini", + "gpt-4o", + "claude-sonnet-4-6", + "claude-opus-4-6", +]; + +export const DEFAULT_MODEL = "claude-sonnet-4-6"; + +export const REASONING_MODELS = new Set(["claude-opus-4-6"]); diff --git a/references/ai-chat/src/lib/prisma.ts b/references/ai-chat/src/lib/prisma.ts new file mode 100644 index 00000000000..5e78334aa82 --- /dev/null +++ b/references/ai-chat/src/lib/prisma.ts @@ -0,0 +1,15 @@ +import { PrismaPg } from "@prisma/adapter-pg"; +import { PrismaClient } from "../../lib/generated/prisma/client"; + +const globalForPrisma = globalThis as unknown as { prisma: PrismaClient | undefined }; + +function createClient() { + const adapter = new PrismaPg({ connectionString: process.env.DATABASE_URL! }); + return new PrismaClient({ adapter }); +} + +export const prisma = globalForPrisma.prisma ?? 
createClient(); + +if (process.env.NODE_ENV !== "production") { + globalForPrisma.prisma = prisma; +} diff --git a/references/ai-chat/src/trigger/chat.ts b/references/ai-chat/src/trigger/chat.ts new file mode 100644 index 00000000000..d3527de0aa5 --- /dev/null +++ b/references/ai-chat/src/trigger/chat.ts @@ -0,0 +1,758 @@ +import { chat, type ChatTaskWirePayload } from "@trigger.dev/sdk/ai"; +import { logger, task, prompts } from "@trigger.dev/sdk"; +import { + streamText, + generateText, + generateObject, + stepCountIs, + generateId, + createProviderRegistry, +} from "ai"; +import type { LanguageModel, LanguageModelUsage, UIMessage } from "ai"; +import { openai } from "@ai-sdk/openai"; +import { anthropic } from "@ai-sdk/anthropic"; +import { z } from "zod"; +import { PrismaPg } from "@prisma/adapter-pg"; +import { PrismaClient } from "../../lib/generated/prisma/client"; +import { + chatTools, + deepResearch, + inspectEnvironment, + webFetch, + type ChatUiMessage, +} from "@/lib/chat-tools"; + +const adapter = new PrismaPg({ connectionString: process.env.DATABASE_URL! }); +const prisma = new PrismaClient({ adapter }); + +/** Prisma `messages` JSON column — use write-side type for updates (not `JsonValue` from reads). */ +export type ChatMessagesForWrite = NonNullable< + Parameters[0]["data"] +>["messages"]; + +import { DEFAULT_MODEL, REASONING_MODELS } from "@/lib/models"; + +function textFromFirstPart(message: UIMessage): string { + const p = message.parts?.[0]; + return p?.type === "text" ? p.text : ""; +} +const COMPACT_AFTER_TOKENS = Number(process.env.COMPACT_AFTER_TOKENS) || 80_000; + +const registry = createProviderRegistry({ openai, anthropic }); + +type RegistryLanguageModelId = Parameters[0]; + +function registryLanguageModel( + id: string | undefined, + fallback: RegistryLanguageModelId +): LanguageModel { + return registry.languageModel((id ?? 
fallback) as RegistryLanguageModelId); +} + +// #region Managed prompts — versioned, overridable from dashboard +const compactionPrompt = prompts.define({ + id: "ai-chat-compaction", + model: "openai:gpt-4o-mini" satisfies RegistryLanguageModelId, + content: `You are a conversation compactor. You will receive a transcript of a multi-turn conversation between a user and an assistant. + +Produce a concise summary that captures: +- The topics discussed and questions asked +- Any key facts, answers, or decisions reached +- Important context needed to continue the conversation naturally + +Write in third person (e.g. "The user asked about..." / "The assistant explained..."). +Keep it under 300 words. Do not include greetings or filler.`, +}); + +const systemPrompt = prompts.define({ + id: "ai-chat-system", + model: "openai:gpt-4o" satisfies RegistryLanguageModelId, + config: { temperature: 0.7 }, + variables: z.object({ name: z.string(), plan: z.string() }), + content: `You are a helpful AI assistant for {{name}} on the {{plan}} plan. + +## Guidelines +- Be concise and friendly. Prefer short, direct answers unless the user asks for detail. +- When using tools, explain what you're doing briefly before invoking them. +- If you don't know something, say so — don't make things up. + +## Capabilities +You can inspect the execution environment, fetch web pages, and perform multi-URL deep research. +When the user asks you to research a topic, use the deep research tool with relevant URLs. + +## Tone +- Match the user's formality level. If they're casual, be casual back. +- Use markdown formatting for code blocks, lists, and structured output. +- Keep responses under a few paragraphs unless the user asks for more.`, +}); + +const selfReviewPrompt = prompts.define({ + id: "ai-chat-self-review", + model: "openai:gpt-4o-mini" satisfies RegistryLanguageModelId, + content: `You are a conversation quality reviewer. 
Analyze the assistant's most recent response and provide structured feedback. + +Focus on: +- Whether the response actually answered the user's question +- Missed opportunities to use tools or provide more detail +- Tone mismatches (too formal, too casual, etc.) +- Factual claims that should have been verified with tools + +Be concise. Only flag issues worth fixing — don't nitpick.`, +}); +// #endregion + +// #region Models and helpers +const MODELS: Record LanguageModel> = { + "gpt-4o-mini": () => openai("gpt-4o-mini"), + "gpt-4o": () => openai("gpt-4o"), + "claude-sonnet-4-6": () => anthropic("claude-sonnet-4-6"), + "claude-opus-4-6": () => anthropic("claude-opus-4-6"), +}; + +function getModel(modelId?: string): LanguageModel { + const factory = MODELS[modelId ?? DEFAULT_MODEL]; + if (!factory) return MODELS[DEFAULT_MODEL]!(); + return factory(); +} + +const DEFAULT_REGISTRY_MODEL_ID = "anthropic:claude-sonnet-4-6" as const satisfies RegistryLanguageModelId; + +function languageModelForChatTurn(modelOverride: string | null | undefined): LanguageModel { + if (modelOverride) { + return getModel(modelOverride); + } + return registryLanguageModel(chat.prompt().model, DEFAULT_REGISTRY_MODEL_ID); +} + +function useExtendedThinking(modelOverride: string | null | undefined): boolean { + if (modelOverride && REASONING_MODELS.has(modelOverride)) { + return true; + } + const promptModel = chat.prompt().model; + return promptModel != null && promptModel.includes("claude-opus-4-6"); +} +// #endregion + +// #region Per-run state — chat.local persists across turns in the same run +const userContext = chat.local<{ + userId: string; + name: string; + plan: "free" | "pro"; + preferredModel: string | null; + messageCount: number; +}>({ id: "userContext" }); +// #endregion + +// ============================================================================ +// chat.task — the main chat agent +// ============================================================================ + +export 
const aiChat = chat + .withUIMessage({ + streamOptions: { + sendReasoning: true, + onError: (error) => { + logger.error("Stream error", { error }); + if (error instanceof Error && error.message.includes("rate limit")) { + return "Rate limited — please wait a moment and try again."; + } + return "Something went wrong. Please try again."; + }, + }, + }) + .task({ + id: "ai-chat", + clientDataSchema: z.object({ model: z.string().optional(), userId: z.string() }), + idleTimeoutInSeconds: 60, + chatAccessTokenTTL: "1m", + + // #region Compaction — automatic context window management + compaction: { + shouldCompact: ({ totalTokens }) => (totalTokens ?? 0) > COMPACT_AFTER_TOKENS, + summarize: async ({ messages }) => { + const resolved = await compactionPrompt.resolve({}); + return generateText({ + model: registryLanguageModel(resolved.model, "openai:gpt-4o-mini"), + messages: [...messages, { role: "user" as const, content: resolved.text }], + ...resolved.toAISDKTelemetry(), + }).then((r) => r.text); + }, + compactUIMessages: ({ uiMessages, summary }) => { + return [ + { + id: generateId(), + role: "assistant" as const, + parts: [{ type: "text" as const, text: `[Conversation summary]\n\n${summary}` }], + }, + ...uiMessages.slice(-2), + ]; + }, + }, + // #endregion + + // #region Pending messages — user can steer the agent mid-response + pendingMessages: { + shouldInject: ({ steps }) => steps.length > 0, + prepare: ({ messages }) => + messages.length === 1 + ? [{ role: "user" as const, content: textFromFirstPart(messages[0]!) }] + : [ + { + role: "user" as const, + content: `The user sent ${messages.length + } messages while you were working:\n\n${messages + .map((m, i) => `${i + 1}. ${textFromFirstPart(m)}`) + .join("\n")}`, + }, + ], + }, + // #endregion + + // #region prepareMessages — runs before every LLM call + prepareMessages: ({ messages, reason }) => { + // Add Anthropic cache breaks to the last message for prompt caching. 
+ if (messages.length === 0) return messages; + const last = messages[messages.length - 1]!; + return [ + ...messages.slice(0, -1), + { + ...last, + providerOptions: { + ...last.providerOptions, + anthropic: { + ...(last.providerOptions?.anthropic as Record | undefined), + cacheControl: { type: "ephemeral" }, + }, + }, + }, + ]; + }, + // #endregion + + // --- Lifecycle hooks --- + + // #region onPreload — eagerly initialize before the user's first message + onPreload: async ({ chatId, runId, chatAccessToken, clientData }) => { + if (!clientData) return; + const user = await prisma.user.upsert({ + where: { id: clientData.userId }, + create: { id: clientData.userId, name: "User" }, + update: {}, + }); + userContext.init({ + userId: user.id, + name: user.name, + plan: user.plan as "free" | "pro", + preferredModel: user.preferredModel, + messageCount: user.messageCount, + }); + + const resolved = await systemPrompt.resolve({ + name: user.name, + plan: user.plan as string, + }); + chat.prompt.set(resolved); + + await prisma.chat.upsert({ + where: { id: chatId }, + create: { + id: chatId, + title: "New chat", + userId: user.id, + model: clientData?.model ?? 
DEFAULT_MODEL, + }, + update: {}, + }); + await prisma.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + // #endregion + + // #region onChatStart — fallback init when not preloaded + onChatStart: async ({ + chatId, + runId, + chatAccessToken, + clientData, + continuation, + preloaded, + }) => { + if (preloaded) return; + + const user = await prisma.user.upsert({ + where: { id: clientData.userId }, + create: { id: clientData.userId, name: "User" }, + update: {}, + }); + userContext.init({ + userId: user.id, + name: user.name, + plan: user.plan as "free" | "pro", + preferredModel: user.preferredModel, + messageCount: user.messageCount, + }); + + const resolved = await systemPrompt.resolve({ + name: user.name, + plan: user.plan as string, + }); + chat.prompt.set(resolved); + + if (!continuation) { + await prisma.chat.upsert({ + where: { id: chatId }, + create: { + id: chatId, + title: "New chat", + userId: user.id, + model: clientData.model ?? 
DEFAULT_MODEL, + }, + update: {}, + }); + } + + await prisma.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken }, + update: { runId, publicAccessToken: chatAccessToken }, + }); + }, + // #endregion + + // #region onCompacted + onCompacted: async ({ summary, totalTokens, messageCount, chatId, turn }) => { + logger.info("Conversation compacted", { + chatId, + turn, + totalTokens, + messageCount, + summaryLength: summary.length, + }); + }, + // #endregion + + // #region onTurnStart — persist messages + write status via writer + onTurnStart: async ({ chatId, uiMessages, writer }) => { + writer.write({ type: "data-turn-status", data: { status: "preparing" } }); + chat.defer( + prisma.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages as unknown as ChatMessagesForWrite }, + }) + ); + }, + // #endregion + + // #region onTurnComplete — persist + background self-review via chat.inject() + onTurnComplete: async ({ + chatId, + uiMessages, + messages, + runId, + chatAccessToken, + lastEventId, + }) => { + await prisma.chat.update({ + where: { id: chatId }, + data: { messages: uiMessages as unknown as ChatMessagesForWrite }, + }); + await prisma.chatSession.upsert({ + where: { id: chatId }, + create: { id: chatId, runId, publicAccessToken: chatAccessToken, lastEventId }, + update: { runId, publicAccessToken: chatAccessToken, lastEventId }, + }); + + // Background self-review — a cheap model critiques the response and + // injects coaching into the conversation before the next user message. 
+ chat.defer( + (async () => { + const resolved = await selfReviewPrompt.resolve({}); + + const review = await generateObject({ + model: registryLanguageModel(resolved.model, "openai:gpt-4o-mini"), + ...resolved.toAISDKTelemetry(), + system: resolved.text, + prompt: `Here is the conversation to review:\n\n${messages + .filter((m) => m.role === "user" || m.role === "assistant") + .map( + (m) => + `${m.role}: ${typeof m.content === "string" + ? m.content + : Array.isArray(m.content) + ? m.content + .filter((p: any) => p.type === "text") + .map((p: any) => p.text) + .join("") + : "" + }` + ) + .join("\n\n")}`, + schema: z.object({ + needsImprovement: z.boolean().describe("Whether the response needs improvement"), + suggestions: z + .array(z.string()) + .describe("Specific actionable suggestions for the next response"), + missedTools: z + .array(z.string()) + .describe("Tool names the assistant should have used but didn't"), + }), + }); + + const parts = []; + if (review.object.suggestions.length > 0) { + parts.push( + `Suggestions:\n${review.object.suggestions.map((s) => `- ${s}`).join("\n")}` + ); + } + if (review.object.missedTools.length > 0) { + parts.push(`Consider using: ${review.object.missedTools.join(", ")}`); + } + + chat.inject([ + { + role: "user" as const, + content: review.object.needsImprovement + ? `[Self-review of your previous response]\n\n${parts.join( + "\n\n" + )}\n\nApply these improvements naturally in your next response.` + : `[Self-review of your previous response]\n\nYour previous response was good. No changes needed.`, + }, + ]); + })() + ); + }, + // #endregion + + // #region run — just return streamText(), chat.task handles everything else + run: async ({ messages, clientData, stopSignal }) => { + userContext.messageCount++; + if (clientData?.model) { + userContext.preferredModel = clientData.model; + } + + const modelOverride = clientData?.model ?? userContext.preferredModel ?? 
undefined; + const useReasoning = useExtendedThinking(modelOverride); + + return streamText({ + ...chat.toStreamTextOptions({ + registry, + telemetry: clientData?.userId ? { userId: clientData.userId } : undefined, + }), + model: languageModelForChatTurn(modelOverride), + messages: messages, + tools: chatTools, + stopWhen: stepCountIs(10), + abortSignal: stopSignal, + providerOptions: { + openai: { user: clientData?.userId }, + anthropic: { + metadata: { user_id: clientData?.userId }, + ...(useReasoning ? { thinking: { type: "enabled", budgetTokens: 10000 } } : {}), + }, + }, + }); + }, + // #endregion + }); + +// #region Raw task variant — same functionality using composable primitives +async function initUserContext(userId: string, chatId: string, model?: string) { + const user = await prisma.user.upsert({ + where: { id: userId }, + create: { id: userId, name: "User" }, + update: {}, + }); + userContext.init({ + userId: user.id, + name: user.name, + plan: user.plan as "free" | "pro", + preferredModel: user.preferredModel, + messageCount: user.messageCount, + }); + + const resolved = await systemPrompt.resolve({ + name: user.name, + plan: user.plan as string, + }); + chat.prompt.set(resolved); + + await prisma.chat.upsert({ + where: { id: chatId }, + create: { id: chatId, title: "New chat", userId: user.id, model: model ?? DEFAULT_MODEL }, + update: {}, + }); +} + +export const aiChatRaw = task({ + id: "ai-chat-raw", + run: async (payload: ChatTaskWirePayload, { signal: runSignal }) => { + let currentPayload = payload; + const clientData = payload.metadata as { userId: string; model?: string } | undefined; + + if (currentPayload.trigger === "preload") { + if (clientData) { + await initUserContext(clientData.userId, currentPayload.chatId, clientData.model); + } + + const result = await chat.messages.waitWithIdleTimeout({ + idleTimeoutInSeconds: payload.idleTimeoutInSeconds ?? 
60, + timeout: "1h", + spanName: "waiting for first message", + }); + if (!result.ok) return; + currentPayload = result.output; + } + + const currentClientData = (currentPayload.metadata ?? clientData) as + | { userId: string; model?: string } + | undefined; + + if (!userContext.userId && currentClientData) { + await initUserContext( + currentClientData.userId, + currentPayload.chatId, + currentClientData.model + ); + } + + const stop = chat.createStopSignal(); + const conversation = new chat.MessageAccumulator({ + compaction: { + shouldCompact: ({ totalTokens }) => (totalTokens ?? 0) > COMPACT_AFTER_TOKENS, + summarize: async ({ messages: msgs }) => { + const resolved = await compactionPrompt.resolve({}); + return generateText({ + model: registryLanguageModel(resolved.model, "openai:gpt-4o-mini"), + ...resolved.toAISDKTelemetry(), + messages: [...msgs, { role: "user" as const, content: resolved.text }], + }).then((r) => r.text); + }, + compactUIMessages: ({ summary }) => [ + { + id: generateId(), + role: "assistant" as const, + parts: [{ type: "text" as const, text: `[Summary]\n\n${summary}` }], + }, + ], + }, + pendingMessages: { + shouldInject: () => true, + prepare: ({ messages }) => [ + { + role: "user" as const, + content: [ + { + type: "text" as const, + text: `[User sent ${messages.length} message(s) while you were working]:\n${messages + .map((m) => textFromFirstPart(m)) + .join("\n")}`, + }, + ], + }, + ], + }, + }); + + for (let turn = 0; turn < 100; turn++) { + stop.reset(); + + const messages = await conversation.addIncoming( + currentPayload.messages, + currentPayload.trigger, + turn + ); + + const turnClientData = (currentPayload.metadata ?? currentClientData) as + | { userId: string; model?: string } + | undefined; + + userContext.messageCount++; + if (turnClientData?.model) { + userContext.preferredModel = turnClientData.model; + } + + const modelOverride = turnClientData?.model ?? userContext.preferredModel ?? 
undefined; + const useReasoning = useExtendedThinking(modelOverride); + const combinedSignal = AbortSignal.any([runSignal, stop.signal]); + + const steeringSub = chat.messages.on(async (msg) => { + const lastMsg = msg.messages?.[msg.messages.length - 1]; + if (lastMsg) await conversation.steerAsync(lastMsg); + }); + + const result = streamText({ + ...chat.toStreamTextOptions({ registry }), + model: languageModelForChatTurn(modelOverride), + messages: messages, + tools: { + inspectEnvironment, + webFetch, + deepResearch, + }, + stopWhen: stepCountIs(10), + abortSignal: combinedSignal, + providerOptions: { + openai: { user: turnClientData?.userId }, + anthropic: { + metadata: { user_id: turnClientData?.userId }, + ...(useReasoning ? { thinking: { type: "enabled", budgetTokens: 10000 } } : {}), + }, + }, + prepareStep: conversation.prepareStep(), + }); + + let response: UIMessage | undefined; + try { + response = await chat.pipeAndCapture(result, { signal: combinedSignal }); + } catch (error) { + if (error instanceof Error && error.name === "AbortError") { + if (runSignal.aborted) break; + } else { + throw error; + } + } finally { + steeringSub.off(); + } + + if (response) { + if (stop.signal.aborted && !runSignal.aborted) { + await conversation.addResponse(chat.cleanupAbortedParts(response)); + } else { + await conversation.addResponse(response); + } + } + + if (runSignal.aborted) break; + + let turnUsage: LanguageModelUsage | undefined; + try { + turnUsage = await result.totalUsage; + } catch { + /* non-fatal */ + } + await conversation.compactIfNeeded(turnUsage, { + chatId: currentPayload.chatId, + turn, + }); + + await prisma.chat.update({ + where: { id: currentPayload.chatId }, + data: { messages: conversation.uiMessages as unknown as ChatMessagesForWrite }, + }); + + if (userContext.hasChanged()) { + await prisma.user.update({ + where: { id: userContext.userId }, + data: { + messageCount: userContext.messageCount, + preferredModel: userContext.preferredModel, + 
}, + }); + } + + await chat.writeTurnComplete(); + + const next = await chat.messages.waitWithIdleTimeout({ + idleTimeoutInSeconds: 60, + timeout: "1h", + spanName: "waiting for next message", + }); + if (!next.ok) break; + currentPayload = next.output; + } + + stop.cleanup(); + }, +}); + +export const aiChatSession = task({ + id: "ai-chat-session", + run: async (payload: ChatTaskWirePayload, { signal }) => { + const clientData = payload.metadata as { userId: string; model?: string } | undefined; + + if (clientData) { + await initUserContext(clientData.userId, payload.chatId, clientData.model); + } + + const session = chat.createSession(payload, { + signal, + idleTimeoutInSeconds: payload.idleTimeoutInSeconds ?? 60, + timeout: "1h", + compaction: { + shouldCompact: ({ totalTokens }) => (totalTokens ?? 0) > COMPACT_AFTER_TOKENS, + summarize: async ({ messages: msgs }) => { + const resolved = await compactionPrompt.resolve({}); + return generateText({ + model: registryLanguageModel(resolved.model, "openai:gpt-4o-mini"), + ...resolved.toAISDKTelemetry(), + messages: [...msgs, { role: "user" as const, content: resolved.text }], + }).then((r) => r.text); + }, + compactUIMessages: ({ uiMessages, summary }) => [ + { + id: generateId(), + role: "assistant" as const, + parts: [{ type: "text" as const, text: `[Conversation summary]\n\n${summary}` }], + }, + ...uiMessages.slice(-4), + ], + }, + pendingMessages: { + shouldInject: () => true, + }, + }); + + for await (const turn of session) { + const turnClientData = (turn.clientData ?? clientData) as + | { userId: string; model?: string } + | undefined; + + userContext.messageCount++; + if (turnClientData?.model) userContext.preferredModel = turnClientData.model; + + const modelOverride = turnClientData?.model ?? userContext.preferredModel ?? 
undefined; + const useReasoning = useExtendedThinking(modelOverride); + + const result = streamText({ + ...chat.toStreamTextOptions({ registry }), + model: languageModelForChatTurn(modelOverride), + messages: turn.messages, + tools: { + inspectEnvironment, + webFetch, + deepResearch, + }, + stopWhen: stepCountIs(10), + abortSignal: turn.signal, + providerOptions: { + openai: { user: turnClientData?.userId }, + anthropic: { + metadata: { user_id: turnClientData?.userId }, + ...(useReasoning ? { thinking: { type: "enabled", budgetTokens: 10000 } } : {}), + }, + }, + }); + + await turn.complete(result); + + await prisma.chat.update({ + where: { id: turn.chatId }, + data: { messages: turn.uiMessages as unknown as ChatMessagesForWrite }, + }); + + if (userContext.hasChanged()) { + await prisma.user.update({ + where: { id: userContext.userId }, + data: { + messageCount: userContext.messageCount, + preferredModel: userContext.preferredModel, + }, + }); + } + } + }, +}); +// #endregion diff --git a/references/ai-chat/trigger.config.ts b/references/ai-chat/trigger.config.ts new file mode 100644 index 00000000000..f9194d078b7 --- /dev/null +++ b/references/ai-chat/trigger.config.ts @@ -0,0 +1,15 @@ +import { defineConfig } from "@trigger.dev/sdk"; +import { prismaExtension } from "@trigger.dev/build/extensions/prisma"; + +export default defineConfig({ + project: process.env.TRIGGER_PROJECT_REF!, + dirs: ["./src/trigger"], + maxDuration: 3600, + build: { + extensions: [ + prismaExtension({ + mode: "modern", + }), + ], + }, +}); diff --git a/references/ai-chat/tsconfig.json b/references/ai-chat/tsconfig.json new file mode 100644 index 00000000000..c1334095f87 --- /dev/null +++ b/references/ai-chat/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + "target": "ES2017", + "lib": ["dom", "dom.iterable", "esnext"], + "allowJs": true, + "skipLibCheck": true, + "strict": true, + "noEmit": true, + "esModuleInterop": true, + "module": "esnext", + "moduleResolution": "bundler", 
+ "resolveJsonModule": true, + "isolatedModules": true, + "jsx": "preserve", + "incremental": true, + "plugins": [ + { + "name": "next" + } + ], + "paths": { + "@/*": ["./src/*"] + } + }, + "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], + "exclude": ["node_modules"] +} diff --git a/references/hello-world/src/trigger/triggerAndSubscribe.ts b/references/hello-world/src/trigger/triggerAndSubscribe.ts new file mode 100644 index 00000000000..347319159ce --- /dev/null +++ b/references/hello-world/src/trigger/triggerAndSubscribe.ts @@ -0,0 +1,276 @@ +import { logger, schemaTask, task, tasks } from "@trigger.dev/sdk"; +import { z } from "zod"; +import { setTimeout } from "timers/promises"; + +// A simple child task that does some work and returns a result +const childWork = schemaTask({ + id: "child-work", + schema: z.object({ + label: z.string(), + delayMs: z.number().default(1000), + shouldFail: z.boolean().default(false), + }), + run: async ({ label, delayMs, shouldFail }) => { + logger.info(`Child task "${label}" starting`, { delayMs, shouldFail }); + await setTimeout(delayMs); + if (shouldFail) { + throw new Error(`Child task "${label}" intentionally failed`); + } + logger.info(`Child task "${label}" done`); + return { label, completedAt: new Date().toISOString() }; + }, +}); + +// Test 1: Basic triggerAndSubscribe — single child task +export const testTriggerAndSubscribe = task({ + id: "test-trigger-and-subscribe", + run: async () => { + logger.info("Starting single triggerAndSubscribe test"); + + const result = await childWork + .triggerAndSubscribe({ label: "single", delayMs: 2000 }) + .unwrap(); + + logger.info("Got result", { result }); + return result; + }, +}); + +// Test 2: Parallel triggerAndSubscribe — multiple children concurrently +export const testParallelSubscribe = task({ + id: "test-parallel-subscribe", + run: async () => { + logger.info("Starting parallel triggerAndSubscribe test"); + + // This would fail with 
triggerAndWait due to preventMultipleWaits + const [result1, result2, result3] = await Promise.all([ + childWork.triggerAndSubscribe({ label: "parallel-1", delayMs: 2000 }).unwrap(), + childWork.triggerAndSubscribe({ label: "parallel-2", delayMs: 3000 }).unwrap(), + childWork.triggerAndSubscribe({ label: "parallel-3", delayMs: 1000 }).unwrap(), + ]); + + logger.info("All parallel tasks complete", { result1, result2, result3 }); + return { result1, result2, result3 }; + }, +}); + +// Test 3: Abort with cancelOnAbort: true (default) — child run gets cancelled +export const testAbortWithCancel = task({ + id: "test-abort-with-cancel", + run: async () => { + logger.info("Starting abort test (cancelOnAbort: true) — child should be cancelled"); + + const controller = new AbortController(); + + // Abort after 2 seconds + setTimeout(2000).then(() => { + logger.info("Firing abort signal"); + controller.abort(); + }); + + try { + const result = await childWork + .triggerAndSubscribe( + { label: "will-be-cancelled", delayMs: 10000 }, + { signal: controller.signal } + ) + .unwrap(); + + logger.error("Unexpected: task completed without being cancelled", { result }); + return { aborted: false, childCancelled: false, result }; + } catch (error) { + logger.info("Expected: subscription aborted and child cancelled", { + error: error instanceof Error ? 
error.message : String(error), + }); + return { aborted: true, childCancelled: true }; + } + }, +}); + +// Test 4: Abort with cancelOnAbort: false — child run keeps running +export const testAbortWithoutCancel = task({ + id: "test-abort-without-cancel", + run: async () => { + logger.info("Starting abort test (cancelOnAbort: false) — child should keep running"); + + const controller = new AbortController(); + + // Abort after 2 seconds + setTimeout(2000).then(() => { + logger.info("Firing abort signal"); + controller.abort(); + }); + + try { + const result = await childWork + .triggerAndSubscribe( + { label: "keeps-running", delayMs: 5000 }, + { signal: controller.signal, cancelOnAbort: false } + ) + .unwrap(); + + logger.error("Unexpected: task completed (subscription should have been aborted)", { + result, + }); + return { aborted: false, result }; + } catch (error) { + logger.info("Expected: subscription aborted but child still running", { + error: error instanceof Error ? error.message : String(error), + }); + // The child task should still complete on its own — we just stopped listening + return { aborted: true, childCancelled: false }; + } + }, +}); + +// Test 5: Abort signal already aborted before calling triggerAndSubscribe +export const testAbortAlreadyAborted = task({ + id: "test-abort-already-aborted", + run: async () => { + logger.info("Starting pre-aborted signal test"); + + const controller = new AbortController(); + controller.abort("pre-aborted"); + + try { + const result = await childWork + .triggerAndSubscribe( + { label: "should-not-run", delayMs: 1000 }, + { signal: controller.signal } + ) + .unwrap(); + + logger.error("Unexpected: task completed", { result }); + return { aborted: false }; + } catch (error) { + logger.info("Expected: immediately aborted", { + error: error instanceof Error ? 
error.message : String(error), + }); + return { aborted: true }; + } + }, +}); + +// Test 6: Standalone tasks.triggerAndSubscribe +export const testStandaloneSubscribe = task({ + id: "test-standalone-subscribe", + run: async () => { + logger.info("Starting standalone triggerAndSubscribe test"); + + const result = await tasks + .triggerAndSubscribe("child-work", { + label: "standalone", + delayMs: 1500, + }) + .unwrap(); + + logger.info("Got result", { result }); + return result; + }, +}); + +// Test 7: Result object without .unwrap() — success case +export const testResultSuccess = task({ + id: "test-result-success", + run: async () => { + const result = await childWork.triggerAndSubscribe({ + label: "result-success", + delayMs: 1000, + }); + + logger.info("Result object", { + ok: result.ok, + id: result.id, + taskIdentifier: result.taskIdentifier, + }); + + if (result.ok) { + logger.info("Success output", { output: result.output }); + return { ok: true, output: result.output, id: result.id }; + } else { + logger.error("Unexpected failure", { error: result.error }); + return { ok: false, error: String(result.error) }; + } + }, +}); + +// Test 8: Result object without .unwrap() — failure case +export const testResultFailure = task({ + id: "test-result-failure", + retry: { maxAttempts: 1 }, + run: async () => { + const result = await childWork.triggerAndSubscribe({ + label: "result-failure", + delayMs: 500, + shouldFail: true, + }); + + logger.info("Result object", { + ok: result.ok, + id: result.id, + taskIdentifier: result.taskIdentifier, + }); + + if (result.ok) { + logger.error("Unexpected success", { output: result.output }); + return { ok: true, output: result.output }; + } else { + logger.info("Expected failure", { error: String(result.error) }); + return { ok: false, error: String(result.error), id: result.id }; + } + }, +}); + +// Test 9: .unwrap() on a failed child — should throw SubtaskUnwrapError +export const testUnwrapFailure = task({ + id: 
"test-unwrap-failure", + retry: { maxAttempts: 1 }, + run: async () => { + try { + const output = await childWork + .triggerAndSubscribe({ + label: "unwrap-failure", + delayMs: 500, + shouldFail: true, + }) + .unwrap(); + + logger.error("Unexpected: unwrap succeeded", { output }); + return { threw: false, output }; + } catch (error) { + logger.info("Expected: unwrap threw", { + name: error instanceof Error ? error.name : "unknown", + message: error instanceof Error ? error.message : String(error), + }); + return { + threw: true, + errorName: error instanceof Error ? error.name : "unknown", + errorMessage: error instanceof Error ? error.message : String(error), + }; + } + }, +}); + +// Test 10: Parallel with mixed success/failure +export const testParallelMixed = task({ + id: "test-parallel-mixed", + retry: { maxAttempts: 1 }, + run: async () => { + const [success, failure] = await Promise.all([ + childWork.triggerAndSubscribe({ label: "mixed-success", delayMs: 1000 }), + childWork.triggerAndSubscribe({ label: "mixed-failure", delayMs: 500, shouldFail: true }), + ]); + + logger.info("Results", { + success: { ok: success.ok, output: success.ok ? success.output : null }, + failure: { ok: failure.ok, error: !failure.ok ? String(failure.error) : null }, + }); + + return { + successOk: success.ok, + successOutput: success.ok ? success.output : null, + failureOk: failure.ok, + failureError: !failure.ok ? String(failure.error) : null, + }; + }, +});