diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7d8a9d4..187fc4b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,7 @@
### Added
+- **Async fixture responses** — Fixture responses can now be sync or async functions that receive the request and return the response dynamically. Enables awaiting side effects (database writes, API calls) before constructing the response — eliminating race conditions in complex multi-turn E2E tests. Works with all providers, streaming, and convenience methods (`on()`, `onMessage()`, `onTurn()`). (Feature request by @5ebastianMeier, issue #154)
- **Snapshot-style recording** — When `X-Test-Id` is present, recorded fixtures are saved to `
+ Fixture responses can be functions — sync or async — that receive the request
+ and return the response dynamically. Use this when you need to await side effects, compute
+ responses based on request content, or inject runtime data into fixtures.
+
+ Wait for an external operation to complete before constructing the fixture response.
+ Eliminates race conditions in multi-turn E2E tests where entity creation happens
+ out-of-band.
+
+ Compute the response from the incoming request content. Useful for echo-style fixtures,
+ transformations, or conditional logic that goes beyond what match fields can express.
+ Tool-call cycle with hasToolResult
]
}
+
+
+
+ Dynamic / Async Responses
+
+ Async response with side-effect
+
+ mock.on(
+ { toolCallId: "call_create_entity" },
+ async (req) => {
+ const entity = await createEntityPromise;
+ return {
+ content: `Entity "${entity.name}" created!`,
+ toolCalls: [{
+ name: "next_step",
+ arguments: JSON.stringify({ entityId: entity.id }),
+ }],
+ };
+ },
+);
+
+ Request-aware response
+
+ mock.onMessage("translate", (req) => {
+ const text = req.messages.at(-1)?.content ?? "";
+ return { content: `Translated: ${text.toUpperCase()}` };
+});
+
+ Response Types
+ Dynamic responses: Responses can also be sync or async functions that
+ receive the request and return the response dynamically. See
+ Dynamic Responses on the Examples page.
+
+Fixture responses can include optional fields to override auto-generated envelope values.
diff --git a/docs/multi-turn/index.html b/docs/multi-turn/index.html
index 1fa7890..922f340 100644
--- a/docs/multi-turn/index.html
+++ b/docs/multi-turn/index.html
@@ -182,6 +182,18 @@
+ Async fixture responses for race-free multi-turn tests. When a
+ multi-turn test depends on side effects between turns (database writes, entity creation,
+ external API calls), async fixture responses let you await those operations
+ before constructing the response — eliminating race conditions without
+ setTimeout hacks. See
+ Dynamic / Async Responses on the
+ Examples page.
+
The onTurn() convenience method combines turnIndex with a
diff --git a/src/__tests__/async-fixture-response.test.ts b/src/__tests__/async-fixture-response.test.ts
new file mode 100644
index 0000000..f4aa0e6
--- /dev/null
+++ b/src/__tests__/async-fixture-response.test.ts
@@ -0,0 +1,251 @@
+import { describe, it, expect, afterEach } from "vitest";
+import { LLMock } from "../llmock.js";
+import type { ChatCompletionRequest, SSEChunk } from "../types.js";
+
+function parseSSEChunks(body: string): SSEChunk[] {
+ return body
+ .split("\n\n")
+ .filter((line) => line.startsWith("data: ") && !line.includes("[DONE]"))
+ .map((line) => JSON.parse(line.slice(6)) as SSEChunk);
+}
+
+describe("async fixture response (function responses)", () => {
+ let mock: LLMock | null = null;
+
+ afterEach(async () => {
+ if (mock) {
+ await mock.stop();
+ mock = null;
+ }
+ });
+
+ it("resolves a sync function response", async () => {
+ mock = new LLMock({ port: 0 });
+ mock.on({ userMessage: "sync-fn" }, () => ({ content: "sync-factory-result" }));
+ await mock.start();
+
+ const res = await fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4o",
+ messages: [{ role: "user", content: "sync-fn" }],
+ stream: false,
+ }),
+ });
+
+ expect(res.status).toBe(200);
+ const json = await res.json();
+ expect(json.choices[0].message.content).toBe("sync-factory-result");
+ });
+
+ it("resolves an async function response", async () => {
+ mock = new LLMock({ port: 0 });
+ mock.on({ userMessage: "async-fn" }, async () => {
+ return { content: "async-factory-result" };
+ });
+ await mock.start();
+
+ const res = await fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4o",
+ messages: [{ role: "user", content: "async-fn" }],
+ stream: false,
+ }),
+ });
+
+ expect(res.status).toBe(200);
+ const json = await res.json();
+ expect(json.choices[0].message.content).toBe("async-factory-result");
+ });
+
+ it("receives the request object in the factory function", async () => {
+ mock = new LLMock({ port: 0 });
+ mock.on({ userMessage: "echo-model" }, (req: ChatCompletionRequest) => ({
+ content: `model=${req.model}`,
+ }));
+ await mock.start();
+
+ const res = await fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4o-mini",
+ messages: [{ role: "user", content: "echo-model" }],
+ stream: false,
+ }),
+ });
+
+ expect(res.status).toBe(200);
+ const json = await res.json();
+ expect(json.choices[0].message.content).toBe("model=gpt-4o-mini");
+ });
+
+ it("works with streaming responses from a factory", async () => {
+ mock = new LLMock({ port: 0 });
+ mock.on({ userMessage: "stream-fn" }, () => ({ content: "streamed-from-factory" }));
+ await mock.start();
+
+ const res = await fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4o",
+ messages: [{ role: "user", content: "stream-fn" }],
+ stream: true,
+ }),
+ });
+
+ expect(res.status).toBe(200);
+ const chunks = parseSSEChunks(await res.text());
+ const content = chunks.map((c) => c.choices?.[0]?.delta?.content ?? "").join("");
+ expect(content).toBe("streamed-from-factory");
+ });
+
+ it("works with onMessage convenience method", async () => {
+ mock = new LLMock({ port: 0 });
+ mock.onMessage("convenience-fn", (req: ChatCompletionRequest) => ({
+ content: `msg-count=${req.messages.length}`,
+ }));
+ await mock.start();
+
+ const res = await fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4o",
+ messages: [
+ { role: "system", content: "you are helpful" },
+ { role: "user", content: "convenience-fn" },
+ ],
+ stream: false,
+ }),
+ });
+
+ expect(res.status).toBe(200);
+ const json = await res.json();
+ expect(json.choices[0].message.content).toBe("msg-count=2");
+ });
+
+ it("static response still works alongside function responses", async () => {
+ mock = new LLMock({ port: 0 });
+ mock.on({ userMessage: "static" }, { content: "plain-static" });
+ mock.on({ userMessage: "dynamic" }, () => ({ content: "from-function" }));
+ await mock.start();
+
+ const [staticRes, dynamicRes] = await Promise.all([
+ fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4o",
+ messages: [{ role: "user", content: "static" }],
+ stream: false,
+ }),
+ }),
+ fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4o",
+ messages: [{ role: "user", content: "dynamic" }],
+ stream: false,
+ }),
+ }),
+ ]);
+
+ expect(staticRes.status).toBe(200);
+ expect(dynamicRes.status).toBe(200);
+
+ const staticJson = await staticRes.json();
+ const dynamicJson = await dynamicRes.json();
+
+ expect(staticJson.choices[0].message.content).toBe("plain-static");
+ expect(dynamicJson.choices[0].message.content).toBe("from-function");
+ });
+
+ it("returns 500 when factory throws", async () => {
+ mock = new LLMock({ port: 0 });
+ mock.on({ userMessage: "boom" }, () => {
+ throw new Error("factory exploded");
+ });
+ await mock.start();
+
+ const res = await fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "boom" }],
+ stream: false,
+ }),
+ });
+
+ expect(res.status).toBe(500);
+ });
+
+ it("returns 500 when async factory rejects", async () => {
+ mock = new LLMock({ port: 0 });
+ mock.on({ userMessage: "reject" }, async () => {
+ throw new Error("async rejection");
+ });
+ await mock.start();
+
+ const res = await fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "reject" }],
+ stream: false,
+ }),
+ });
+
+ expect(res.status).toBe(500);
+ });
+
+ it("returns 500 when factory returns invalid response shape", async () => {
+ mock = new LLMock({ port: 0 });
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ mock.on({ userMessage: "bad" }, () => ({ notAValidField: true }) as any);
+ await mock.start();
+
+ const res = await fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "bad" }],
+ stream: false,
+ }),
+ });
+
+ expect(res.status).toBe(500);
+ });
+
+ it("works with async factory and streaming", async () => {
+ mock = new LLMock({ port: 0 });
+ mock.on({ userMessage: "async-stream" }, async () => {
+ await new Promise((r) => setTimeout(r, 10));
+ return { content: "async-streamed-result" };
+ });
+ await mock.start();
+
+ const res = await fetch(`${mock.url}/v1/chat/completions`, {
+ method: "POST",
+ headers: { "Content-Type": "application/json", Authorization: "Bearer test" },
+ body: JSON.stringify({
+ model: "gpt-4",
+ messages: [{ role: "user", content: "async-stream" }],
+ stream: true,
+ }),
+ });
+
+ expect(res.status).toBe(200);
+ const chunks = parseSSEChunks(await res.text());
+ const content = chunks.map((c) => c.choices?.[0]?.delta?.content ?? "").join("");
+ expect(content).toBe("async-streamed-result");
+ });
+});
diff --git a/src/bedrock-converse.ts b/src/bedrock-converse.ts
index 92bcab6..175622f 100644
--- a/src/bedrock-converse.ts
+++ b/src/bedrock-converse.ts
@@ -26,6 +26,7 @@ import {
isErrorResponse,
flattenHeaders,
getTestId,
+ resolveResponse,
} from "./helpers.js";
import { matchFixture } from "./router.js";
import { writeErrorResponse } from "./sse-writer.js";
@@ -659,7 +660,7 @@ export async function handleConverse(
return;
}
- const response = fixture.response;
+ const response = await resolveResponse(fixture, completionReq);
// Error response
if (isErrorResponse(response)) {
@@ -923,7 +924,7 @@ export async function handleConverseStream(
return;
}
- const response = fixture.response;
+ const response = await resolveResponse(fixture, completionReq);
const latency = fixture.latency ?? defaults.latency;
const chunkSize = Math.max(1, fixture.chunkSize ?? defaults.chunkSize);
diff --git a/src/bedrock.ts b/src/bedrock.ts
index 5bfb354..ea93f84 100644
--- a/src/bedrock.ts
+++ b/src/bedrock.ts
@@ -37,6 +37,7 @@ import {
isErrorResponse,
flattenHeaders,
getTestId,
+ resolveResponse,
} from "./helpers.js";
import { matchFixture } from "./router.js";
import { writeErrorResponse } from "./sse-writer.js";
@@ -472,7 +473,7 @@ export async function handleBedrock(
return;
}
- const response = fixture.response;
+ const response = await resolveResponse(fixture, completionReq);
// Error response
if (isErrorResponse(response)) {
@@ -1069,7 +1070,7 @@ export async function handleBedrockStream(
return;
}
- const response = fixture.response;
+ const response = await resolveResponse(fixture, completionReq);
const latency = fixture.latency ?? defaults.latency;
const chunkSize = Math.max(1, fixture.chunkSize ?? defaults.chunkSize);
diff --git a/src/cohere.ts b/src/cohere.ts
index deebd08..8704eb2 100644
--- a/src/cohere.ts
+++ b/src/cohere.ts
@@ -30,6 +30,7 @@ import {
isErrorResponse,
flattenHeaders,
getTestId,
+ resolveResponse,
} from "./helpers.js";
import { matchFixture } from "./router.js";
import { writeErrorResponse, delay, calculateDelay } from "./sse-writer.js";
@@ -863,7 +864,7 @@ export async function handleCohere(
return;
}
- const response = fixture.response;
+ const response = await resolveResponse(fixture, completionReq);
const latency = fixture.latency ?? defaults.latency;
const chunkSize = Math.max(1, fixture.chunkSize ?? defaults.chunkSize);
diff --git a/src/elevenlabs-audio.ts b/src/elevenlabs-audio.ts
index e5e274c..4d8e43c 100644
--- a/src/elevenlabs-audio.ts
+++ b/src/elevenlabs-audio.ts
@@ -6,6 +6,7 @@ import {
isErrorResponse,
FORMAT_TO_CONTENT_TYPE,
getTestId,
+ resolveResponse,
} from "./helpers.js";
import { matchFixture } from "./router.js";
import { writeErrorResponse } from "./sse-writer.js";
@@ -152,7 +153,7 @@ export async function handleElevenLabsAudio(
return;
}
- const response = fixture.response;
+ const response = await resolveResponse(fixture, syntheticReq);
// Error fixture
if (isErrorResponse(response)) {
diff --git a/src/embeddings.ts b/src/embeddings.ts
index 1b25103..93558aa 100644
--- a/src/embeddings.ts
+++ b/src/embeddings.ts
@@ -20,6 +20,7 @@ import {
buildEmbeddingResponse,
flattenHeaders,
getTestId,
+ resolveResponse,
} from "./helpers.js";
import { matchFixture } from "./router.js";
import { writeErrorResponse } from "./sse-writer.js";
@@ -152,7 +153,7 @@ export async function handleEmbeddings(
return;
if (fixture) {
- const response = fixture.response;
+ const response = await resolveResponse(fixture, syntheticReq);
// Error response
if (isErrorResponse(response)) {
diff --git a/src/fal-audio.ts b/src/fal-audio.ts
index 5c0a23b..3414f3a 100644
--- a/src/fal-audio.ts
+++ b/src/fal-audio.ts
@@ -1,7 +1,13 @@
import type http from "node:http";
import crypto from "node:crypto";
import type { AudioResponse, ChatCompletionRequest, Fixture, HandlerDefaults } from "./types.js";
-import { isAudioResponse, isErrorResponse, FORMAT_TO_CONTENT_TYPE, getTestId } from "./helpers.js";
+import {
+ isAudioResponse,
+ isErrorResponse,
+ FORMAT_TO_CONTENT_TYPE,
+ getTestId,
+ resolveResponse,
+} from "./helpers.js";
import { matchFixture } from "./router.js";
import { proxyAndRecord } from "./recorder.js";
import type { Journal } from "./journal.js";
@@ -295,7 +301,7 @@ async function handleQueueSubmit(
}
journal.incrementFixtureMatchCount(fixture, fixtures, testId);
- const response = fixture.response;
+ const response = await resolveResponse(fixture, syntheticReq);
if (isErrorResponse(response)) {
const status = response.status ?? 500;
@@ -577,7 +583,7 @@ async function handleSyncRun(
}
journal.incrementFixtureMatchCount(fixture, fixtures, getTestId(req));
- const response = fixture.response;
+ const response = await resolveResponse(fixture, syntheticReq);
if (isErrorResponse(response)) {
const status = response.status ?? 500;
diff --git a/src/fal.ts b/src/fal.ts
index 85389bd..52c29c3 100644
--- a/src/fal.ts
+++ b/src/fal.ts
@@ -7,6 +7,7 @@ import {
isJSONResponse,
flattenHeaders,
getTestId,
+ resolveResponse,
} from "./helpers.js";
import { matchFixture } from "./router.js";
import { proxyAndRecord } from "./recorder.js";
@@ -361,7 +362,7 @@ export async function handleFal(
}
journal.incrementFixtureMatchCount(fixture, fixtures, testId);
- const response = fixture.response;
+ const response = await resolveResponse(fixture, syntheticReq);
if (isErrorResponse(response)) {
const status = response.status ?? 500;
diff --git a/src/fixture-loader.ts b/src/fixture-loader.ts
index 26d1ecb..9b5410e 100644
--- a/src/fixture-loader.ts
+++ b/src/fixture-loader.ts
@@ -237,242 +237,248 @@ export function validateFixtures(fixtures: Fixture[]): ValidationResult[] {
const f = fixtures[i];
const response = f.response;
- // --- Error checks ---
-
- // Response type recognition
- // Note: isContentWithToolCallsResponse must be checked before isTextResponse
- // and isToolCallResponse since it is a structural superset of both.
- if (
- !isContentWithToolCallsResponse(response) &&
- !isTextResponse(response) &&
- !isToolCallResponse(response) &&
- !isErrorResponse(response) &&
- !isEmbeddingResponse(response) &&
- !isImageResponse(response) &&
- !isAudioResponse(response) &&
- !isTranscriptionResponse(response) &&
- !isVideoResponse(response) &&
- !isJSONResponse(response)
- ) {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message:
- "response is not a recognized type (must have content, toolCalls, error, embedding, image, audio, transcription, video, or json)",
- });
- }
+ // Skip response-shape validation for function responses — they are
+ // evaluated at runtime so we cannot statically inspect them.
+ if (typeof response === "function") {
+ // Still validate match fields and numeric options below.
+ } else {
+ // --- Error checks ---
- // Text response checks
- if (isTextResponse(response)) {
- if (response.content === "") {
+ // Response type recognition
+ // Note: isContentWithToolCallsResponse must be checked before isTextResponse
+ // and isToolCallResponse since it is a structural superset of both.
+ if (
+ !isContentWithToolCallsResponse(response) &&
+ !isTextResponse(response) &&
+ !isToolCallResponse(response) &&
+ !isErrorResponse(response) &&
+ !isEmbeddingResponse(response) &&
+ !isImageResponse(response) &&
+ !isAudioResponse(response) &&
+ !isTranscriptionResponse(response) &&
+ !isVideoResponse(response) &&
+ !isJSONResponse(response)
+ ) {
results.push({
severity: "error",
fixtureIndex: i,
- message: "content is empty string",
+ message:
+ "response is not a recognized type (must have content, toolCalls, error, embedding, image, audio, transcription, video, or json)",
});
}
- validateReasoning(response, i, results);
- validateWebSearches(response, i, results);
- }
- // ContentWithToolCalls response checks
- if (isContentWithToolCallsResponse(response)) {
- if (response.content === "") {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: "content is empty string",
- });
- }
- if (response.toolCalls.length === 0) {
- results.push({
- severity: "warning",
- fixtureIndex: i,
- message: "toolCalls array is empty — fixture will never produce tool calls",
- });
- }
- for (let j = 0; j < response.toolCalls.length; j++) {
- const tc = response.toolCalls[j];
- if (!tc.name) {
+ // Text response checks
+ if (isTextResponse(response)) {
+ if (response.content === "") {
results.push({
severity: "error",
fixtureIndex: i,
- message: `toolCalls[${j}].name is empty`,
+ message: "content is empty string",
});
}
- try {
- JSON.parse(tc.arguments);
- } catch {
+ validateReasoning(response, i, results);
+ validateWebSearches(response, i, results);
+ }
+
+ // ContentWithToolCalls response checks
+ if (isContentWithToolCallsResponse(response)) {
+ if (response.content === "") {
results.push({
severity: "error",
fixtureIndex: i,
- message: `toolCalls[${j}].arguments is not valid JSON: ${tc.arguments}`,
+ message: "content is empty string",
});
}
+ if (response.toolCalls.length === 0) {
+ results.push({
+ severity: "warning",
+ fixtureIndex: i,
+ message: "toolCalls array is empty — fixture will never produce tool calls",
+ });
+ }
+ for (let j = 0; j < response.toolCalls.length; j++) {
+ const tc = response.toolCalls[j];
+ if (!tc.name) {
+ results.push({
+ severity: "error",
+ fixtureIndex: i,
+ message: `toolCalls[${j}].name is empty`,
+ });
+ }
+ try {
+ JSON.parse(tc.arguments);
+ } catch {
+ results.push({
+ severity: "error",
+ fixtureIndex: i,
+ message: `toolCalls[${j}].arguments is not valid JSON: ${tc.arguments}`,
+ });
+ }
+ }
+ validateReasoning(response, i, results);
+ validateWebSearches(response, i, results);
}
- validateReasoning(response, i, results);
- validateWebSearches(response, i, results);
- }
- // Tool call response checks
- if (isToolCallResponse(response)) {
- if (response.toolCalls.length === 0) {
- results.push({
- severity: "warning",
- fixtureIndex: i,
- message: "toolCalls array is empty — fixture will never produce tool calls",
- });
+ // Tool call response checks
+ if (isToolCallResponse(response)) {
+ if (response.toolCalls.length === 0) {
+ results.push({
+ severity: "warning",
+ fixtureIndex: i,
+ message: "toolCalls array is empty — fixture will never produce tool calls",
+ });
+ }
+ for (let j = 0; j < response.toolCalls.length; j++) {
+ const tc = response.toolCalls[j];
+ if (!tc.name) {
+ results.push({
+ severity: "error",
+ fixtureIndex: i,
+ message: `toolCalls[${j}].name is empty`,
+ });
+ }
+ try {
+ JSON.parse(tc.arguments);
+ } catch {
+ results.push({
+ severity: "error",
+ fixtureIndex: i,
+ message: `toolCalls[${j}].arguments is not valid JSON: ${tc.arguments}`,
+ });
+ }
+ }
}
- for (let j = 0; j < response.toolCalls.length; j++) {
- const tc = response.toolCalls[j];
- if (!tc.name) {
+
+ // Error response checks
+ if (isErrorResponse(response)) {
+ if (!response.error.message) {
results.push({
severity: "error",
fixtureIndex: i,
- message: `toolCalls[${j}].name is empty`,
+ message: "error.message is empty",
});
}
- try {
- JSON.parse(tc.arguments);
- } catch {
+ if (response.status !== undefined && (response.status < 100 || response.status > 599)) {
results.push({
severity: "error",
fixtureIndex: i,
- message: `toolCalls[${j}].arguments is not valid JSON: ${tc.arguments}`,
+ message: `error status ${response.status} is not a valid HTTP status code`,
});
}
}
- }
-
- // Error response checks
- if (isErrorResponse(response)) {
- if (!response.error.message) {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: "error.message is empty",
- });
- }
- if (response.status !== undefined && (response.status < 100 || response.status > 599)) {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: `error status ${response.status} is not a valid HTTP status code`,
- });
- }
- }
- // Embedding response checks
- if (isEmbeddingResponse(response)) {
- if (response.embedding.length === 0) {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: "embedding array is empty",
- });
- }
- for (let j = 0; j < response.embedding.length; j++) {
- if (typeof response.embedding[j] !== "number") {
+ // Embedding response checks
+ if (isEmbeddingResponse(response)) {
+ if (response.embedding.length === 0) {
results.push({
severity: "error",
fixtureIndex: i,
- message: `embedding[${j}] is not a number`,
+ message: "embedding array is empty",
});
- break; // one error is enough
+ }
+ for (let j = 0; j < response.embedding.length; j++) {
+ if (typeof response.embedding[j] !== "number") {
+ results.push({
+ severity: "error",
+ fixtureIndex: i,
+ message: `embedding[${j}] is not a number`,
+ });
+ break; // one error is enough
+ }
}
}
- }
- // Audio response checks — validate object-form audio
- if (isAudioResponse(response) && typeof response.audio === "object") {
- const audioObj = response.audio;
- if (typeof audioObj.b64Json !== "string" || audioObj.b64Json === "") {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: "audio.b64Json must be a non-empty string",
- });
- }
- if (audioObj.contentType !== undefined && typeof audioObj.contentType !== "string") {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: `audio.contentType must be a string, got ${typeof audioObj.contentType}`,
- });
+ // Audio response checks — validate object-form audio
+ if (isAudioResponse(response) && typeof response.audio === "object") {
+ const audioObj = response.audio;
+ if (typeof audioObj.b64Json !== "string" || audioObj.b64Json === "") {
+ results.push({
+ severity: "error",
+ fixtureIndex: i,
+ message: "audio.b64Json must be a non-empty string",
+ });
+ }
+ if (audioObj.contentType !== undefined && typeof audioObj.contentType !== "string") {
+ results.push({
+ severity: "error",
+ fixtureIndex: i,
+ message: `audio.contentType must be a string, got ${typeof audioObj.contentType}`,
+ });
+ }
}
- }
- // Validate ResponseOverrides fields
- if (
- isTextResponse(response) ||
- isToolCallResponse(response) ||
- isContentWithToolCallsResponse(response)
- ) {
- const r = response as ResponseOverrides;
- if (r.id !== undefined && typeof r.id !== "string") {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: `override "id" must be a string, got ${typeof r.id}`,
- });
- }
- if (r.created !== undefined && (typeof r.created !== "number" || r.created < 0)) {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: `override "created" must be a non-negative number`,
- });
- }
- if (r.model !== undefined && typeof r.model !== "string") {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: `override "model" must be a string, got ${typeof r.model}`,
- });
- }
- if (r.finishReason !== undefined && typeof r.finishReason !== "string") {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: `override "finishReason" must be a string, got ${typeof r.finishReason}`,
- });
- }
- if (r.role !== undefined && typeof r.role !== "string") {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: `override "role" must be a string, got ${typeof r.role}`,
- });
- }
- if (r.systemFingerprint !== undefined && typeof r.systemFingerprint !== "string") {
- results.push({
- severity: "error",
- fixtureIndex: i,
- message: `override "systemFingerprint" must be a string, got ${typeof r.systemFingerprint}`,
- });
- }
- if (r.usage !== undefined) {
- if (typeof r.usage !== "object" || r.usage === null || Array.isArray(r.usage)) {
+ // Validate ResponseOverrides fields
+ if (
+ isTextResponse(response) ||
+ isToolCallResponse(response) ||
+ isContentWithToolCallsResponse(response)
+ ) {
+ const r = response as ResponseOverrides;
+ if (r.id !== undefined && typeof r.id !== "string") {
+ results.push({
+ severity: "error",
+ fixtureIndex: i,
+ message: `override "id" must be a string, got ${typeof r.id}`,
+ });
+ }
+ if (r.created !== undefined && (typeof r.created !== "number" || r.created < 0)) {
+ results.push({
+ severity: "error",
+ fixtureIndex: i,
+ message: `override "created" must be a non-negative number`,
+ });
+ }
+ if (r.model !== undefined && typeof r.model !== "string") {
results.push({
severity: "error",
fixtureIndex: i,
- message: `override "usage" must be an object`,
+ message: `override "model" must be a string, got ${typeof r.model}`,
});
- } else {
- // Check all known usage fields are numbers if present
- for (const key of Object.keys(r.usage)) {
- const val = (r.usage as Record