diff --git a/apps/docs/ai-sdk/user-profiles.mdx b/apps/docs/ai-sdk/user-profiles.mdx index 3afc41e0b..9c9cb0978 100644 --- a/apps/docs/ai-sdk/user-profiles.mdx +++ b/apps/docs/ai-sdk/user-profiles.mdx @@ -117,6 +117,32 @@ const result = await generateText({ // Uses both profile (user's expertise) AND search (previous debugging sessions) ``` +### Hybrid Search Mode + +Use `searchMode: "hybrid"` to search both memories AND document chunks. + +```typescript +const model = withSupermemory(openai("gpt-4"), "user-123", { + mode: "full", + searchMode: "hybrid", // Search memories + document chunks + searchLimit: 15 // Max results (default: 10) +}) + +const result = await generateText({ + model, + messages: [{ + role: "user", + content: "What's in my documents about quarterly goals?" + }] +}) +// Searches both extracted memories AND raw document content +``` + +**Search Mode Options:** +- `"memories"` (default) - Search only memory entries +- `"hybrid"` - Search memories + document chunks +- `"documents"` - Search only document chunks + ## Custom Prompt Templates Customize how memories are formatted and injected into the system prompt using the `promptTemplate` option. 
This is useful for: diff --git a/packages/tools/README.md b/packages/tools/README.md index 78963f101..d028fc3c6 100644 --- a/packages/tools/README.md +++ b/packages/tools/README.md @@ -184,6 +184,29 @@ const result = await generateText({ }) ``` +**Hybrid Search Mode (RAG)** - Search both memories AND document chunks: +```typescript +import { generateText } from "ai" +import { withSupermemory } from "@supermemory/tools/ai-sdk" +import { openai } from "@ai-sdk/openai" + +const modelWithHybrid = withSupermemory(openai("gpt-4"), "user-123", { + mode: "full", + searchMode: "hybrid", // Search memories + document chunks + searchLimit: 15 // Max results (default: 10) +}) + +const result = await generateText({ + model: modelWithHybrid, + messages: [{ role: "user", content: "What's in my documents about quarterly goals?" }], +}) +``` + +Search mode options: +- `"memories"` (default) - Search only memory entries +- `"hybrid"` - Search memories + document chunks (recommended for RAG) +- `"documents"` - Search only document chunks + #### Automatic Memory Capture The middleware can automatically save user messages as memories: @@ -652,6 +675,8 @@ interface WithSupermemoryOptions { conversationId?: string verbose?: boolean mode?: "profile" | "query" | "full" + searchMode?: "memories" | "hybrid" | "documents" + searchLimit?: number addMemory?: "always" | "never" /** Optional Supermemory API key. Use this in browser environments. */ apiKey?: string @@ -661,6 +686,8 @@ interface WithSupermemoryOptions { - **conversationId**: Optional conversation ID to group messages into a single document for contextual memory generation - **verbose**: Enable detailed logging of memory search and injection process (default: false) - **mode**: Memory search mode - "profile" (default), "query", or "full" +- **searchMode**: Search mode - "memories" (default), "hybrid", or "documents". 
Use "hybrid" for RAG applications +- **searchLimit**: Maximum number of search results when using hybrid/documents mode (default: 10) - **addMemory**: Automatic memory storage mode - "always" or "never" (default: "never") ## Available Tools diff --git a/packages/tools/package.json b/packages/tools/package.json index 39cde98d8..a08fd6113 100644 --- a/packages/tools/package.json +++ b/packages/tools/package.json @@ -1,7 +1,7 @@ { "name": "@supermemory/tools", "type": "module", - "version": "1.4.00", + "version": "1.5.0", "description": "Memory tools for AI SDK and OpenAI function calling with supermemory", "scripts": { "build": "tsdown", diff --git a/packages/tools/src/ai-sdk.ts b/packages/tools/src/ai-sdk.ts index 7e10523ed..a1d4ae838 100644 --- a/packages/tools/src/ai-sdk.ts +++ b/packages/tools/src/ai-sdk.ts @@ -118,6 +118,227 @@ export const addMemoryTool = ( }) } +export const getProfileTool = ( + apiKey: string, + config?: SupermemoryToolsConfig, +) => { + const client = new Supermemory({ + apiKey, + ...(config?.baseUrl ? { baseURL: config.baseUrl } : {}), + }) + + const containerTags = getContainerTags(config) + const strict = config?.strict ?? false + + return tool({ + description: TOOL_DESCRIPTIONS.getProfile, + inputSchema: z.object({ + containerTag: strict + ? z.string().describe(PARAMETER_DESCRIPTIONS.containerTag) + : z + .string() + .optional() + .describe(PARAMETER_DESCRIPTIONS.containerTag), + query: z + .string() + .optional() + .describe(PARAMETER_DESCRIPTIONS.query), + }), + execute: async ({ containerTag, query }) => { + try { + const tag = containerTag || containerTags[0] + + const response = await client.profile({ + containerTag: tag, + ...(query && { q: query }), + }) + + return { + success: true, + profile: response.profile, + searchResults: response.searchResults, + } + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : "Unknown error", + } + } + }, + }) +} + +export const documentListTool = ( + apiKey: string, + config?: SupermemoryToolsConfig, +) => { + const client = new Supermemory({ + apiKey, + ...(config?.baseUrl ? { baseURL: config.baseUrl } : {}), + }) + + const containerTags = getContainerTags(config) + const strict = config?.strict ?? false + + return tool({ + description: TOOL_DESCRIPTIONS.documentList, + inputSchema: z.object({ + containerTag: z + .string() + .optional() + .describe(PARAMETER_DESCRIPTIONS.containerTag), + limit: strict + ? z + .number() + .default(DEFAULT_VALUES.limit) + .describe(PARAMETER_DESCRIPTIONS.limit) + : z + .number() + .optional() + .default(DEFAULT_VALUES.limit) + .describe(PARAMETER_DESCRIPTIONS.limit), + offset: z + .number() + .optional() + .describe(PARAMETER_DESCRIPTIONS.offset), + status: z + .string() + .optional() + .describe(PARAMETER_DESCRIPTIONS.status), + }), + execute: async ({ containerTag, limit, offset, status }) => { + try { + const tag = containerTag || containerTags[0] + + const response = await client.documents.list({ + containerTags: [tag], + limit: limit || DEFAULT_VALUES.limit, + ...(offset !== undefined && { offset }), + ...(status && { status }), + }) + + return { + success: true, + documents: response.documents, + pagination: response.pagination, + } + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : "Unknown error", + } + } + }, + }) +} + +export const documentAddTool = ( + apiKey: string, + config?: SupermemoryToolsConfig, +) => { + const client = new Supermemory({ + apiKey, + ...(config?.baseUrl ? 
{ baseURL: config.baseUrl } : {}), + }) + + const containerTags = getContainerTags(config) + + return tool({ + description: TOOL_DESCRIPTIONS.documentAdd, + inputSchema: z.object({ + content: z.string().describe(PARAMETER_DESCRIPTIONS.content), + title: z.string().optional().describe(PARAMETER_DESCRIPTIONS.title), + description: z + .string() + .optional() + .describe(PARAMETER_DESCRIPTIONS.description), + }), + execute: async ({ content, title, description }) => { + try { + const metadata: Record<string, string> = {} + if (title) metadata.title = title + if (description) metadata.description = description + + const response = await client.documents.add({ + content, + containerTags, + ...(Object.keys(metadata).length > 0 && { metadata }), + }) + + return { + success: true, + document: response, + } + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : "Unknown error", + } + } + }, + }) +} + +export const memoryForgetTool = ( + apiKey: string, + config?: SupermemoryToolsConfig, +) => { + const client = new Supermemory({ + apiKey, + ...(config?.baseUrl ? 
{ baseURL: config.baseUrl } : {}), + }) + + const containerTags = getContainerTags(config) + + return tool({ + description: TOOL_DESCRIPTIONS.memoryForget, + inputSchema: z.object({ + containerTag: z + .string() + .optional() + .describe(PARAMETER_DESCRIPTIONS.containerTag), + memoryId: z + .string() + .optional() + .describe(PARAMETER_DESCRIPTIONS.memoryId), + memoryContent: z + .string() + .optional() + .describe(PARAMETER_DESCRIPTIONS.memoryContent), + reason: z.string().optional().describe(PARAMETER_DESCRIPTIONS.reason), + }), + execute: async ({ containerTag, memoryId, memoryContent, reason }) => { + try { + if (!memoryId && !memoryContent) { + return { + success: false, + error: "Either memoryId or memoryContent must be provided", + } + } + + const tag = containerTag || containerTags[0] + + await client.memories.forget({ + containerTag: tag, + ...(memoryId && { id: memoryId }), + ...(memoryContent && { content: memoryContent }), + ...(reason && { reason }), + }) + + return { + success: true, + message: "Memory forgotten successfully", + } + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : "Unknown error", + } + } + }, + }) +} + /** * Create Supermemory tools for AI SDK */ @@ -128,6 +349,10 @@ export function supermemoryTools( return { searchMemories: searchMemoriesTool(apiKey, config), addMemory: addMemoryTool(apiKey, config), + getProfile: getProfileTool(apiKey, config), + documentList: documentListTool(apiKey, config), + documentAdd: documentAddTool(apiKey, config), + memoryForget: memoryForgetTool(apiKey, config), } } diff --git a/packages/tools/src/shared/index.ts b/packages/tools/src/shared/index.ts index 5a6e0f7ba..f866f3cdd 100644 --- a/packages/tools/src/shared/index.ts +++ b/packages/tools/src/shared/index.ts @@ -3,6 +3,7 @@ export type { MemoryPromptData, PromptTemplate, MemoryMode, + SearchMode, AddMemoryMode, Logger, ProfileStructure, @@ -34,9 +35,12 @@ export { // Memory client export { supermemoryProfileSearch, + supermemoryHybridSearch, buildMemoriesText, extractQueryText, getLastUserMessageText, type BuildMemoriesTextOptions, type GenericMessage, + type SearchResultItem, + type SearchResponse, } from "./memory-client" diff --git a/packages/tools/src/shared/memory-client.ts b/packages/tools/src/shared/memory-client.ts index d55926c02..a18d85347 100644 --- a/packages/tools/src/shared/memory-client.ts +++ b/packages/tools/src/shared/memory-client.ts @@ -5,12 +5,36 @@ import type { MemoryPromptData, ProfileStructure, PromptTemplate, + SearchMode, } from "./types" import { convertProfileToMarkdown, defaultPromptTemplate, } from "./prompt-builder" +/** + * Search result item from the Supermemory search API. + * Contains either a memory field (for memory results) or a chunk field (for document chunks). + */ +export interface SearchResultItem { + id: string + similarity: number + memory?: string + chunk?: string + title?: string + content?: string + metadata?: Record<string, unknown> +} + +/** + * Response structure from the Supermemory search API. 
+ */ +export interface SearchResponse { + results: SearchResultItem[] + total: number + timing: number +} + /** * Fetches profile and search results from the Supermemory API. * @@ -61,6 +85,59 @@ export const supermemoryProfileSearch = async ( } } +/** + * Performs a hybrid search using the Supermemory search API. + * Hybrid search returns both memories AND document chunks. + * + * @param containerTag - The container tag/user ID for scoping memories + * @param queryText - The search query text + * @param searchMode - The search mode: "memories", "hybrid", or "documents" + * @param baseUrl - The API base URL + * @param apiKey - The API key for authentication + * @param limit - Maximum number of results to return (default: 10) + * @returns The search response with results containing either memory or chunk fields + */ +export const supermemoryHybridSearch = async ( + containerTag: string, + queryText: string, + searchMode: SearchMode, + baseUrl: string, + apiKey: string, + limit = 10, +): Promise<SearchResponse> => { + const payload = JSON.stringify({ + q: queryText, + containerTag: containerTag, + searchMode: searchMode, + limit: limit, + }) + + try { + const response = await fetch(`${baseUrl}/v4/search`, { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${apiKey}`, + }, + body: payload, + }) + + if (!response.ok) { + const errorText = await response.text().catch(() => "Unknown error") + throw new Error( + `Supermemory search failed: ${response.status} ${response.statusText}. ${errorText}`, + ) + } + + return await response.json() + } catch (error) { + if (error instanceof Error) { + throw error + } + throw new Error(`Supermemory API request failed: ${error}`) + } +} + /** * Options for building memories text. 
*/ @@ -72,12 +149,46 @@ export interface BuildMemoriesTextOptions { apiKey: string logger: Logger promptTemplate?: PromptTemplate + /** + * Search mode for memory retrieval: + * - "memories": Search only memory entries (default) + * - "hybrid": Search both memories AND document chunks (recommended for RAG) + * - "documents": Search only document chunks + */ + searchMode?: SearchMode + /** Maximum number of search results to return (default: 10) */ + searchLimit?: number +} + +/** + * Formats search results (memories and/or chunks) into a readable string. + */ +const formatSearchResults = ( + results: SearchResultItem[], + includeChunks: boolean, +): string => { + if (results.length === 0) return "" + + const formattedResults = results.map((result) => { + if (result.memory) { + return `- ${result.memory}` + } + if (result.chunk && includeChunks) { + return `- [Document] ${result.chunk}` + } + return null + }).filter(Boolean) + + return formattedResults.join("\n") } /** * Fetches memories from the API, deduplicates them, and formats them into * the final string to be injected into the system prompt. * + * When searchMode is "hybrid" or "documents", uses the search API to retrieve + * both memories and document chunks. Otherwise, uses the profile API. 
+ * * @param options - Configuration for building memories text * @returns The final formatted memories string ready for injection */ @@ -92,64 +203,125 @@ export const buildMemoriesText = async ( apiKey, logger, promptTemplate = defaultPromptTemplate, + searchMode = "memories", + searchLimit = 10, } = options - const memoriesResponse = await supermemoryProfileSearch( - containerTag, - queryText, - baseUrl, - apiKey, - ) + const useHybridSearch = searchMode === "hybrid" || searchMode === "documents" - const memoryCountStatic = memoriesResponse.profile.static?.length || 0 - const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0 + let userMemories = "" + let generalSearchMemories = "" - logger.info("Memory search completed", { - containerTag, - memoryCountStatic, - memoryCountDynamic, - queryText: - queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""), - mode, - }) + if (useHybridSearch && queryText) { + logger.info("Using hybrid search mode", { + containerTag, + searchMode, + queryText: queryText.substring(0, 100) + (queryText.length > 100 ? "..." 
: ""), + }) - const deduplicated = deduplicateMemories({ - static: memoriesResponse.profile.static, - dynamic: memoriesResponse.profile.dynamic, - searchResults: memoriesResponse.searchResults?.results, - }) + const searchResponse = await supermemoryHybridSearch( + containerTag, + queryText, + searchMode, + baseUrl, + apiKey, + searchLimit, + ) - logger.debug("Memory deduplication completed", { - static: { - original: memoryCountStatic, - deduplicated: deduplicated.static.length, - }, - dynamic: { - original: memoryCountDynamic, - deduplicated: deduplicated.dynamic.length, - }, - searchResults: { - original: memoriesResponse.searchResults?.results?.length, - deduplicated: deduplicated.searchResults?.length, - }, - }) + logger.info("Hybrid search completed", { + containerTag, + resultCount: searchResponse.results.length, + timing: searchResponse.timing, + searchMode, + }) - const userMemories = - mode !== "query" - ? convertProfileToMarkdown({ - profile: { - static: deduplicated.static, - dynamic: deduplicated.dynamic, - }, - searchResults: { results: [] }, - }) - : "" - const generalSearchMemories = - mode !== "profile" - ? 
`Search results for user's recent message: \n${deduplicated.searchResults - .map((memory) => `- ${memory}`) - .join("\n")}` - : "" + const includeChunks = searchMode === "hybrid" || searchMode === "documents" + generalSearchMemories = formatSearchResults(searchResponse.results, includeChunks) + + if (generalSearchMemories) { + generalSearchMemories = `Search results for user's recent message:\n${generalSearchMemories}` + } + + if (mode !== "query") { + const profileResponse = await supermemoryProfileSearch( + containerTag, + "", + baseUrl, + apiKey, + ) + + const deduplicated = deduplicateMemories({ + static: profileResponse.profile.static, + dynamic: profileResponse.profile.dynamic, + searchResults: [], + }) + + userMemories = convertProfileToMarkdown({ + profile: { + static: deduplicated.static, + dynamic: deduplicated.dynamic, + }, + searchResults: { results: [] }, + }) + } + } else { + const memoriesResponse = await supermemoryProfileSearch( + containerTag, + queryText, + baseUrl, + apiKey, + ) + + const memoryCountStatic = memoriesResponse.profile.static?.length || 0 + const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0 + + logger.info("Memory search completed", { + containerTag, + memoryCountStatic, + memoryCountDynamic, + queryText: + queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""), + mode, + }) + + const deduplicated = deduplicateMemories({ + static: memoriesResponse.profile.static, + dynamic: memoriesResponse.profile.dynamic, + searchResults: memoriesResponse.searchResults?.results, + }) + + logger.debug("Memory deduplication completed", { + static: { + original: memoryCountStatic, + deduplicated: deduplicated.static.length, + }, + dynamic: { + original: memoryCountDynamic, + deduplicated: deduplicated.dynamic.length, + }, + searchResults: { + original: memoriesResponse.searchResults?.results?.length, + deduplicated: deduplicated.searchResults?.length, + }, + }) + + userMemories = + mode !== "query" + ? 
convertProfileToMarkdown({ + profile: { + static: deduplicated.static, + dynamic: deduplicated.dynamic, + }, + searchResults: { results: [] }, + }) + : "" + generalSearchMemories = + mode !== "profile" + ? `Search results for user's recent message: \n${deduplicated.searchResults + .map((memory) => `- ${memory}`) + .join("\n")}` + : "" + } const promptData: MemoryPromptData = { userMemories, diff --git a/packages/tools/src/shared/types.ts b/packages/tools/src/shared/types.ts index 0d457e85b..f8968e761 100644 --- a/packages/tools/src/shared/types.ts +++ b/packages/tools/src/shared/types.ts @@ -40,6 +40,14 @@ export type PromptTemplate = (data: MemoryPromptData) => string */ export type MemoryMode = "profile" | "query" | "full" +/** + * Search mode for memory retrieval: + * - "memories": Search only memory entries (default) + * - "hybrid": Search both memories AND document chunks (recommended for RAG) + * - "documents": Search only document chunks + */ +export type SearchMode = "memories" | "hybrid" | "documents" + /** * Memory persistence mode: * - "always": Automatically save conversations as memories @@ -110,6 +118,15 @@ export interface SupermemoryBaseOptions { threadId?: string /** Memory retrieval mode */ mode?: MemoryMode + /** + * Search mode for memory retrieval: + * - "memories": Search only memory entries (default) + * - "hybrid": Search both memories AND document chunks (recommended for RAG) + * - "documents": Search only document chunks + */ + searchMode?: SearchMode + /** Maximum number of search results to return when using hybrid/documents mode (default: 10) */ + searchLimit?: number /** Memory persistence mode */ addMemory?: AddMemoryMode /** Enable detailed logging of memory search and injection */ diff --git a/packages/tools/src/tools-shared.ts b/packages/tools/src/tools-shared.ts index b40000424..818762ba2 100644 --- a/packages/tools/src/tools-shared.ts +++ b/packages/tools/src/tools-shared.ts @@ -8,6 +8,14 @@ export const TOOL_DESCRIPTIONS = { 
"Search (recall) memories/details/information about the user or other facts or entities. Run when explicitly asked or when context about user's past choices would be helpful.", addMemory: "Add (remember) memories/details/information about the user or other facts or entities. Run when explicitly asked or when the user mentions any information generalizable beyond the context of the current conversation.", + getProfile: + "Get user profile containing static memories (permanent facts) and dynamic memories (recent context). Optionally include search results by providing a query.", + documentList: + "List stored documents with optional filtering by container tag, status, and pagination. Useful for browsing or managing saved content.", + documentAdd: + "Add a new document (URL, text, or content) to memory. The content is queued for processing, and memories will be extracted automatically.", + memoryForget: + "Forget (soft delete) a specific memory by ID or content match. The memory is marked as forgotten but not permanently deleted. Use when user wants to remove specific information from their profile.", } as const // Parameter descriptions @@ -18,6 +26,16 @@ export const PARAMETER_DESCRIPTIONS = { limit: "Maximum number of results to return", memory: "The text content of the memory to add. 
This should be a single sentence or a short paragraph.", + containerTag: "Tag to filter/scope the operation (e.g., user ID, project ID)", + query: "Optional search query to include relevant search results", + offset: "Number of items to skip for pagination (default: 0)", + status: "Filter documents by processing status (e.g., 'completed', 'processing', 'failed')", + content: "The content to add - can be text, URL, or other supported formats", + title: "Optional title for the document", + description: "Optional description for the document", + memoryId: "The unique identifier of the memory entry", + memoryContent: "Exact content match of the memory entry to operate on (alternative to ID)", + reason: "Optional reason for forgetting this memory", } as const // Default values diff --git a/packages/tools/src/tools.test.ts b/packages/tools/src/tools.test.ts index ab6f9b848..56ef7b62f 100644 --- a/packages/tools/src/tools.test.ts +++ b/packages/tools/src/tools.test.ts @@ -34,6 +34,11 @@ describe("@supermemory/tools", () => { expect(tools).toBeDefined() expect(tools.searchMemories).toBeDefined() expect(tools.addMemory).toBeDefined() + expect(tools.getProfile).toBeDefined() + expect(tools.documentList).toBeDefined() + expect(tools.documentDelete).toBeDefined() + expect(tools.documentAdd).toBeDefined() + expect(tools.memoryForget).toBeDefined() }) it("should create tools with custom baseUrl", () => { @@ -45,6 +50,11 @@ describe("@supermemory/tools", () => { expect(tools).toBeDefined() expect(tools.searchMemories).toBeDefined() expect(tools.addMemory).toBeDefined() + expect(tools.getProfile).toBeDefined() + expect(tools.documentList).toBeDefined() + expect(tools.documentDelete).toBeDefined() + expect(tools.documentAdd).toBeDefined() + expect(tools.memoryForget).toBeDefined() }) it("should create individual tools", () => { @@ -54,9 +64,29 @@ describe("@supermemory/tools", () => { const addTool = aiSdk.addMemoryTool(testApiKey, { projectId: "test-project-123", }) + const 
profileTool = aiSdk.getProfileTool(testApiKey, { + projectId: "test-project-123", + }) + const listTool = aiSdk.documentListTool(testApiKey, { + projectId: "test-project-123", + }) + const deleteTool = aiSdk.documentDeleteTool(testApiKey, { + projectId: "test-project-123", + }) + const addDocTool = aiSdk.documentAddTool(testApiKey, { + projectId: "test-project-123", + }) + const forgetTool = aiSdk.memoryForgetTool(testApiKey, { + projectId: "test-project-123", + }) expect(searchTool).toBeDefined() expect(addTool).toBeDefined() + expect(profileTool).toBeDefined() + expect(listTool).toBeDefined() + expect(deleteTool).toBeDefined() + expect(addDocTool).toBeDefined() + expect(forgetTool).toBeDefined() }) }) @@ -123,6 +153,128 @@ describe("@supermemory/tools", () => { expect(result).toBeDefined() expect(result.text).toBeDefined() }) + + it("should work with new profile tool", async () => { + const openai = createOpenAI({ + apiKey: testOpenAIKey, + }) + + const tools = aiSdk.supermemoryTools(testApiKey, { + projectId: "test-profile-tool", + baseUrl: testBaseUrl, + }) + + const result = await generateText({ + model: openai(testModelName), + messages: [ + { + role: "system", + content: + "You are a helpful assistant. When asked about user profile or preferences, use the getProfile tool.", + }, + { + role: "user", + content: "What do you know about me?", + }, + ], + tools: { + getProfile: tools.getProfile, + }, + }) + + expect(result).toBeDefined() + expect(result.text).toBeDefined() + }) + + it("should work with new document tools", async () => { + const openai = createOpenAI({ + apiKey: testOpenAIKey, + }) + + const tools = aiSdk.supermemoryTools(testApiKey, { + projectId: "test-document-tools", + baseUrl: testBaseUrl, + }) + + const result = await generateText({ + model: openai(testModelName), + messages: [ + { + role: "system", + content: + "You are a helpful assistant. 
When asked to list documents, use the documentList tool.", + }, + { + role: "user", + content: "Show me my saved documents", + }, + ], + tools: { + documentList: tools.documentList, + }, + }) + + expect(result).toBeDefined() + expect(result.text).toBeDefined() + }) + }) + + describe("new tool operations", () => { + it("should get profile with getProfileTool", async () => { + const profileTool = aiSdk.getProfileTool(testApiKey, { + projectId: "test-profile", + baseUrl: testBaseUrl, + }) + + // Verify tool is a valid CoreTool from AI SDK + expect(profileTool).toBeDefined() + expect(profileTool.description).toBeDefined() + expect(typeof profileTool.description).toBe("string") + }) + + it("should list documents with documentListTool", async () => { + const listTool = aiSdk.documentListTool(testApiKey, { + projectId: "test-list", + baseUrl: testBaseUrl, + }) + + expect(listTool).toBeDefined() + expect(listTool.description).toBeDefined() + expect(typeof listTool.description).toBe("string") + }) + + it("should create documentDeleteTool", async () => { + const deleteTool = aiSdk.documentDeleteTool(testApiKey, { + projectId: "test-delete", + baseUrl: testBaseUrl, + }) + + expect(deleteTool).toBeDefined() + expect(deleteTool.description).toBeDefined() + expect(typeof deleteTool.description).toBe("string") + }) + + it("should create documentAddTool", async () => { + const addDocTool = aiSdk.documentAddTool(testApiKey, { + projectId: "test-add-doc", + baseUrl: testBaseUrl, + }) + + expect(addDocTool).toBeDefined() + expect(addDocTool.description).toBeDefined() + expect(typeof addDocTool.description).toBe("string") + }) + + it("should create memoryForgetTool", async () => { + const forgetTool = aiSdk.memoryForgetTool(testApiKey, { + projectId: "test-forget", + baseUrl: testBaseUrl, + }) + + expect(forgetTool).toBeDefined() + expect(forgetTool.description).toBeDefined() + expect(typeof forgetTool.description).toBe("string") + }) }) }) diff --git 
a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts index beeef093b..f260d903b 100644 --- a/packages/tools/src/vercel/index.ts +++ b/packages/tools/src/vercel/index.ts @@ -24,6 +24,15 @@ interface WrapVercelLanguageModelOptions { * - "full": Combines both profile and query-based results */ mode?: "profile" | "query" | "full" + /** + * Search mode for memory retrieval: + * - "memories": Search only memory entries (default) + * - "hybrid": Search both memories AND document chunks (recommended for RAG) + * - "documents": Search only document chunks + */ + searchMode?: "memories" | "hybrid" | "documents" + /** Maximum number of search results to return when using hybrid/documents mode (default: 10) */ + searchLimit?: number /** * Memory persistence mode: * - "always": Automatically save conversations as memories @@ -69,7 +78,9 @@ interface WrapVercelLanguageModelOptions { * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation * @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false) * @param options.mode - Optional mode for memory search: "profile", "query", or "full" (default: "profile") - * @param options.addMemory - Optional mode for memory search: "always", "never" (default: "never") + * @param options.searchMode - Optional search mode: "memories" (default), "hybrid" (memories + chunks), or "documents" (chunks only) + * @param options.searchLimit - Optional maximum number of search results when using hybrid/documents mode (default: 10) + * @param options.addMemory - Optional mode for memory persistence: "always", "never" (default: "never") * @param options.apiKey - Optional Supermemory API key to use instead of the environment variable * @param options.baseUrl - Optional base URL for the Supermemory API (default: "https://api.supermemory.ai") * @@ -80,15 +91,22 @@ interface 
WrapVercelLanguageModelOptions { * import { withSupermemory } from "@supermemory/tools/ai-sdk" * import { openai } from "@ai-sdk/openai" * + * // Basic usage with profile memories * const modelWithMemory = withSupermemory(openai("gpt-4"), "user-123", { - * conversationId: "conversation-456", * mode: "full", * addMemory: "always" * }) * + * // RAG usage with hybrid search (memories + document chunks) + * const ragModel = withSupermemory(openai("gpt-4"), "user-123", { + * mode: "full", + * searchMode: "hybrid", // Search both memories and document chunks + * searchLimit: 15, + * }) + * * const result = await generateText({ - * model: modelWithMemory, - * messages: [{ role: "user", content: "What's my favorite programming language?" }] + * model: ragModel, + * messages: [{ role: "user", content: "What's in my documents about quarterly goals?" }] * }) * ``` * @@ -114,6 +132,8 @@ const wrapVercelLanguageModel = ( conversationId: options?.conversationId, verbose: options?.verbose ?? false, mode: options?.mode ?? "profile", + searchMode: options?.searchMode ?? "memories", + searchLimit: options?.searchLimit ?? 10, addMemory: options?.addMemory ?? 
"never", baseUrl: options?.baseUrl, promptTemplate: options?.promptTemplate, diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts index 8c31b86fd..238178245 100644 --- a/packages/tools/src/vercel/middleware.ts +++ b/packages/tools/src/vercel/middleware.ts @@ -11,6 +11,7 @@ import { type Logger, type PromptTemplate, type MemoryMode, + type SearchMode, } from "../shared" import { type LanguageModelCallOptions, @@ -178,6 +179,15 @@ interface SupermemoryMiddlewareOptions { * - "full": Combines both profile and query-based results */ mode?: MemoryMode + /** + * Search mode for memory retrieval: + * - "memories": Search only memory entries (default) + * - "hybrid": Search both memories AND document chunks (recommended for RAG) + * - "documents": Search only document chunks + */ + searchMode?: SearchMode + /** Maximum number of search results to return (default: 10) */ + searchLimit?: number /** * Memory persistence mode: * - "always": Automatically save conversations as memories @@ -196,6 +206,8 @@ interface SupermemoryMiddlewareContext { containerTag: string conversationId?: string mode: MemoryMode + searchMode: SearchMode + searchLimit: number addMemory: "always" | "never" normalizedBaseUrl: string apiKey: string @@ -216,6 +228,8 @@ export const createSupermemoryContext = ( conversationId, verbose = false, mode = "profile", + searchMode = "memories", + searchLimit = 10, addMemory = "never", baseUrl, promptTemplate, @@ -237,6 +251,8 @@ export const createSupermemoryContext = ( containerTag, conversationId, mode, + searchMode, + searchLimit, addMemory, normalizedBaseUrl, apiKey, @@ -298,6 +314,7 @@ export const transformParamsWithMemory = async ( containerTag: ctx.containerTag, conversationId: ctx.conversationId, mode: ctx.mode, + searchMode: ctx.searchMode, isNewTurn, cacheHit: false, }) @@ -312,6 +329,8 @@ export const transformParamsWithMemory = async ( apiKey: ctx.apiKey, logger: ctx.logger, promptTemplate: 
ctx.promptTemplate, + searchMode: ctx.searchMode, + searchLimit: ctx.searchLimit, }) ctx.memoryCache.set(turnKey, memories)