aboutsummaryrefslogtreecommitdiff
path: root/packages/tools
diff options
context:
space:
mode:
authorMaheshtheDev <[email protected]>2026-01-20 01:30:43 +0000
committerMaheshtheDev <[email protected]>2026-01-20 01:30:43 +0000
commit32a7eff3af6e82fd3d2b419ecd016ab144d4c508 (patch)
treebec738b44bc7f35f7f34083c1462ed5e8e9cc856 /packages/tools
parentremove bun lock file (#686) (diff)
downloadsupermemory-32a7eff3af6e82fd3d2b419ecd016ab144d4c508.tar.xz
supermemory-32a7eff3af6e82fd3d2b419ecd016ab144d4c508.zip
fix(tools): multi step agent prompt caching (#685)01-19-fix_tools_multi_step_agent_prompt_caching
Diffstat (limited to 'packages/tools')
-rw-r--r--packages/tools/README.md1
-rw-r--r--packages/tools/package.json2
-rw-r--r--packages/tools/src/vercel/memory-prompt.ts144
-rw-r--r--packages/tools/src/vercel/middleware.ts129
-rw-r--r--packages/tools/src/vercel/util.ts24
-rw-r--r--packages/tools/test/vercel.test.ts549
-rw-r--r--packages/tools/test/with-supermemory/integration.test.ts538
-rw-r--r--packages/tools/test/with-supermemory/unit.test.ts367
8 files changed, 1112 insertions, 642 deletions
diff --git a/packages/tools/README.md b/packages/tools/README.md
index c0308141..e859ede7 100644
--- a/packages/tools/README.md
+++ b/packages/tools/README.md
@@ -58,6 +58,7 @@ const addTool = addMemoryTool(process.env.SUPERMEMORY_API_KEY!, {
- `withSupermemory` will take advantage of the supermemory profile v4 endpoint, personalized based on the container tag
- You can provide the Supermemory API key via the `apiKey` option to `withSupermemory` (recommended for browser usage), or fall back to `SUPERMEMORY_API_KEY` in the environment for server usage.
+- **Per-turn caching**: Memory injection is cached for tool-call continuations within the same user turn. The middleware detects when the AI SDK is continuing a multi-step flow (e.g., after a tool call) and reuses the cached memories instead of making redundant API calls. A fresh fetch occurs on each new user message turn.
```typescript
import { generateText } from "ai"
diff --git a/packages/tools/package.json b/packages/tools/package.json
index aba90b46..82187aaa 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
{
"name": "@supermemory/tools",
"type": "module",
- "version": "1.3.66",
+ "version": "1.3.67",
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",
diff --git a/packages/tools/src/vercel/memory-prompt.ts b/packages/tools/src/vercel/memory-prompt.ts
index 3dfc203f..185a3c41 100644
--- a/packages/tools/src/vercel/memory-prompt.ts
+++ b/packages/tools/src/vercel/memory-prompt.ts
@@ -93,33 +93,38 @@ const supermemoryProfileSearch = async (
}
}
-export const addSystemPrompt = async (
- params: LanguageModelCallOptions,
- containerTag: string,
- logger: Logger,
- mode: "profile" | "query" | "full",
- baseUrl: string,
- apiKey: string,
- promptTemplate: PromptTemplate = defaultPromptTemplate,
-): Promise<LanguageModelCallOptions> => {
- const systemPromptExists = params.prompt.some(
- (prompt) => prompt.role === "system",
- )
+/**
+ * Options for building memories text.
+ */
+export interface BuildMemoriesTextOptions {
+ containerTag: string
+ queryText: string
+ mode: "profile" | "query" | "full"
+ baseUrl: string
+ apiKey: string
+ logger: Logger
+ promptTemplate?: PromptTemplate
+}
- const queryText =
- mode !== "profile"
- ? params.prompt
- .slice()
- .reverse()
- .find((prompt: { role: string }) => prompt.role === "user")
- ?.content?.filter(
- (content: { type: string }) => content.type === "text",
- )
- ?.map((content: { type: string; text: string }) =>
- content.type === "text" ? content.text : "",
- )
- ?.join(" ") || ""
- : ""
+/**
+ * Fetches memories from the API, deduplicates them, and formats them into
+ * the final string to be injected into the system prompt.
+ *
+ * @param options - Configuration for building memories text
+ * @returns The final formatted memories string ready for injection
+ */
+export const buildMemoriesText = async (
+ options: BuildMemoriesTextOptions,
+): Promise<string> => {
+ const {
+ containerTag,
+ queryText,
+ mode,
+ baseUrl,
+ apiKey,
+ logger,
+ promptTemplate = defaultPromptTemplate,
+ } = options
const memoriesResponse = await supermemoryProfileSearch(
containerTag,
@@ -191,6 +196,27 @@ export const addSystemPrompt = async (
})
}
+ return memories
+}
+
+/**
+ * Injects memories string into params by appending to existing system prompt
+ * or creating a new one. Pure function - does not mutate the original params.
+ *
+ * @param params - The language model call options
+ * @param memories - The formatted memories string to inject
+ * @param logger - Logger for debug output
+ * @returns New params with memories injected into the system prompt
+ */
+export const injectMemoriesIntoParams = (
+ params: LanguageModelCallOptions,
+ memories: string,
+ logger: Logger,
+): LanguageModelCallOptions => {
+ const systemPromptExists = params.prompt.some(
+ (prompt) => prompt.role === "system",
+ )
+
if (systemPromptExists) {
logger.debug("Added memories to existing system prompt")
// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3 prompt types
@@ -212,3 +238,69 @@ export const addSystemPrompt = async (
] as any
return { ...params, prompt: newPrompt } as LanguageModelCallOptions
}
+
+/**
+ * Extracts the query text from params based on mode.
+ * For "profile" mode, returns empty string (no query needed).
+ * For "query" or "full" mode, extracts the last user message text.
+ *
+ * @param params - The language model call options
+ * @param mode - The memory retrieval mode
+ * @returns The query text for memory search
+ */
+export const extractQueryText = (
+ params: LanguageModelCallOptions,
+ mode: "profile" | "query" | "full",
+): string => {
+ if (mode === "profile") {
+ return ""
+ }
+
+ const userMessage = params.prompt
+ .slice()
+ .reverse()
+ .find((prompt: { role: string }) => prompt.role === "user")
+
+ const content = userMessage?.content
+ if (!content) return ""
+
+ if (typeof content === "string") {
+ return content
+ }
+
+ // biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
+ return (content as any[])
+ .filter((part) => part.type === "text")
+ .map((part) => part.text || "")
+ .join(" ")
+}
+
+/**
+ * Adds memories to the system prompt by fetching from API and injecting.
+ * This is the original combined function, now implemented via helpers.
+ *
+ * @deprecated Prefer using buildMemoriesText + injectMemoriesIntoParams for caching support
+ */
+export const addSystemPrompt = async (
+ params: LanguageModelCallOptions,
+ containerTag: string,
+ logger: Logger,
+ mode: "profile" | "query" | "full",
+ baseUrl: string,
+ apiKey: string,
+ promptTemplate: PromptTemplate = defaultPromptTemplate,
+): Promise<LanguageModelCallOptions> => {
+ const queryText = extractQueryText(params, mode)
+
+ const memories = await buildMemoriesText({
+ containerTag,
+ queryText,
+ mode,
+ baseUrl,
+ apiKey,
+ logger,
+ promptTemplate,
+ })
+
+ return injectMemoriesIntoParams(params, memories, logger)
+}
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index 8336b397..e3a9ac57 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -6,18 +6,18 @@ import {
import { createLogger, type Logger } from "./logger"
import {
type LanguageModelCallOptions,
- type LanguageModelStreamPart,
- type OutputContentItem,
getLastUserMessage,
filterOutSupermemories,
} from "./util"
import {
- addSystemPrompt,
+ buildMemoriesText,
+ extractQueryText,
+ injectMemoriesIntoParams,
normalizeBaseUrl,
type PromptTemplate,
} from "./memory-prompt"
-export const getConversationContent = (params: LanguageModelCallOptions) => {
+const getConversationContent = (params: LanguageModelCallOptions) => {
return params.prompt
.filter((msg) => msg.role !== "system" && msg.role !== "tool")
.map((msg) => {
@@ -36,7 +36,7 @@ export const getConversationContent = (params: LanguageModelCallOptions) => {
.join("\n\n")
}
-export const convertToConversationMessages = (
+const convertToConversationMessages = (
params: LanguageModelCallOptions,
assistantResponseText: string,
): ConversationMessage[] => {
@@ -160,7 +160,7 @@ export const saveMemoryAfterResponse = async (
/**
* Configuration options for the Supermemory middleware.
*/
-export interface SupermemoryMiddlewareOptions {
+interface SupermemoryMiddlewareOptions {
/** Container tag/identifier for memory search (e.g., user ID, project ID) */
containerTag: string
/** Supermemory API key */
@@ -188,7 +188,12 @@ export interface SupermemoryMiddlewareOptions {
promptTemplate?: PromptTemplate
}
-export interface SupermemoryMiddlewareContext {
+/**
+ * Cached memories string for a user turn.
+ */
+type MemoryCache = string
+
+interface SupermemoryMiddlewareContext {
client: Supermemory
logger: Logger
containerTag: string
@@ -198,6 +203,11 @@ export interface SupermemoryMiddlewareContext {
normalizedBaseUrl: string
apiKey: string
promptTemplate?: PromptTemplate
+ /**
+ * Per-turn memory cache map. Stores the injected memories string for each
+	 * user turn (keyed by turnKey) to avoid redundant API calls during tool-call continuations.
+ */
+ memoryCache: Map<string, MemoryCache>
}
export const createSupermemoryContext = (
@@ -234,9 +244,30 @@ export const createSupermemoryContext = (
normalizedBaseUrl,
apiKey,
promptTemplate,
+ memoryCache: new Map<string, MemoryCache>(),
}
}
+/**
+ * Generates a cache key for the current turn based on context and user message.
+ * Normalizes the user message by trimming and collapsing whitespace.
+ */
+const makeTurnKey = (
+ ctx: SupermemoryMiddlewareContext,
+ userMessage: string,
+): string => {
+ const normalizedMessage = userMessage.trim().replace(/\s+/g, " ")
+ return `${ctx.containerTag}:${ctx.conversationId || ""}:${ctx.mode}:${normalizedMessage}`
+}
+
+/**
+ * Checks if this is a new user turn (last message is from user)
+ */
+const isNewUserTurn = (params: LanguageModelCallOptions): boolean => {
+ const lastMessage = params.prompt.at(-1)
+ return lastMessage?.role === "user"
+}
+
export const transformParamsWithMemory = async (
params: LanguageModelCallOptions,
ctx: SupermemoryMiddlewareContext,
@@ -250,22 +281,42 @@ export const transformParamsWithMemory = async (
}
}
+ const turnKey = makeTurnKey(ctx, userMessage || "")
+ const isNewTurn = isNewUserTurn(params)
+
+ // Check if we can use cached memories
+ const cachedMemories = ctx.memoryCache.get(turnKey)
+ if (!isNewTurn && cachedMemories) {
+ ctx.logger.debug("Using cached memories: ", {
+ turnKey,
+ })
+ return injectMemoriesIntoParams(params, cachedMemories, ctx.logger)
+ }
+
ctx.logger.info("Starting memory search", {
containerTag: ctx.containerTag,
conversationId: ctx.conversationId,
mode: ctx.mode,
+ isNewTurn,
+ cacheHit: false,
})
- const transformedParams = await addSystemPrompt(
- params,
- ctx.containerTag,
- ctx.logger,
- ctx.mode,
- ctx.normalizedBaseUrl,
- ctx.apiKey,
- ctx.promptTemplate,
- )
- return transformedParams
+ const queryText = extractQueryText(params, ctx.mode)
+
+ const memories = await buildMemoriesText({
+ containerTag: ctx.containerTag,
+ queryText,
+ mode: ctx.mode,
+ baseUrl: ctx.normalizedBaseUrl,
+ apiKey: ctx.apiKey,
+ logger: ctx.logger,
+ promptTemplate: ctx.promptTemplate,
+ })
+
+ ctx.memoryCache.set(turnKey, memories)
+ ctx.logger.debug("Cached memories for turn", { turnKey })
+
+ return injectMemoriesIntoParams(params, memories, ctx.logger)
}
export const extractAssistantResponseText = (content: unknown[]): string => {
@@ -273,47 +324,3 @@ export const extractAssistantResponseText = (content: unknown[]): string => {
.map((item) => (item.type === "text" ? item.text || "" : ""))
.join("")
}
-
-export const createStreamTransform = (
- ctx: SupermemoryMiddlewareContext,
- params: LanguageModelCallOptions,
-): {
- transform: TransformStream<LanguageModelStreamPart, LanguageModelStreamPart>
- getGeneratedText: () => string
-} => {
- let generatedText = ""
-
- const transform = new TransformStream<
- LanguageModelStreamPart,
- LanguageModelStreamPart
- >({
- transform(chunk, controller) {
- if (chunk.type === "text-delta") {
- generatedText += chunk.delta
- }
- controller.enqueue(chunk)
- },
- flush: async () => {
- const userMessage = getLastUserMessage(params)
- if (ctx.addMemory === "always" && userMessage && userMessage.trim()) {
- saveMemoryAfterResponse(
- ctx.client,
- ctx.containerTag,
- ctx.conversationId,
- generatedText,
- params,
- ctx.logger,
- ctx.apiKey,
- ctx.normalizedBaseUrl,
- )
- }
- },
- })
-
- return {
- transform,
- getGeneratedText: () => generatedText,
- }
-}
-
-export { createLogger, type Logger, type OutputContentItem }
diff --git a/packages/tools/src/vercel/util.ts b/packages/tools/src/vercel/util.ts
index eec29859..01b655e1 100644
--- a/packages/tools/src/vercel/util.ts
+++ b/packages/tools/src/vercel/util.ts
@@ -100,16 +100,30 @@ export function convertProfileToMarkdown(data: ProfileMarkdownData): string {
return sections.join("\n\n")
}
-export const getLastUserMessage = (params: LanguageModelCallOptions) => {
+export const getLastUserMessage = (
+ params: LanguageModelCallOptions,
+): string | undefined => {
const lastUserMessage = params.prompt
.slice()
.reverse()
.find((prompt: LanguageModelMessage) => prompt.role === "user")
- const memories = lastUserMessage?.content
- .filter((content) => content.type === "text")
- .map((content) => (content as { type: "text"; text: string }).text)
+
+ if (!lastUserMessage) {
+ return undefined
+ }
+
+ const content = lastUserMessage.content
+
+ // Handle string content directly
+ if (typeof content === "string") {
+ return content
+ }
+
+ // Handle array content - extract text parts
+ return content
+ .filter((part) => part.type === "text")
+ .map((part) => (part as { type: "text"; text: string }).text)
.join(" ")
- return memories
}
export const filterOutSupermemories = (content: string) => {
diff --git a/packages/tools/test/vercel.test.ts b/packages/tools/test/vercel.test.ts
deleted file mode 100644
index 32197977..00000000
--- a/packages/tools/test/vercel.test.ts
+++ /dev/null
@@ -1,549 +0,0 @@
-import { describe, it, expect, beforeEach, vi, afterEach } from "vitest"
-import { withSupermemory } from "../src/vercel"
-import { createSupermemoryMiddleware } from "../src/vercel/middleware"
-import type {
- LanguageModelV2,
- LanguageModelV2CallOptions,
-} from "@ai-sdk/provider"
-import Supermemory from "supermemory"
-import "dotenv/config"
-
-// Test configuration
-const TEST_CONFIG = {
- apiKey: process.env.SUPERMEMORY_API_KEY || "test-api-key",
- baseURL: process.env.SUPERMEMORY_BASE_URL,
- containerTag: "test-vercel-wrapper",
-}
-
-// Mock language model for testing
-const createMockLanguageModel = (): LanguageModelV2 => ({
- specificationVersion: "v2",
- provider: "test-provider",
- modelId: "test-model",
- supportedUrls: {},
- doGenerate: vi.fn(),
- doStream: vi.fn(),
-})
-
-// Mock supermemory search response
-const createMockSearchResponse = (contents: string[]) => ({
- results: contents.map((content) => ({
- chunks: [{ content }],
- })),
-})
-
-// Helper to call transformParams with proper signature
-const callTransformParams = async (
- middleware: ReturnType<typeof createSupermemoryMiddleware>,
- params: LanguageModelV2CallOptions,
-) => {
- const mockModel = createMockLanguageModel()
- return middleware.transformParams?.({
- type: "generate",
- params,
- model: mockModel,
- })
-}
-
-describe("withSupermemory / wrapVercelLanguageModel", () => {
- let originalEnv: string | undefined
-
- beforeEach(() => {
- originalEnv = process.env.SUPERMEMORY_API_KEY
- vi.clearAllMocks()
- })
-
- afterEach(() => {
- if (originalEnv) {
- process.env.SUPERMEMORY_API_KEY = originalEnv
- } else {
- delete process.env.SUPERMEMORY_API_KEY
- }
- })
-
- describe("Environment validation", () => {
- it("should throw error if SUPERMEMORY_API_KEY is not set", () => {
- delete process.env.SUPERMEMORY_API_KEY
-
- const mockModel = createMockLanguageModel()
-
- expect(() => {
- withSupermemory(mockModel, TEST_CONFIG.containerTag)
- }).toThrow("SUPERMEMORY_API_KEY is not set")
- })
-
- it("should successfully create wrapped model with valid API key", () => {
- process.env.SUPERMEMORY_API_KEY = "test-key"
-
- const mockModel = createMockLanguageModel()
- const wrappedModel = withSupermemory(mockModel, TEST_CONFIG.containerTag)
-
- expect(wrappedModel).toBeDefined()
- expect(wrappedModel.specificationVersion).toBe("v2")
- })
- })
-
- describe("createSupermemoryMiddleware", () => {
- // biome-ignore lint/suspicious/noExplicitAny: Mock object for testing
- let mockSupermemory: any
-
- beforeEach(() => {
- mockSupermemory = {
- search: {
- execute: vi.fn(),
- },
- }
- })
-
- it("should return params unchanged when there is no user message", async () => {
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "system",
- content: "You are a helpful assistant",
- },
- ],
- }
-
- const result = await callTransformParams(middleware, params)
-
- expect(result).toEqual(params)
- expect(mockSupermemory.search.execute).not.toHaveBeenCalled()
- })
-
- it("should extract last user message with text content", async () => {
- mockSupermemory.search.execute.mockResolvedValue(
- createMockSearchResponse([]),
- )
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [{ type: "text", text: "Hello, how are you?" }],
- },
- ],
- }
-
- await callTransformParams(middleware, params)
-
- expect(mockSupermemory.search.execute).toHaveBeenCalledWith({
- q: "Hello, how are you?",
- containerTags: [TEST_CONFIG.containerTag],
- })
- })
-
- it("should handle multiple user messages and extract the last one", async () => {
- mockSupermemory.search.execute.mockResolvedValue(
- createMockSearchResponse([]),
- )
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [{ type: "text", text: "First message" }],
- },
- {
- role: "assistant",
- content: [{ type: "text", text: "Response" }],
- },
- {
- role: "user",
- content: [{ type: "text", text: "Last message" }],
- },
- ],
- }
-
- await callTransformParams(middleware, params)
-
- expect(mockSupermemory.search.execute).toHaveBeenCalledWith({
- q: "Last message",
- containerTags: [TEST_CONFIG.containerTag],
- })
- })
-
- it("should concatenate multiple text parts in user message", async () => {
- mockSupermemory.search.execute.mockResolvedValue(
- createMockSearchResponse([]),
- )
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [
- { type: "text", text: "Part 1" },
- { type: "text", text: "Part 2" },
- { type: "text", text: "Part 3" },
- ],
- },
- ],
- }
-
- await callTransformParams(middleware, params)
-
- expect(mockSupermemory.search.execute).toHaveBeenCalledWith({
- q: "Part 1 Part 2 Part 3",
- containerTags: [TEST_CONFIG.containerTag],
- })
- })
-
- it("should create new system prompt when none exists", async () => {
- mockSupermemory.search.execute.mockResolvedValue(
- createMockSearchResponse([
- "Memory 1: User likes TypeScript",
- "Memory 2: User prefers clean code",
- ]),
- )
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [{ type: "text", text: "Tell me about TypeScript" }],
- },
- ],
- }
-
- const result = await callTransformParams(middleware, params)
-
- expect(result?.prompt).toHaveLength(2)
- expect(result?.prompt[0]?.role).toBe("system")
- expect(result?.prompt[0]?.content).toContain(
- "Memory 1: User likes TypeScript Memory 2: User prefers clean code",
- )
- expect(result?.prompt[1]?.role).toBe("user")
- })
-
- it("should append memories to existing system prompt", async () => {
- mockSupermemory.search.execute.mockResolvedValue(
- createMockSearchResponse(["Memory: User is an expert developer"]),
- )
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "system",
- content: "You are a helpful coding assistant",
- },
- {
- role: "user",
- content: [{ type: "text", text: "Help me code" }],
- },
- ],
- }
-
- const result = await callTransformParams(middleware, params)
-
- expect(result?.prompt).toHaveLength(2)
- expect(result?.prompt[0]?.role).toBe("system")
- expect(result?.prompt[0]?.content).toContain(
- "You are a helpful coding assistant",
- )
- expect(result?.prompt[0]?.content).toContain(
- "Memory: User is an expert developer",
- )
- })
-
- it("should handle empty memory results", async () => {
- mockSupermemory.search.execute.mockResolvedValue(
- createMockSearchResponse([]),
- )
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [{ type: "text", text: "Hello" }],
- },
- ],
- }
-
- const result = await callTransformParams(middleware, params)
-
- // Should still create system prompt even if memories are empty
- expect(result?.prompt).toHaveLength(2)
- expect(result?.prompt[0]?.role).toBe("system")
- })
-
- it("should filter out non-text content from user message", async () => {
- mockSupermemory.search.execute.mockResolvedValue(
- createMockSearchResponse([]),
- )
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [
- { type: "text", text: "Text part" },
- // File part is non-text content
- { type: "file", data: "base64...", mimeType: "image/png" },
- { type: "text", text: "Another text part" },
- ],
- },
- ],
- }
-
- await callTransformParams(middleware, params)
-
- // Should only extract text content
- expect(mockSupermemory.search.execute).toHaveBeenCalledWith({
- q: "Text part Another text part",
- containerTags: [TEST_CONFIG.containerTag],
- })
- })
-
- it("should handle multiple memory chunks correctly", async () => {
- mockSupermemory.search.execute.mockResolvedValue({
- results: [
- {
- chunks: [
- { content: "Chunk 1" },
- { content: "Chunk 2" },
- { content: "Chunk 3" },
- ],
- },
- {
- chunks: [{ content: "Chunk 4" }, { content: "Chunk 5" }],
- },
- ],
- })
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [{ type: "text", text: "Query" }],
- },
- ],
- }
-
- const result = await callTransformParams(middleware, params)
-
- const systemContent = result?.prompt[0]?.content as string
- // Chunks from same result should be joined with space
- expect(systemContent).toContain("Chunk 1 Chunk 2 Chunk 3")
- // Results should be joined with newline
- expect(systemContent).toContain("Chunk 4 Chunk 5")
- })
- })
-
- describe("Integration with real Supermemory", () => {
- // Skip these tests if no API key is available
- const shouldRunIntegration = !!process.env.SUPERMEMORY_API_KEY
-
- it.skipIf(!shouldRunIntegration)(
- "should work with real Supermemory API",
- async () => {
- const supermemory = new Supermemory({
- apiKey: process.env.SUPERMEMORY_API_KEY ?? "",
- baseURL: TEST_CONFIG.baseURL,
- })
-
- const middleware = createSupermemoryMiddleware(
- supermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [{ type: "text", text: "Tell me about programming" }],
- },
- ],
- }
-
- const result = await callTransformParams(middleware, params)
-
- expect(result?.prompt).toBeDefined()
- expect(result?.prompt.length).toBeGreaterThanOrEqual(1)
- },
- )
-
- it.skipIf(!shouldRunIntegration)(
- "should create wrapped model and use it",
- async () => {
- process.env.SUPERMEMORY_API_KEY = TEST_CONFIG.apiKey
-
- const mockModel = createMockLanguageModel()
- const wrappedModel = withSupermemory(
- mockModel,
- TEST_CONFIG.containerTag,
- )
-
- expect(wrappedModel).toBeDefined()
- expect(wrappedModel.provider).toBe("test-provider")
- expect(wrappedModel.modelId).toBe("test-model")
- },
- )
- })
-
- describe("Edge cases", () => {
- // biome-ignore lint/suspicious/noExplicitAny: Mock object for testing
- let mockSupermemory: any
-
- beforeEach(() => {
- mockSupermemory = {
- search: {
- execute: vi.fn(),
- },
- }
- })
-
- it("should handle Supermemory API errors gracefully", async () => {
- mockSupermemory.search.execute.mockRejectedValue(new Error("API Error"))
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [{ type: "text", text: "Hello" }],
- },
- ],
- }
-
- await expect(callTransformParams(middleware, params)).rejects.toThrow(
- "API Error",
- )
- })
-
- it("should handle empty prompt array", async () => {
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [],
- }
-
- const result = await callTransformParams(middleware, params)
-
- expect(result).toEqual(params)
- expect(mockSupermemory.search.execute).not.toHaveBeenCalled()
- })
-
- it("should handle user message with empty content array", async () => {
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [],
- },
- ],
- }
-
- const result = await callTransformParams(middleware, params)
-
- expect(result).toEqual(params)
- expect(mockSupermemory.search.execute).not.toHaveBeenCalled()
- })
-
- it("should use correct container tag", async () => {
- mockSupermemory.search.execute.mockResolvedValue(
- createMockSearchResponse([]),
- )
-
- const customTag = "my-custom-project"
- const middleware = createSupermemoryMiddleware(mockSupermemory, customTag)
-
- const params: LanguageModelV2CallOptions = {
- prompt: [
- {
- role: "user",
- content: [{ type: "text", text: "Query" }],
- },
- ],
- }
-
- await callTransformParams(middleware, params)
-
- expect(mockSupermemory.search.execute).toHaveBeenCalledWith({
- q: "Query",
- containerTags: [customTag],
- })
- })
-
- it("should not mutate the original params.prompt array", async () => {
- mockSupermemory.search.execute.mockResolvedValue(
- createMockSearchResponse([]),
- )
-
- const middleware = createSupermemoryMiddleware(
- mockSupermemory,
- TEST_CONFIG.containerTag,
- )
-
- const originalPrompt = [
- { role: "user" as const, content: [{ type: "text" as const, text: "First" }] },
- { role: "user" as const, content: [{ type: "text" as const, text: "Last" }] }
- ]
- const params: LanguageModelV2CallOptions = { prompt: [...originalPrompt] }
-
- await callTransformParams(middleware, params)
-
- // Verify order is unchanged
- expect(params.prompt[0]?.content[0]).toBe("First")
- expect(params.prompt[1]?.content[0]).toBe("Last")
- })
-
- })
-})
diff --git a/packages/tools/test/with-supermemory/integration.test.ts b/packages/tools/test/with-supermemory/integration.test.ts
new file mode 100644
index 00000000..98b7e853
--- /dev/null
+++ b/packages/tools/test/with-supermemory/integration.test.ts
@@ -0,0 +1,538 @@
+/**
+ * Integration tests for the withSupermemory wrapper
+ */
+
+import { describe, it, expect, vi } from "vitest"
+import { withSupermemory } from "../../src/vercel"
+import type {
+ LanguageModelV2,
+ LanguageModelV2CallOptions,
+} from "@ai-sdk/provider"
+import "dotenv/config"
+
+const INTEGRATION_CONFIG = {
+ apiKey: process.env.SUPERMEMORY_API_KEY || "",
+ baseUrl: process.env.SUPERMEMORY_BASE_URL || "https://api.supermemory.ai",
+ containerTag: "integration-test-vercel-wrapper",
+}
+
+const shouldRunIntegration = !!process.env.SUPERMEMORY_API_KEY
+
+/**
+ * Creates a mock language model that captures params for assertion
+ * while simulating realistic LLM behavior.
+ */
+const createIntegrationMockModel = () => {
+ let capturedGenerateParams: LanguageModelV2CallOptions | null = null
+ let capturedStreamParams: LanguageModelV2CallOptions | null = null
+
+ const model: LanguageModelV2 = {
+ specificationVersion: "v2",
+ provider: "integration-test",
+ modelId: "mock-model",
+ supportedUrls: {},
+ doGenerate: vi.fn(async (params: LanguageModelV2CallOptions) => {
+ capturedGenerateParams = params
+ return {
+ content: [{ type: "text" as const, text: "Mock response from LLM" }],
+ finishReason: "stop" as const,
+ usage: {
+ promptTokens: 10,
+ completionTokens: 5,
+ inputTokens: 10,
+ outputTokens: 5,
+ totalTokens: 15,
+ },
+ rawCall: { rawPrompt: params.prompt, rawSettings: {} },
+ warnings: [],
+ }
+ }),
+ doStream: vi.fn(async (params: LanguageModelV2CallOptions) => {
+ capturedStreamParams = params
+ const chunks = ["Mock ", "streamed ", "response"]
+ return {
+ stream: new ReadableStream({
+ async start(controller) {
+ for (const chunk of chunks) {
+ controller.enqueue({ type: "text-delta", delta: chunk })
+ }
+ controller.enqueue({
+ type: "finish",
+ finishReason: "stop",
+ usage: {
+ promptTokens: 10,
+ completionTokens: 5,
+ inputTokens: 10,
+ outputTokens: 5,
+ totalTokens: 15,
+ },
+ })
+ controller.close()
+ },
+ }),
+ rawCall: { rawPrompt: params.prompt, rawSettings: {} },
+ }
+ }),
+ }
+
+ return {
+ model,
+ getCapturedGenerateParams: () => capturedGenerateParams,
+ getCapturedStreamParams: () => capturedStreamParams,
+ reset: () => {
+ capturedGenerateParams = null
+ capturedStreamParams = null
+ vi.mocked(model.doGenerate).mockClear()
+ vi.mocked(model.doStream).mockClear()
+ },
+ }
+}
+
+describe.skipIf(!shouldRunIntegration)(
+ "Integration: withSupermemory wrapper with real API",
+ () => {
+ describe("doGenerate flow", () => {
+ it("should fetch real memories and inject into params passed to model", async () => {
+ const { model, getCapturedGenerateParams } =
+ createIntegrationMockModel()
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ await wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Hello, what do you know?" }],
+ },
+ ],
+ })
+
+ const capturedParams = getCapturedGenerateParams()
+ expect(capturedParams).not.toBeNull()
+ expect(capturedParams?.prompt[0]?.role).toBe("system")
+ // Memory content injected (may be empty if no memories exist)
+ expect(typeof capturedParams?.prompt[0]?.content).toBe("string")
+ })
+
+ it("should save memory when addMemory is always", async () => {
+ const { model } = createIntegrationMockModel()
+ const fetchSpy = vi.spyOn(globalThis, "fetch")
+
+ const conversationId = `test-generate-${Date.now()}`
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ addMemory: "always",
+ conversationId,
+ })
+
+ await wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [
+ { type: "text", text: "Remember that I love integration tests" },
+ ],
+ },
+ ],
+ })
+
+ // Wait for background save to complete
+ await new Promise((resolve) => setTimeout(resolve, 2000))
+
+ // Verify /v4/conversations was called for saving
+ const conversationCalls = fetchSpy.mock.calls.filter(
+ (call) =>
+ typeof call[0] === "string" &&
+ call[0].includes("/v4/conversations"),
+ )
+ expect(conversationCalls.length).toBeGreaterThan(0)
+
+ fetchSpy.mockRestore()
+ })
+
+ it("should work with conversationId for grouped memories", async () => {
+ const { model, getCapturedGenerateParams } =
+ createIntegrationMockModel()
+
+ const conversationId = `test-conversation-${Date.now()}`
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ conversationId,
+ })
+
+ await wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "First message in conversation" }],
+ },
+ ],
+ })
+
+ const capturedParams = getCapturedGenerateParams()
+ expect(capturedParams).not.toBeNull()
+ expect(model.doGenerate).toHaveBeenCalledTimes(1)
+ })
+ })
+
+ describe("doStream flow", () => {
+ it("should fetch memories and stream response", async () => {
+ const { model, getCapturedStreamParams } = createIntegrationMockModel()
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ const { stream } = await wrapped.doStream({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Stream test message" }],
+ },
+ ],
+ })
+
+ // Consume the stream
+ const reader = stream.getReader()
+ const chunks: Array<{ type: string; delta?: string }> = []
+ while (true) {
+ const { done, value } = await reader.read()
+ if (done) break
+ chunks.push(value as { type: string; delta?: string })
+ }
+
+ const capturedParams = getCapturedStreamParams()
+ expect(capturedParams).not.toBeNull()
+ expect(capturedParams?.prompt[0]?.role).toBe("system")
+ expect(chunks.some((c) => c.type === "text-delta")).toBe(true)
+ })
+
+ it("should capture streamed text and save memory when addMemory is always", async () => {
+ const { model } = createIntegrationMockModel()
+ const fetchSpy = vi.spyOn(globalThis, "fetch")
+
+ const conversationId = `test-stream-${Date.now()}`
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ addMemory: "always",
+ conversationId,
+ })
+
+ const { stream } = await wrapped.doStream({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Stream and save this memory" }],
+ },
+ ],
+ })
+
+ // Consume the stream to trigger flush
+ const reader = stream.getReader()
+ while (true) {
+ const { done } = await reader.read()
+ if (done) break
+ }
+
+ // Wait for background save
+ await new Promise((resolve) => setTimeout(resolve, 2000))
+
+ // Verify save was attempted
+ const conversationCalls = fetchSpy.mock.calls.filter(
+ (call) =>
+ typeof call[0] === "string" &&
+ call[0].includes("/v4/conversations"),
+ )
+ expect(conversationCalls.length).toBeGreaterThan(0)
+
+ fetchSpy.mockRestore()
+ })
+
+ it("should handle text-delta chunks correctly", async () => {
+ const { model } = createIntegrationMockModel()
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ const { stream } = await wrapped.doStream({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Test chunk handling" }],
+ },
+ ],
+ })
+
+ const reader = stream.getReader()
+ const textDeltas: string[] = []
+ while (true) {
+ const { done, value } = await reader.read()
+ if (done) break
+ if (
+ (value as { type: string; delta?: string }).type === "text-delta"
+ ) {
+ textDeltas.push(
+ (value as { type: string; delta?: string }).delta || "",
+ )
+ }
+ }
+
+ expect(textDeltas.join("")).toBe("Mock streamed response")
+ })
+ })
+
+ describe("Mode variations", () => {
+ it("profile mode should fetch profile memories", async () => {
+ const { model } = createIntegrationMockModel()
+ const fetchSpy = vi.spyOn(globalThis, "fetch")
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ await wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Profile mode test" }],
+ },
+ ],
+ })
+
+ // Verify /v4/profile was called
+ const profileCalls = fetchSpy.mock.calls.filter(
+ (call) =>
+ typeof call[0] === "string" && call[0].includes("/v4/profile"),
+ )
+ expect(profileCalls.length).toBeGreaterThan(0)
+
+ // Verify the request body does NOT contain 'q' for profile mode
+ const profileCall = profileCalls[0]
+ if (profileCall?.[1]) {
+ const body = JSON.parse((profileCall[1] as RequestInit).body as string)
+ expect(body.q).toBeUndefined()
+ }
+
+ fetchSpy.mockRestore()
+ })
+
+ it("query mode should include query in search", async () => {
+ const { model } = createIntegrationMockModel()
+ const fetchSpy = vi.spyOn(globalThis, "fetch")
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "query",
+ })
+
+ await wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "What are my favorite foods?" }],
+ },
+ ],
+ })
+
+ // Verify /v4/profile was called with query
+ const profileCalls = fetchSpy.mock.calls.filter(
+ (call) =>
+ typeof call[0] === "string" && call[0].includes("/v4/profile"),
+ )
+ expect(profileCalls.length).toBeGreaterThan(0)
+
+ // Verify the request body contains 'q'
+ const profileCall = profileCalls[0]
+ if (profileCall?.[1]) {
+ const body = JSON.parse((profileCall[1] as RequestInit).body as string)
+ expect(body.q).toBe("What are my favorite foods?")
+ }
+
+ fetchSpy.mockRestore()
+ })
+
+ it("full mode should include both profile and query", async () => {
+ const { model } = createIntegrationMockModel()
+ const fetchSpy = vi.spyOn(globalThis, "fetch")
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "full",
+ })
+
+ await wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Full mode query test" }],
+ },
+ ],
+ })
+
+ // Verify /v4/profile was called with query
+ const profileCalls = fetchSpy.mock.calls.filter(
+ (call) =>
+ typeof call[0] === "string" && call[0].includes("/v4/profile"),
+ )
+ expect(profileCalls.length).toBeGreaterThan(0)
+
+ const profileCall = profileCalls[0]
+ if (profileCall?.[1]) {
+ const body = JSON.parse((profileCall[1] as RequestInit).body as string)
+ expect(body.q).toBe("Full mode query test")
+ }
+
+ fetchSpy.mockRestore()
+ })
+ })
+
+ describe("Options", () => {
+ it("promptTemplate should customize memory formatting", async () => {
+ const { model, getCapturedGenerateParams } =
+ createIntegrationMockModel()
+
+ const customTemplate = (data: {
+ userMemories: string
+ generalSearchMemories: string
+ }) => `<custom-memories>${data.userMemories}</custom-memories>`
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ promptTemplate: customTemplate,
+ })
+
+ await wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Custom template test" }],
+ },
+ ],
+ })
+
+ const capturedParams = getCapturedGenerateParams()
+ expect(capturedParams?.prompt[0]?.content).toMatch(
+ /<custom-memories>.*<\/custom-memories>/s,
+ )
+ })
+
+ it("verbose mode should not break functionality", async () => {
+ const { model, getCapturedGenerateParams } =
+ createIntegrationMockModel()
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ verbose: true,
+ })
+
+ await wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Verbose mode test" }],
+ },
+ ],
+ })
+
+ const capturedParams = getCapturedGenerateParams()
+ expect(capturedParams).not.toBeNull()
+ expect(model.doGenerate).toHaveBeenCalledTimes(1)
+ })
+
+ it("custom baseUrl should be used for API calls", async () => {
+ const { model } = createIntegrationMockModel()
+ const fetchSpy = vi.spyOn(globalThis, "fetch")
+
+ // Use the configured base URL (or default)
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ baseUrl: INTEGRATION_CONFIG.baseUrl,
+ })
+
+ await wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Base URL test" }],
+ },
+ ],
+ })
+
+ // Verify the correct base URL was used
+ const profileCalls = fetchSpy.mock.calls.filter(
+ (call) =>
+ typeof call[0] === "string" && call[0].includes("/v4/profile"),
+ )
+ expect(profileCalls.length).toBeGreaterThan(0)
+
+ const url = profileCalls[0]?.[0] as string
+ expect(url.startsWith(INTEGRATION_CONFIG.baseUrl)).toBe(true)
+
+ fetchSpy.mockRestore()
+ })
+ })
+
+ describe("Error scenarios", () => {
+ it("should propagate model errors", async () => {
+ const { model } = createIntegrationMockModel()
+
+ // Override doGenerate to throw an error
+ vi.mocked(model.doGenerate).mockRejectedValueOnce(
+ new Error("Model error"),
+ )
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: INTEGRATION_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ await expect(
+ wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Error test" }],
+ },
+ ],
+ }),
+ ).rejects.toThrow("Model error")
+ })
+
+ it("should handle invalid API key gracefully", async () => {
+ const { model } = createIntegrationMockModel()
+
+ const wrapped = withSupermemory(model, INTEGRATION_CONFIG.containerTag, {
+ apiKey: "invalid-api-key-12345",
+ mode: "profile",
+ })
+
+ await expect(
+ wrapped.doGenerate({
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Invalid key test" }],
+ },
+ ],
+ }),
+ ).rejects.toThrow()
+ })
+ })
+ },
+)
diff --git a/packages/tools/test/with-supermemory/unit.test.ts b/packages/tools/test/with-supermemory/unit.test.ts
new file mode 100644
index 00000000..b20eb6f2
--- /dev/null
+++ b/packages/tools/test/with-supermemory/unit.test.ts
@@ -0,0 +1,367 @@
+/**
+ * Unit tests for the withSupermemory wrapper
+ */
+
+import { describe, it, expect, beforeEach, vi, afterEach } from "vitest"
+import { withSupermemory } from "../../src/vercel"
+import {
+ createSupermemoryContext,
+ transformParamsWithMemory,
+} from "../../src/vercel/middleware"
+import type {
+ LanguageModelV2,
+ LanguageModelV2CallOptions,
+ LanguageModelV2Message,
+} from "@ai-sdk/provider"
+import "dotenv/config"
+
+// Test configuration
+const TEST_CONFIG = {
+ apiKey: process.env.SUPERMEMORY_API_KEY || "test-api-key",
+  baseUrl: process.env.SUPERMEMORY_BASE_URL || "https://api.supermemory.ai",
+ containerTag: "test-vercel-wrapper",
+}
+
+// Mock language model for testing
+const createMockLanguageModel = (): LanguageModelV2 => ({
+ specificationVersion: "v2",
+ provider: "test-provider",
+ modelId: "test-model",
+ supportedUrls: {},
+ doGenerate: vi.fn(),
+ doStream: vi.fn(),
+})
+
+// Mock profile API response
+const createMockProfileResponse = (
+ staticMemories: string[] = [],
+ dynamicMemories: string[] = [],
+ searchResults: string[] = [],
+) => ({
+ profile: {
+ static: staticMemories.map((memory) => ({ memory })),
+ dynamic: dynamicMemories.map((memory) => ({ memory })),
+ },
+ searchResults: {
+ results: searchResults.map((memory) => ({ memory })),
+ },
+})
+
+describe("Unit: withSupermemory", () => {
+ let originalEnv: string | undefined
+ let originalFetch: typeof globalThis.fetch
+
+ beforeEach(() => {
+ originalEnv = process.env.SUPERMEMORY_API_KEY
+ originalFetch = globalThis.fetch
+ vi.clearAllMocks()
+ })
+
+ afterEach(() => {
+ if (originalEnv) {
+ process.env.SUPERMEMORY_API_KEY = originalEnv
+ } else {
+ delete process.env.SUPERMEMORY_API_KEY
+ }
+ globalThis.fetch = originalFetch
+ })
+
+ describe("Environment validation", () => {
+ it("should throw error if SUPERMEMORY_API_KEY is not set", () => {
+ delete process.env.SUPERMEMORY_API_KEY
+
+ const mockModel = createMockLanguageModel()
+
+ expect(() => {
+ withSupermemory(mockModel, TEST_CONFIG.containerTag)
+ }).toThrow("SUPERMEMORY_API_KEY is not set")
+ })
+
+ it("should successfully create wrapped model with valid API key", () => {
+ process.env.SUPERMEMORY_API_KEY = "test-key"
+
+ const mockModel = createMockLanguageModel()
+ const wrappedModel = withSupermemory(mockModel, TEST_CONFIG.containerTag)
+
+ expect(wrappedModel).toBeDefined()
+ expect(wrappedModel.specificationVersion).toBe("v2")
+ })
+ })
+
+ describe("Memory caching", () => {
+ let fetchMock: ReturnType<typeof vi.fn>
+
+ beforeEach(() => {
+ fetchMock = vi.fn()
+ globalThis.fetch = fetchMock as unknown as typeof fetch
+ })
+
+ it("should cache memories on first call (new turn)", async () => {
+ fetchMock.mockResolvedValue({
+ ok: true,
+ json: () =>
+ Promise.resolve(createMockProfileResponse(["Cached memory"])),
+ })
+
+ const ctx = createSupermemoryContext({
+ containerTag: TEST_CONFIG.containerTag,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ const params: LanguageModelV2CallOptions = {
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Hello" }],
+ },
+ ],
+ }
+
+ await transformParamsWithMemory(params, ctx)
+
+ expect(ctx.memoryCache).toBeDefined()
+ const turnKey = `${TEST_CONFIG.containerTag}::profile:Hello`
+ const cachedMemories = ctx.memoryCache.get(turnKey)
+ expect(cachedMemories).toBeDefined()
+ expect(cachedMemories).toContain("Cached memory")
+ expect(fetchMock).toHaveBeenCalledTimes(1)
+ })
+
+ it("should use cached memories on continuation step (no new fetch)", async () => {
+ fetchMock.mockResolvedValue({
+ ok: true,
+ json: () =>
+ Promise.resolve(createMockProfileResponse(["Cached memory"])),
+ })
+
+ const ctx = createSupermemoryContext({
+ containerTag: TEST_CONFIG.containerTag,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ // Step 1: New turn (user message last)
+ const step1Params: LanguageModelV2CallOptions = {
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Hello" }],
+ },
+ ],
+ }
+ await transformParamsWithMemory(step1Params, ctx)
+ expect(fetchMock).toHaveBeenCalledTimes(1)
+
+ // Step 2: Continuation (assistant/tool after user)
+ const step2Params: LanguageModelV2CallOptions = {
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Hello" }],
+ },
+ {
+ role: "assistant",
+ content: [
+ {
+ type: "tool-call",
+ toolCallId: "call-1",
+ toolName: "search",
+ input: {},
+ },
+ ],
+ } as unknown as LanguageModelV2Message,
+ {
+ role: "tool",
+ content: [
+ {
+ type: "tool-result",
+ toolCallId: "call-1",
+ toolName: "search",
+ output: [{ type: "text", text: "some result" }],
+ },
+ ],
+ } as unknown as LanguageModelV2Message,
+ ],
+ }
+
+ const result = await transformParamsWithMemory(step2Params, ctx)
+
+ // Should NOT have called fetch again
+ expect(fetchMock).toHaveBeenCalledTimes(1)
+ // But should still have injected memories
+ expect(result.prompt[0]?.role).toBe("system")
+ expect(result.prompt[0]?.content).toContain("Cached memory")
+ })
+
+ it("should refetch memories on new user turn", async () => {
+ let callCount = 0
+ fetchMock.mockImplementation(() => {
+ callCount++
+ return Promise.resolve({
+ ok: true,
+ json: () =>
+ Promise.resolve(
+ createMockProfileResponse([`Memory from call ${callCount}`]),
+ ),
+ })
+ })
+
+ const ctx = createSupermemoryContext({
+ containerTag: TEST_CONFIG.containerTag,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ // First turn
+ const turn1Params: LanguageModelV2CallOptions = {
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Hello" }],
+ },
+ ],
+ }
+ const result1 = await transformParamsWithMemory(turn1Params, ctx)
+ expect(fetchMock).toHaveBeenCalledTimes(1)
+ expect(result1.prompt[0]?.content).toContain("Memory from call 1")
+
+ // Second turn (different user message)
+ const turn2Params: LanguageModelV2CallOptions = {
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Hello" }],
+ },
+ {
+ role: "assistant",
+ content: [{ type: "text", text: "Hi there!" }],
+ },
+ {
+ role: "user",
+ content: [{ type: "text", text: "What is my name?" }],
+ },
+ ],
+ }
+ const result2 = await transformParamsWithMemory(turn2Params, ctx)
+
+ // Should have called fetch again for new turn
+ expect(fetchMock).toHaveBeenCalledTimes(2)
+ expect(result2.prompt[0]?.content).toContain("Memory from call 2")
+ })
+ })
+
+ describe("Edge cases", () => {
+ let fetchMock: ReturnType<typeof vi.fn>
+
+ beforeEach(() => {
+ fetchMock = vi.fn()
+ globalThis.fetch = fetchMock as unknown as typeof fetch
+ })
+
+ it("should handle API errors gracefully", async () => {
+ fetchMock.mockResolvedValue({
+ ok: false,
+ status: 500,
+ statusText: "Internal Server Error",
+ text: () => Promise.resolve("Server error"),
+ })
+
+ const ctx = createSupermemoryContext({
+ containerTag: TEST_CONFIG.containerTag,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ const params: LanguageModelV2CallOptions = {
+ prompt: [
+ {
+ role: "user",
+ content: [{ type: "text", text: "Hello" }],
+ },
+ ],
+ }
+
+ await expect(transformParamsWithMemory(params, ctx)).rejects.toThrow(
+ "Supermemory profile search failed",
+ )
+ })
+
+ it("should handle empty prompt array", async () => {
+ const ctx = createSupermemoryContext({
+ containerTag: TEST_CONFIG.containerTag,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "query",
+ })
+
+ const params: LanguageModelV2CallOptions = {
+ prompt: [],
+ }
+
+ const result = await transformParamsWithMemory(params, ctx)
+
+ expect(result).toEqual(params)
+ expect(fetchMock).not.toHaveBeenCalled()
+ })
+
+ it("should handle user message with empty content array in query mode", async () => {
+ const ctx = createSupermemoryContext({
+ containerTag: TEST_CONFIG.containerTag,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "query",
+ })
+
+ const params: LanguageModelV2CallOptions = {
+ prompt: [
+ {
+ role: "user",
+ content: [],
+ },
+ ],
+ }
+
+ const result = await transformParamsWithMemory(params, ctx)
+
+ expect(result).toEqual(params)
+ expect(fetchMock).not.toHaveBeenCalled()
+ })
+
+ it("should not mutate the original params.prompt array", async () => {
+ fetchMock.mockResolvedValue({
+ ok: true,
+ json: () => Promise.resolve(createMockProfileResponse(["Memory"])),
+ })
+
+ const ctx = createSupermemoryContext({
+ containerTag: TEST_CONFIG.containerTag,
+ apiKey: TEST_CONFIG.apiKey,
+ mode: "profile",
+ })
+
+ const originalPrompt = [
+ {
+ role: "user" as const,
+ content: [{ type: "text" as const, text: "First" }],
+ },
+ {
+ role: "user" as const,
+ content: [{ type: "text" as const, text: "Last" }],
+ },
+ ]
+ const params: LanguageModelV2CallOptions = { prompt: [...originalPrompt] }
+
+ await transformParamsWithMemory(params, ctx)
+
+ // Verify original array is unchanged
+ expect(params.prompt).toHaveLength(2)
+ expect(
+ (params.prompt[0] as { content: Array<{ text: string }> }).content[0]
+ ?.text,
+ ).toBe("First")
+ expect(
+ (params.prompt[1] as { content: Array<{ text: string }> }).content[0]
+ ?.text,
+ ).toBe("Last")
+ })
+ })
+})