| author | MaheshtheDev <[email protected]> | 2025-12-24 01:36:03 +0000 |
|---|---|---|
| committer | MaheshtheDev <[email protected]> | 2025-12-24 01:36:03 +0000 |
| commit | d095bd234e4511bca137e95e67dd98c3c4e2a5d8 (patch) | |
| tree | c61f1a50651f4c68373a23772905a9c9fc4fb259 | /packages/tools |
| parent | bump package (diff) | |
| download | supermemory-d095bd234e4511bca137e95e67dd98c3c4e2a5d8.tar.xz, supermemory-d095bd234e4511bca137e95e67dd98c3c4e2a5d8.zip | |
feat(@supermemory/tools): vercel ai sdk compatible with v5 and v6 (#628)
Diffstat (limited to 'packages/tools')
| -rw-r--r-- | packages/tools/package.json | 7 |
| -rw-r--r-- | packages/tools/src/vercel/index.ts | 157 |
| -rw-r--r-- | packages/tools/src/vercel/memory-prompt.ts | 36 |
| -rw-r--r-- | packages/tools/src/vercel/middleware.ts | 233 |
| -rw-r--r-- | packages/tools/src/vercel/util.ts | 31 |
5 files changed, 299 insertions, 165 deletions
```diff
diff --git a/packages/tools/package.json b/packages/tools/package.json
index d11353f0..d56a7dd1 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
 {
 	"name": "@supermemory/tools",
 	"type": "module",
-	"version": "1.3.61",
+	"version": "1.3.62",
 	"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
 	"scripts": {
 		"build": "tsdown",
@@ -13,13 +13,13 @@
 	"dependencies": {
 		"@ai-sdk/anthropic": "^2.0.25",
 		"@ai-sdk/openai": "^2.0.23",
-		"@ai-sdk/provider": "^2.0.0",
 		"ai": "^5.0.29",
 		"openai": "^4.104.0",
 		"supermemory": "^3.0.0-alpha.26",
 		"zod": "^4.1.5"
 	},
 	"devDependencies": {
+		"@ai-sdk/provider": "^3.0.0",
 		"@total-typescript/tsconfig": "^1.0.4",
 		"@types/bun": "^1.2.21",
 		"dotenv": "^16.6.1",
@@ -28,6 +28,9 @@
 		"vitest": "^3.2.4",
 		"@anthropic-ai/sdk": "^0.65.0"
 	},
+	"peerDependencies": {
+		"@ai-sdk/provider": "^2.0.0 || ^3.0.0"
+	},
 	"main": "./dist/index.js",
 	"module": "./dist/index.js",
 	"types": "./dist/index.d.ts",
diff --git a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts
index 5c74d9e5..0eedc753 100644
--- a/packages/tools/src/vercel/index.ts
+++ b/packages/tools/src/vercel/index.ts
@@ -1,25 +1,37 @@
-import type { LanguageModelV2 } from "@ai-sdk/provider"
-import { wrapLanguageModel } from "ai"
-import { createSupermemoryMiddleware } from "./middleware"
+import {
+	type LanguageModel,
+	type LanguageModelCallOptions,
+	type LanguageModelStreamPart,
+	getLastUserMessage,
+} from "./util"
+import {
+	createSupermemoryContext,
+	transformParamsWithMemory,
+	extractAssistantResponseText,
+	saveMemoryAfterResponse,
+} from "./middleware"
 
 interface WrapVercelLanguageModelOptions {
-	conversationId?: string;
-	verbose?: boolean;
-	mode?: "profile" | "query" | "full";
-	addMemory?: "always" | "never";
-	apiKey?: string;
-	baseUrl?: string;
+	conversationId?: string
+	verbose?: boolean
+	mode?: "profile" | "query" | "full"
+	addMemory?: "always" | "never"
+	apiKey?: string
+	baseUrl?: string
 }
 
 /**
  * Wraps a language model with supermemory middleware to automatically inject relevant memories
  * into the system prompt based on the user's message content.
  *
- * This middleware searches the supermemory API for relevant memories using the container tag
+ * This wrapper searches the supermemory API for relevant memories using the container tag
  * and user message, then either appends memories to an existing system prompt or creates
  * a new system prompt with the memories.
  *
- * @param model - The language model to wrap with supermemory capabilities
+ * Supports both Vercel AI SDK 5 (LanguageModelV2) and SDK 6 (LanguageModelV3) via runtime
+ * detection of `model.specificationVersion`.
+ *
+ * @param model - The language model to wrap with supermemory capabilities (V2 or V3)
  * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
  * @param options - Optional configuration options for the middleware
  * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation
@@ -51,29 +63,124 @@ interface WrapVercelLanguageModelOptions {
  * @throws {Error} When neither `options.apiKey` nor `process.env.SUPERMEMORY_API_KEY` are set
  * @throws {Error} When supermemory API request fails
  */
-const wrapVercelLanguageModel = (
-	model: LanguageModelV2,
+const wrapVercelLanguageModel = <T extends LanguageModel>(
+	model: T,
 	containerTag: string,
 	options?: WrapVercelLanguageModelOptions,
-): LanguageModelV2 => {
+): T => {
 	const providedApiKey = options?.apiKey ?? process.env.SUPERMEMORY_API_KEY
 
 	if (!providedApiKey) {
-		throw new Error("SUPERMEMORY_API_KEY is not set — provide it via `options.apiKey` or set `process.env.SUPERMEMORY_API_KEY`")
+		throw new Error(
+			"SUPERMEMORY_API_KEY is not set — provide it via `options.apiKey` or set `process.env.SUPERMEMORY_API_KEY`",
+		)
 	}
 
-	const conversationId = options?.conversationId
-	const verbose = options?.verbose ?? false
-	const mode = options?.mode ?? "profile"
-	const addMemory = options?.addMemory ?? "never"
-	const baseUrl = options?.baseUrl
-
-	const wrappedModel = wrapLanguageModel({
-		model,
-		middleware: createSupermemoryMiddleware(containerTag, providedApiKey, conversationId, verbose, mode, addMemory, baseUrl),
+	const ctx = createSupermemoryContext({
+		containerTag,
+		apiKey: providedApiKey,
+		conversationId: options?.conversationId,
+		verbose: options?.verbose ?? false,
+		mode: options?.mode ?? "profile",
+		addMemory: options?.addMemory ?? "never",
+		baseUrl: options?.baseUrl,
 	})
 
+	const wrappedModel = {
+		...model,
+
+		doGenerate: async (params: LanguageModelCallOptions) => {
+			try {
+				const transformedParams = await transformParamsWithMemory(params, ctx)
+
+				// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
+				const result = await model.doGenerate(transformedParams as any)
+
+				const userMessage = getLastUserMessage(params)
+				if (ctx.addMemory === "always" && userMessage && userMessage.trim()) {
+					const assistantResponseText = extractAssistantResponseText(
+						result.content as unknown[],
+					)
+					saveMemoryAfterResponse(
+						ctx.client,
+						ctx.containerTag,
+						ctx.conversationId,
+						assistantResponseText,
+						params,
+						ctx.logger,
+						ctx.apiKey,
+						ctx.normalizedBaseUrl,
+					)
+				}
+
+				return result
+			} catch (error) {
+				ctx.logger.error("Error generating response", {
+					error: error instanceof Error ? error.message : "Unknown error",
+				})
+				throw error
+			}
+		},
+
+		doStream: async (params: LanguageModelCallOptions) => {
+			let generatedText = ""
+
+			try {
+				const transformedParams = await transformParamsWithMemory(params, ctx)
+
+				const { stream, ...rest } = await model.doStream(
+					// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3
+					transformedParams as any,
+				)
+
+				const transformStream = new TransformStream<
+					LanguageModelStreamPart,
+					LanguageModelStreamPart
+				>({
+					transform(chunk, controller) {
+						if (chunk.type === "text-delta") {
+							generatedText += chunk.delta
+						}
+						controller.enqueue(chunk)
+					},
+					flush: async () => {
+						const userMessage = getLastUserMessage(params)
+						if (
+							ctx.addMemory === "always" &&
+							userMessage &&
+							userMessage.trim()
+						) {
+							saveMemoryAfterResponse(
+								ctx.client,
+								ctx.containerTag,
+								ctx.conversationId,
+								generatedText,
+								params,
+								ctx.logger,
+								ctx.apiKey,
+								ctx.normalizedBaseUrl,
+							)
+						}
+					},
+				})
+
+				return {
+					stream: stream.pipeThrough(transformStream),
+					...rest,
+				}
+			} catch (error) {
+				ctx.logger.error("Error streaming response", {
+					error: error instanceof Error ? error.message : "Unknown error",
+				})
+				throw error
+			}
+		},
+	} as T
+
 	return wrappedModel
 }
 
-export { wrapVercelLanguageModel as withSupermemory, type WrapVercelLanguageModelOptions as WithSupermemoryOptions }
+export {
+	wrapVercelLanguageModel as withSupermemory,
+	type WrapVercelLanguageModelOptions as WithSupermemoryOptions,
+}
diff --git a/packages/tools/src/vercel/memory-prompt.ts b/packages/tools/src/vercel/memory-prompt.ts
index e0b51780..1a9a05dd 100644
--- a/packages/tools/src/vercel/memory-prompt.ts
+++ b/packages/tools/src/vercel/memory-prompt.ts
@@ -1,7 +1,10 @@
-import type { LanguageModelV2CallOptions } from "@ai-sdk/provider"
 import { deduplicateMemories } from "../shared"
 import type { Logger } from "./logger"
-import { convertProfileToMarkdown, type ProfileStructure } from "./util"
+import {
+	type LanguageModelCallOptions,
+	convertProfileToMarkdown,
+	type ProfileStructure,
+} from "./util"
 
 export const normalizeBaseUrl = (url?: string): string => {
 	const defaultUrl = "https://api.supermemory.ai"
@@ -50,12 +53,12 @@ const supermemoryProfileSearch = async (
 }
 
 export const addSystemPrompt = async (
-	params: LanguageModelV2CallOptions,
+	params: LanguageModelCallOptions,
 	containerTag: string,
 	logger: Logger,
 	mode: "profile" | "query" | "full",
 	baseUrl = "https://api.supermemory.ai",
-) => {
+): Promise<LanguageModelCallOptions> => {
 	const systemPromptExists = params.prompt.some(
 		(prompt) => prompt.role === "system",
 	)
@@ -138,21 +141,22 @@ export const addSystemPrompt = async (
 
 	if (systemPromptExists) {
 		logger.debug("Added memories to existing system prompt")
-		return {
-			...params,
-			prompt: params.prompt.map((prompt) =>
-				prompt.role === "system"
-					? { ...prompt, content: `${prompt.content} \n ${memories}` }
-					: prompt,
-			),
-		}
+		// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3 prompt types
+		const newPrompt = params.prompt.map((prompt: any) =>
+			prompt.role === "system"
+				? { ...prompt, content: `${prompt.content} \n ${memories}` }
+				: prompt,
+		)
+		return { ...params, prompt: newPrompt } as LanguageModelCallOptions
 	}
 
 	logger.debug(
 		"System prompt does not exist, created system prompt with memories",
 	)
-	return {
-		...params,
-		prompt: [{ role: "system" as const, content: memories }, ...params.prompt],
-	}
+	const newPrompt = [
+		{ role: "system" as const, content: memories },
+		...params.prompt,
+		// biome-ignore lint/suspicious/noExplicitAny: Union type compatibility between V2 and V3 prompt types
+	] as any
+	return { ...params, prompt: newPrompt } as LanguageModelCallOptions
 }
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index 260718b2..a2dd77ee 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -1,8 +1,3 @@
-import type {
-	LanguageModelV2CallOptions,
-	LanguageModelV2Middleware,
-	LanguageModelV2StreamPart,
-} from "@ai-sdk/provider"
 import Supermemory from "supermemory"
 import {
 	addConversation,
@@ -10,13 +5,15 @@ import {
 } from "../conversations-client"
 import { createLogger, type Logger } from "./logger"
 import {
+	type LanguageModelCallOptions,
+	type LanguageModelStreamPart,
 	type OutputContentItem,
 	getLastUserMessage,
 	filterOutSupermemories,
 } from "./util"
 import { addSystemPrompt, normalizeBaseUrl } from "./memory-prompt"
 
-const getConversationContent = (params: LanguageModelV2CallOptions) => {
+export const getConversationContent = (params: LanguageModelCallOptions) => {
 	return params.prompt
 		.filter((msg) => msg.role !== "system" && msg.role !== "tool")
 		.map((msg) => {
@@ -35,8 +32,8 @@ const getConversationContent = (params: LanguageModelV2CallOptions) => {
 		.join("\n\n")
 }
 
-const convertToConversationMessages = (
-	params: LanguageModelV2CallOptions,
+export const convertToConversationMessages = (
+	params: LanguageModelCallOptions,
 	assistantResponseText: string,
 ): ConversationMessage[] => {
 	const messages: ConversationMessage[] = []
@@ -95,12 +92,12 @@ const convertToConversationMessages = (
 	return messages
 }
 
-const addMemoryTool = async (
+export const saveMemoryAfterResponse = async (
 	client: Supermemory,
 	containerTag: string,
 	conversationId: string | undefined,
 	assistantResponseText: string,
-	params: LanguageModelV2CallOptions,
+	params: LanguageModelCallOptions,
 	logger: Logger,
 	apiKey: string,
 	baseUrl: string,
@@ -156,15 +153,40 @@ const addMemoryTool = async (
 	}
 }
 
-export const createSupermemoryMiddleware = (
-	containerTag: string,
-	apiKey: string,
-	conversationId?: string,
-	verbose = false,
-	mode: "profile" | "query" | "full" = "profile",
-	addMemory: "always" | "never" = "never",
-	baseUrl?: string,
-): LanguageModelV2Middleware => {
+export interface SupermemoryMiddlewareOptions {
+	containerTag: string
+	apiKey: string
+	conversationId?: string
+	verbose?: boolean
+	mode?: "profile" | "query" | "full"
+	addMemory?: "always" | "never"
+	baseUrl?: string
+}
+
+export interface SupermemoryMiddlewareContext {
+	client: Supermemory
+	logger: Logger
+	containerTag: string
+	conversationId?: string
+	mode: "profile" | "query" | "full"
+	addMemory: "always" | "never"
+	normalizedBaseUrl: string
+	apiKey: string
+}
+
+export const createSupermemoryContext = (
+	options: SupermemoryMiddlewareOptions,
+): SupermemoryMiddlewareContext => {
+	const {
+		containerTag,
+		apiKey,
+		conversationId,
+		verbose = false,
+		mode = "profile",
+		addMemory = "never",
+		baseUrl,
+	} = options
+
 	const logger = createLogger(verbose)
 	const normalizedBaseUrl = normalizeBaseUrl(baseUrl)
@@ -176,113 +198,92 @@ export const createSupermemoryMiddleware = (
 	})
 
 	return {
-		transformParams: async ({ params }) => {
-			const userMessage = getLastUserMessage(params)
+		client,
+		logger,
+		containerTag,
+		conversationId,
+		mode,
+		addMemory,
+		normalizedBaseUrl,
+		apiKey,
+	}
+}
 
-			if (mode !== "profile") {
-				if (!userMessage) {
-					logger.debug("No user message found, skipping memory search")
-					return params
-				}
-			}
+export const transformParamsWithMemory = async (
+	params: LanguageModelCallOptions,
+	ctx: SupermemoryMiddlewareContext,
+): Promise<LanguageModelCallOptions> => {
+	const userMessage = getLastUserMessage(params)
 
-			logger.info("Starting memory search", {
-				containerTag,
-				conversationId,
-				mode,
-			})
+	if (ctx.mode !== "profile") {
+		if (!userMessage) {
+			ctx.logger.debug("No user message found, skipping memory search")
+			return params
+		}
+	}
 
-			const transformedParams = await addSystemPrompt(
-				params,
-				containerTag,
-				logger,
-				mode,
-				normalizedBaseUrl,
-			)
-			return transformedParams
-		},
-		wrapGenerate: async ({ doGenerate, params }) => {
-			const userMessage = getLastUserMessage(params)
+	ctx.logger.info("Starting memory search", {
+		containerTag: ctx.containerTag,
+		conversationId: ctx.conversationId,
+		mode: ctx.mode,
+	})
+
+	const transformedParams = await addSystemPrompt(
+		params,
+		ctx.containerTag,
+		ctx.logger,
+		ctx.mode,
+		ctx.normalizedBaseUrl,
+	)
+	return transformedParams
+}
 
-			try {
-				const result = await doGenerate()
-				const assistantResponse = result.content
-				const assistantResponseText = assistantResponse
-					.map((content) => (content.type === "text" ? content.text : ""))
-					.join("")
+export const extractAssistantResponseText = (content: unknown[]): string => {
+	return (content as Array<{ type: string; text?: string }>)
+		.map((item) => (item.type === "text" ? item.text || "" : ""))
+		.join("")
+}
 
-				if (addMemory === "always" && userMessage && userMessage.trim()) {
-					addMemoryTool(
-						client,
-						containerTag,
-						conversationId,
-						assistantResponseText,
-						params,
-						logger,
-						apiKey,
-						normalizedBaseUrl,
-					)
-				}
+export const createStreamTransform = (
+	ctx: SupermemoryMiddlewareContext,
+	params: LanguageModelCallOptions,
+): {
+	transform: TransformStream<LanguageModelStreamPart, LanguageModelStreamPart>
+	getGeneratedText: () => string
+} => {
+	let generatedText = ""
 
-				return result
-			} catch (error) {
-				logger.error("Error generating response", {
-					error: error instanceof Error ? error.message : "Unknown error",
-				})
-				throw error
+	const transform = new TransformStream<
+		LanguageModelStreamPart,
+		LanguageModelStreamPart
+	>({
+		transform(chunk, controller) {
+			if (chunk.type === "text-delta") {
+				generatedText += chunk.delta
 			}
+			controller.enqueue(chunk)
 		},
-		wrapStream: async ({ doStream, params }) => {
+		flush: async () => {
 			const userMessage = getLastUserMessage(params)
-			let generatedText = ""
-
-			try {
-				const { stream, ...rest } = await doStream()
-				const transformStream = new TransformStream<
-					LanguageModelV2StreamPart,
-					LanguageModelV2StreamPart
-				>({
-					transform(chunk, controller) {
-						if (chunk.type === "text-delta") {
-							generatedText += chunk.delta
-						}
-
-						controller.enqueue(chunk)
-					},
-					flush: async () => {
-						const content: OutputContentItem[] = []
-						if (generatedText) {
-							content.push({
-								type: "text",
-								text: generatedText,
-							})
-						}
-
-						if (addMemory === "always" && userMessage && userMessage.trim()) {
-							addMemoryTool(
-								client,
-								containerTag,
-								conversationId,
-								generatedText,
-								params,
-								logger,
-								apiKey,
-								normalizedBaseUrl,
-							)
-						}
-					},
-				})
-
-				return {
-					stream: stream.pipeThrough(transformStream),
-					...rest,
-				}
-			} catch (error) {
-				logger.error("Error streaming response", {
-					error: error instanceof Error ? error.message : "Unknown error",
-				})
-				throw error
+			if (ctx.addMemory === "always" && userMessage && userMessage.trim()) {
+				saveMemoryAfterResponse(
+					ctx.client,
+					ctx.containerTag,
+					ctx.conversationId,
+					generatedText,
+					params,
+					ctx.logger,
+					ctx.apiKey,
+					ctx.normalizedBaseUrl,
+				)
 			}
 		},
+	})
+
+	return {
+		transform,
+		getGeneratedText: () => generatedText,
 	}
 }
+
+export { createLogger, type Logger, type OutputContentItem }
diff --git a/packages/tools/src/vercel/util.ts b/packages/tools/src/vercel/util.ts
index ac36280d..572f44a3 100644
--- a/packages/tools/src/vercel/util.ts
+++ b/packages/tools/src/vercel/util.ts
@@ -1,4 +1,25 @@
-import type { LanguageModelV2CallOptions, LanguageModelV2Message } from "@ai-sdk/provider"
+import type {
+	LanguageModelV2,
+	LanguageModelV2CallOptions,
+	LanguageModelV2Message,
+	LanguageModelV2StreamPart,
+	LanguageModelV3,
+	LanguageModelV3CallOptions,
+	LanguageModelV3Message,
+	LanguageModelV3StreamPart,
+} from "@ai-sdk/provider"
+
+// Union types for dual SDK version support (V2 = SDK 5, V3 = SDK 6)
+export type LanguageModel = LanguageModelV2 | LanguageModelV3
+export type LanguageModelCallOptions =
+	| LanguageModelV2CallOptions
+	| LanguageModelV3CallOptions
+export type LanguageModelMessage =
+	| LanguageModelV2Message
+	| LanguageModelV3Message
+export type LanguageModelStreamPart =
+	| LanguageModelV2StreamPart
+	| LanguageModelV3StreamPart
 
 export interface ProfileStructure {
 	profile: {
@@ -58,20 +79,18 @@ export function convertProfileToMarkdown(data: ProfileMarkdownData): string {
 	return sections.join("\n\n")
 }
 
-export const getLastUserMessage = (params: LanguageModelV2CallOptions) => {
+export const getLastUserMessage = (params: LanguageModelCallOptions) => {
 	const lastUserMessage = params.prompt
 		.slice()
 		.reverse()
-		.find((prompt: LanguageModelV2Message) => prompt.role === "user")
+		.find((prompt: LanguageModelMessage) => prompt.role === "user")
 
 	const memories = lastUserMessage?.content
 		.filter((content) => content.type === "text")
-		.map((content) => content.text)
+		.map((content) => (content as { type: "text"; text: string }).text)
 		.join(" ")
 
 	return memories
 }
-
 export const filterOutSupermemories = (content: string) => {
 	return content.split("User Supermemories: ")[0]
 }
```