 packages/tools/package.json             |  2 +-
 packages/tools/src/vercel/index.ts      |  9 +++++-
 packages/tools/src/vercel/middleware.ts | 47 ++++++++++++++++++++++++++++++++-
 3 files changed, 53 insertions(+), 5 deletions(-)
diff --git a/packages/tools/package.json b/packages/tools/package.json
index b96bf9cc..536d7c1c 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
 {
 	"name": "@supermemory/tools",
 	"type": "module",
-	"version": "1.2.0",
+	"version": "1.2.13",
 	"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
 	"scripts": {
 		"build": "tsdown",
diff --git a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts
index 7738b930..f145a060 100644
--- a/packages/tools/src/vercel/index.ts
+++ b/packages/tools/src/vercel/index.ts
@@ -37,7 +37,11 @@ import { createSupermemoryMiddleware } from "./middleware"
 const wrapVercelLanguageModel = (
 	model: LanguageModelV2,
 	containerTag: string,
-	options?: { verbose?: boolean; mode?: "profile" | "query" | "full" },
+	options?: {
+		verbose?: boolean;
+		mode?: "profile" | "query" | "full";
+		addMemory?: "always" | "never";
+	},
 ): LanguageModelV2 => {
 	const SUPERMEMORY_API_KEY = process.env.SUPERMEMORY_API_KEY
 
@@ -47,10 +51,11 @@ const wrapVercelLanguageModel = (
 
 	const verbose = options?.verbose ?? false
 	const mode = options?.mode ?? "profile"
+	const addMemory = options?.addMemory ?? "never"
 
 	const wrappedModel = wrapLanguageModel({
 		model,
-		middleware: createSupermemoryMiddleware(containerTag, verbose, mode),
+		middleware: createSupermemoryMiddleware(containerTag, verbose, mode, addMemory),
 	})
 
 	return wrappedModel
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index 0e021d0e..0caa33ed 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -3,6 +3,7 @@ import type {
 	LanguageModelV2Middleware,
 	LanguageModelV2Message,
 } from "@ai-sdk/provider"
+import Supermemory from "supermemory"
 import { createLogger, type Logger } from "./logger"
 import { convertProfileToMarkdown, type ProfileStructure } from "./util"
 
@@ -137,18 +138,60 @@ const addSystemPrompt = async (
 	}
 }
 
+const addMemoryTool = async (
+	client: Supermemory,
+	containerTag: string,
+	content: string,
+	logger: Logger,
+): Promise<void> => {
+	try {
+		const response = await client.memories.add({
+			content,
+			containerTags: [containerTag],
+		})
+
+		logger.info("Memory saved successfully", {
+			containerTag,
+			contentLength: content.length,
+			memoryId: response.id,
+		})
+	} catch (error) {
+		logger.error("Error saving memory", {
+			error: error instanceof Error ? error.message : "Unknown error",
+		})
+	}
+}
+
 export const createSupermemoryMiddleware = (
 	containerTag: string,
 	verbose = false,
 	mode: "profile" | "query" | "full" = "profile",
+	addMemory: "always" | "never" = "never"
 ): LanguageModelV2Middleware => {
 	const logger = createLogger(verbose)
+
+	const SUPERMEMORY_API_KEY = process.env.SUPERMEMORY_API_KEY
+	if (!SUPERMEMORY_API_KEY) {
+		throw new Error("SUPERMEMORY_API_KEY is not set")
+	}
+
+	const client = new Supermemory({
+		apiKey: SUPERMEMORY_API_KEY,
+	})
 
 	return {
 		transformParams: async ({ params }) => {
+			const userMessage = getLastUserMessage(params)
+
+			// Add userMessage to memories based on addMemory setting
+			if (addMemory === "always" && userMessage && userMessage.trim()) {
+				addMemoryTool(client, containerTag, userMessage, logger).catch((error) => {
+					logger.error("Failed to create memories", { error })
+				})
+			}
+
 			if (mode !== "profile") {
-				const lastUserMessage = getLastUserMessage(params)
-				if (!lastUserMessage) {
+				if (!userMessage) {
 					logger.debug("No user message found, skipping memory search")
 					return params
 				}