about summary refs log tree commit diff
path: root/packages
diff options
context:
space:
mode:
author    sohamd22 <[email protected]>  2025-10-11 03:45:06 +0000
committer sohamd22 <[email protected]>  2025-10-11 03:45:06 +0000
commit    91a2aa5fdb9a5308eb0f2bffc35aebcf186f18db (patch)
tree      214b180e73bc7e75195289a076160212d496668c /packages
parent    Merge pull request #475 from Mikethebot44/feature/add-memory-dialog-ux (diff)
downloadsupermemory-91a2aa5fdb9a5308eb0f2bffc35aebcf186f18db.tar.xz
supermemory-91a2aa5fdb9a5308eb0f2bffc35aebcf186f18db.zip
create memory adding option in vercel sdk (#484)
### TL;DR

Added support for automatically saving user messages to Supermemory.

### What changed?

- Added a new `addMemory` option to `wrapVercelLanguageModel` that accepts either "always" or "never" (defaults to "never")
- Implemented the `addMemoryTool` function to save user messages to Supermemory
- Modified the middleware to check the `addMemory` setting and save the last user message when appropriate
- Initialized the Supermemory client in the middleware to enable memory storage

### How to test?

1. Set the `SUPERMEMORY_API_KEY` environment variable
2. Use the `wrapVercelLanguageModel` function with the new `addMemory: "always"` option
3. Send a user message through the model
4. Verify that the message is saved to Supermemory with the specified container tag

### Why make this change?

This change enables automatic memory creation from user messages, which improves the system's ability to build a knowledge base without requiring explicit memory creation calls. This is particularly useful for applications that want to automatically capture and store user interactions for future reference.
Diffstat (limited to 'packages')
-rw-r--r--  packages/tools/package.json        |  2
-rw-r--r--  packages/tools/src/vercel/index.ts |  9
-rw-r--r--  packages/tools/src/vercel/middleware.ts | 47
3 files changed, 53 insertions, 5 deletions
diff --git a/packages/tools/package.json b/packages/tools/package.json
index b96bf9cc..536d7c1c 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
{
"name": "@supermemory/tools",
"type": "module",
- "version": "1.2.0",
+ "version": "1.2.13",
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",
diff --git a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts
index 7738b930..f145a060 100644
--- a/packages/tools/src/vercel/index.ts
+++ b/packages/tools/src/vercel/index.ts
@@ -37,7 +37,11 @@ import { createSupermemoryMiddleware } from "./middleware"
const wrapVercelLanguageModel = (
model: LanguageModelV2,
containerTag: string,
- options?: { verbose?: boolean; mode?: "profile" | "query" | "full" },
+ options?: {
+ verbose?: boolean;
+ mode?: "profile" | "query" | "full";
+ addMemory?: "always" | "never";
+ },
): LanguageModelV2 => {
const SUPERMEMORY_API_KEY = process.env.SUPERMEMORY_API_KEY
@@ -47,10 +51,11 @@ const wrapVercelLanguageModel = (
const verbose = options?.verbose ?? false
const mode = options?.mode ?? "profile"
+ const addMemory = options?.addMemory ?? "never"
const wrappedModel = wrapLanguageModel({
model,
- middleware: createSupermemoryMiddleware(containerTag, verbose, mode),
+ middleware: createSupermemoryMiddleware(containerTag, verbose, mode, addMemory),
})
return wrappedModel
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index 0e021d0e..0caa33ed 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -3,6 +3,7 @@ import type {
LanguageModelV2Middleware,
LanguageModelV2Message,
} from "@ai-sdk/provider"
+import Supermemory from "supermemory"
import { createLogger, type Logger } from "./logger"
import { convertProfileToMarkdown, type ProfileStructure } from "./util"
@@ -137,18 +138,60 @@ const addSystemPrompt = async (
}
}
+const addMemoryTool = async (
+ client: Supermemory,
+ containerTag: string,
+ content: string,
+ logger: Logger,
+): Promise<void> => {
+ try {
+ const response = await client.memories.add({
+ content,
+ containerTags: [containerTag],
+ })
+
+ logger.info("Memory saved successfully", {
+ containerTag,
+ contentLength: content.length,
+ memoryId: response.id,
+ })
+ } catch (error) {
+ logger.error("Error saving memory", {
+ error: error instanceof Error ? error.message : "Unknown error",
+ })
+ }
+}
+
export const createSupermemoryMiddleware = (
containerTag: string,
verbose = false,
mode: "profile" | "query" | "full" = "profile",
+ addMemory: "always" | "never" = "never"
): LanguageModelV2Middleware => {
const logger = createLogger(verbose)
+
+ const SUPERMEMORY_API_KEY = process.env.SUPERMEMORY_API_KEY
+ if (!SUPERMEMORY_API_KEY) {
+ throw new Error("SUPERMEMORY_API_KEY is not set")
+ }
+
+ const client = new Supermemory({
+ apiKey: SUPERMEMORY_API_KEY,
+ })
return {
transformParams: async ({ params }) => {
+ const userMessage = getLastUserMessage(params)
+
+ // Add userMessage to memories based on addMemory setting
+ if (addMemory === "always" && userMessage && userMessage.trim()) {
+ addMemoryTool(client, containerTag, userMessage, logger).catch((error) => {
+ logger.error("Failed to create memories", { error })
+ })
+ }
+
if (mode !== "profile") {
- const lastUserMessage = getLastUserMessage(params)
- if (!lastUserMessage) {
+ if (!userMessage) {
logger.debug("No user message found, skipping memory search")
return params
}