aboutsummaryrefslogtreecommitdiff
path: root/packages
diff options
context:
space:
mode:
authorsohamd22 <[email protected]>2025-10-19 22:24:00 +0000
committersohamd22 <[email protected]>2025-10-19 22:24:00 +0000
commit79f3059b2a383ca0d743dbe364dc5322c397ec56 (patch)
tree8f189d8dc6586fe4121768573cce72d5bba6a405 /packages
parentversion bump' (diff)
downloadsupermemory-79f3059b2a383ca0d743dbe364dc5322c397ec56.tar.xz
supermemory-79f3059b2a383ca0d743dbe364dc5322c397ec56.zip
add conversationId functionality to map to customId in ingestion (#499)
### TL;DR Added support for conversation grouping in Supermemory middleware through a new `conversationId` parameter. ### What changed? - Added a new `conversationId` option to the `withSupermemory` function to group messages into a single document for contextual memory generation - Updated the middleware to use this conversation ID when adding memories, using a `customId` format of `conversation:{conversationId}` - Created a new `getConversationContent` function that extracts the full conversation content from the prompt parameters - Enhanced memory storage to save entire conversations rather than just the last user message - Updated documentation and examples to demonstrate the new parameter usage ### How to test? 1. Import the `withSupermemory` function from the package 2. Create a model with memory using the new `conversationId` parameter: ```typescript const modelWithMemory = withSupermemory(openai("gpt-4"), "user-123", { conversationId: "conversation-456", mode: "full", addMemory: "always" }) ``` 3. Use the model in a conversation and verify that messages are grouped by the conversation ID 4. Check that memories are being stored with the custom ID format `conversation:{conversationId}` ### Why make this change? This enhancement improves the contextual understanding of the AI by allowing related messages to be grouped together as a single conversation document. By using a conversation ID, the system can maintain coherent memory across multiple interactions within the same conversation thread, providing better context retrieval and more relevant responses.
Diffstat (limited to 'packages')
-rw-r--r--packages/tools/src/vercel/index.ts15
-rw-r--r--packages/tools/src/vercel/middleware.ts27
2 files changed, 37 insertions, 5 deletions
diff --git a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts
index b6871009..12717ac6 100644
--- a/packages/tools/src/vercel/index.ts
+++ b/packages/tools/src/vercel/index.ts
@@ -13,6 +13,7 @@ import { createSupermemoryMiddleware } from "./middleware"
* @param model - The language model to wrap with supermemory capabilities
* @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
* @param options - Optional configuration options for the middleware
+ * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation
* @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false)
* @param options.mode - Optional mode for memory search: "profile" (default), "query", or "full"
* @param options.addMemory - Optional mode for adding memories: "always", "never" (default)
@@ -24,7 +25,11 @@ import { createSupermemoryMiddleware } from "./middleware"
* import { withSupermemory } from "@supermemory/tools/ai-sdk"
* import { openai } from "@ai-sdk/openai"
*
- * const modelWithMemory = withSupermemory(openai("gpt-4"), "user-123")
+ * const modelWithMemory = withSupermemory(openai("gpt-4"), "user-123", {
+ * conversationId: "conversation-456",
+ * mode: "full",
+ * addMemory: "always"
+ * })
*
* const result = await generateText({
* model: modelWithMemory,
@@ -38,8 +43,9 @@ import { createSupermemoryMiddleware } from "./middleware"
const wrapVercelLanguageModel = (
model: LanguageModelV2,
containerTag: string,
- options?: {
- verbose?: boolean;
+ options?: {
+ conversationId?: string;
+ verbose?: boolean;
mode?: "profile" | "query" | "full";
addMemory?: "always" | "never";
},
@@ -50,13 +56,14 @@ const wrapVercelLanguageModel = (
throw new Error("SUPERMEMORY_API_KEY is not set")
}
+ const conversationId = options?.conversationId
const verbose = options?.verbose ?? false
const mode = options?.mode ?? "profile"
const addMemory = options?.addMemory ?? "never"
const wrappedModel = wrapLanguageModel({
model,
- middleware: createSupermemoryMiddleware(containerTag, verbose, mode, addMemory),
+ middleware: createSupermemoryMiddleware(containerTag, conversationId, verbose, mode, addMemory),
})
return wrappedModel
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index 86ec7b88..0abebb5a 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -132,20 +132,36 @@ const addSystemPrompt = async (
}
}
+const getConversationContent = (params: LanguageModelV2CallOptions) => {
+ return params.prompt
+ .map((msg) => {
+ const role = msg.role === "user" ? "User" : "Assistant"
+ const content = msg.content
+ .filter((c) => c.type === "text")
+ .map((c) => c.text)
+ .join(" ")
+ return `${role}: ${content}`
+ })
+ .join("\n\n")
+}
+
const addMemoryTool = async (
client: Supermemory,
containerTag: string,
content: string,
+ customId: string | undefined,
logger: Logger,
): Promise<void> => {
try {
const response = await client.memories.add({
content,
containerTags: [containerTag],
+ customId,
})
logger.info("Memory saved successfully", {
containerTag,
+ customId,
contentLength: content.length,
memoryId: response.id,
})
@@ -158,6 +174,7 @@ const addMemoryTool = async (
export const createSupermemoryMiddleware = (
containerTag: string,
+ conversationId?: string,
verbose = false,
mode: "profile" | "query" | "full" = "profile",
addMemory: "always" | "never" = "never"
@@ -173,7 +190,14 @@ export const createSupermemoryMiddleware = (
const userMessage = getLastUserMessage(params)
if (addMemory === "always" && userMessage && userMessage.trim()) {
- addMemoryTool(client, containerTag, userMessage, logger)
+ const content = conversationId
+ ? getConversationContent(params)
+ : userMessage
+ const customId = conversationId
+ ? `conversation:${conversationId}`
+ : undefined
+
+ addMemoryTool(client, containerTag, content, customId, logger)
}
if (mode !== "profile") {
@@ -185,6 +209,7 @@ export const createSupermemoryMiddleware = (
logger.info("Starting memory search", {
containerTag,
+ conversationId,
mode,
})