| author | MaheshtheDev <[email protected]> | 2025-10-28 22:28:22 +0000 |
|---|---|---|
| committer | MaheshtheDev <[email protected]> | 2025-10-28 22:28:22 +0000 |
| commit | 97071502a777647f16ab67f4215bee718d8f57af | |
| tree | f4420f44e935b8acfb9e92ca40bffaec9435a487 | |
| parent | feat: docs with new array contains (#534) | |
| download | supermemory-97071502a777647f16ab67f4215bee718d8f57af.tar.xz supermemory-97071502a777647f16ab67f4215bee718d8f57af.zip | |
feat(@supermemory/tools): capture assistant responses with filtered memory (#539)
### Added streaming support to the Supermemory middleware and improved memory handling in the AI SDK integration.
### What changed?
- Refactored the middleware architecture to support both streaming and non-streaming responses
- Extracted memory prompt functionality into a separate module (`memory-prompt.ts`)
- Added memory saving capability for streaming responses
- Improved the formatting of memory content with a "User Supermemories:" prefix
- Added utility function to filter out supermemories from content
- Created a new streaming example in the test app with a dedicated route and page (see the sketch after this list)
- Updated version from 1.3.0 to 1.3.1 in package.json
- Simplified installation instructions in README.md
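
For orientation before the diff: the new streaming route added under `test/chatapp/app/api/stream/route.ts` shows the intended usage end to end. The sketch below mirrors it, with one assumption called out inline: the `@supermemory/tools/ai-sdk` import path stands in for the test app's relative import of the package source.

```ts
import { convertToModelMessages, streamText, type UIMessage } from "ai"
import { openai } from "@ai-sdk/openai"
// Assumed package-level entry point; the test app imports from ../../../../../src/vercel instead.
import { withSupermemory } from "@supermemory/tools/ai-sdk"

// Wrap the model once; the middleware injects memories into the system prompt
// and, with addMemory: "always", captures the assistant response after each turn.
const model = withSupermemory(openai("gpt-4"), "user-123", {
  mode: "full",
  addMemory: "always",
  conversationId: "chat-session",
  verbose: true,
})

export async function POST(req: Request) {
  const { messages }: { messages: UIMessage[] } = await req.json()

  const result = streamText({
    model,
    system: "You are a helpful assistant.",
    messages: convertToModelMessages(messages),
  })

  // Streaming responses are captured too, via the new wrapStream handler.
  return result.toUIMessageStreamResponse()
}
```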
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | bun.lock | 2 |
| -rw-r--r-- | packages/tools/README.md | 6 |
| -rw-r--r-- | packages/tools/package.json | 2 |
| -rw-r--r-- | packages/tools/src/vercel/memory-prompt.ts | 119 |
| -rw-r--r-- | packages/tools/src/vercel/middleware.ts | 244 |
| -rw-r--r-- | packages/tools/src/vercel/util.ts | 43 |
| -rw-r--r-- | packages/tools/test/chatapp/app/api/chat/route.ts | 20 |
| -rw-r--r-- | packages/tools/test/chatapp/app/api/stream/route.ts | 22 |
| -rw-r--r-- | packages/tools/test/chatapp/app/stream/page.tsx | 108 |
9 files changed, 405 insertions, 161 deletions
````diff
diff --git a/bun.lock b/bun.lock
@@ -213,7 +213,7 @@
   },
   "packages/tools": {
     "name": "@supermemory/tools",
-    "version": "1.2.16",
+    "version": "1.3.0",
     "dependencies": {
       "@ai-sdk/anthropic": "^2.0.25",
       "@ai-sdk/openai": "^2.0.23",
diff --git a/packages/tools/README.md b/packages/tools/README.md
index 6db2f7f8..b9df6930 100644
--- a/packages/tools/README.md
+++ b/packages/tools/README.md
@@ -8,12 +8,6 @@ This package provides supermemory tools for both AI SDK and OpenAI function call
 
 ```bash
 npm install @supermemory/tools
-# or
-bun add @supermemory/tools
-# or
-pnpm add @supermemory/tools
-# or
-yarn add @supermemory/tools
 ```
 
 ## Usage
diff --git a/packages/tools/package.json b/packages/tools/package.json
index d1589a3b..5997c1c0 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@supermemory/tools",
   "type": "module",
-  "version": "1.3.0",
+  "version": "1.3.1",
   "description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
   "scripts": {
     "build": "tsdown",
diff --git a/packages/tools/src/vercel/memory-prompt.ts b/packages/tools/src/vercel/memory-prompt.ts
new file mode 100644
index 00000000..8b2e302b
--- /dev/null
+++ b/packages/tools/src/vercel/memory-prompt.ts
@@ -0,0 +1,119 @@
+import type { LanguageModelV2CallOptions } from "@ai-sdk/provider"
+import type { Logger } from "./logger"
+import { convertProfileToMarkdown, type ProfileStructure } from "./util"
+
+const supermemoryProfileSearch = async (
+  containerTag: string,
+  queryText: string,
+): Promise<ProfileStructure> => {
+  const payload = queryText
+    ? JSON.stringify({
+        q: queryText,
+        containerTag: containerTag,
+      })
+    : JSON.stringify({
+        containerTag: containerTag,
+      })
+
+  try {
+    const response = await fetch("https://api.supermemory.ai/v4/profile", {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${process.env.SUPERMEMORY_API_KEY}`,
+      },
+      body: payload,
+    })
+
+    if (!response.ok) {
+      const errorText = await response.text().catch(() => "Unknown error")
+      throw new Error(
+        `Supermemory profile search failed: ${response.status} ${response.statusText}. ${errorText}`,
+      )
+    }
+
+    return await response.json()
+  } catch (error) {
+    if (error instanceof Error) {
+      throw error
+    }
+    throw new Error(`Supermemory API request failed: ${error}`)
+  }
+}
+
+export const addSystemPrompt = async (
+  params: LanguageModelV2CallOptions,
+  containerTag: string,
+  logger: Logger,
+  mode: "profile" | "query" | "full",
+) => {
+  const systemPromptExists = params.prompt.some(
+    (prompt) => prompt.role === "system",
+  )
+
+  const queryText =
+    mode !== "profile"
+      ? params.prompt
+          .slice()
+          .reverse()
+          .find((prompt) => prompt.role === "user")
+          ?.content?.filter((content) => content.type === "text")
+          ?.map((content) => (content.type === "text" ? content.text : ""))
+          ?.join(" ") || ""
+      : ""
+
+  const memoriesResponse = await supermemoryProfileSearch(
+    containerTag,
+    queryText,
+  )
+
+  const memoryCountStatic = memoriesResponse.profile.static?.length || 0
+  const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0
+
+  logger.info("Memory search completed", {
+    containerTag,
+    memoryCountStatic,
+    memoryCountDynamic,
+    queryText:
+      queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
+    mode,
+  })
+
+  const profileData =
+    mode !== "query" ? convertProfileToMarkdown(memoriesResponse) : ""
+  const searchResultsMemories =
+    mode !== "profile"
+      ? `Search results for user's recent message: \n${memoriesResponse.searchResults.results
+          .map((result) => `- ${result.memory}`)
+          .join("\n")}`
+      : ""
+
+  const memories =
+    `User Supermemories: \n${profileData}\n${searchResultsMemories}`.trim()
+  if (memories) {
+    logger.debug("Memory content preview", {
+      content: memories,
+      fullLength: memories.length,
+    })
+  }
+
+  if (systemPromptExists) {
+    logger.debug("Added memories to existing system prompt")
+    return {
+      ...params,
+      prompt: params.prompt.map((prompt) =>
+        prompt.role === "system"
+          ? { ...prompt, content: `${prompt.content} \n ${memories}` }
+          : prompt,
+      ),
+    }
+  }
+
+  logger.debug(
+    "System prompt does not exist, created system prompt with memories",
+  )
+  return {
+    ...params,
+    prompt: [{ role: "system" as const, content: memories }, ...params.prompt],
+  }
+}
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index 586105a8..dd61e397 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -1,163 +1,50 @@
 import type {
   LanguageModelV2CallOptions,
   LanguageModelV2Middleware,
-  LanguageModelV2Message,
+  LanguageModelV2StreamPart,
 } from "@ai-sdk/provider"
 import Supermemory from "supermemory"
 import { createLogger, type Logger } from "./logger"
-import { convertProfileToMarkdown, type ProfileStructure } from "./util"
-
-const getLastUserMessage = (params: LanguageModelV2CallOptions) => {
-  const lastUserMessage = params.prompt
-    .slice().reverse()
-    .find((prompt: LanguageModelV2Message) => prompt.role === "user")
-  const memories = lastUserMessage?.content
-    .filter((content) => content.type === "text")
-    .map((content) => content.text)
-    .join(" ")
-  return memories
-}
-
-const supermemoryprofilesearch = async (
-  containerTag: string,
-  queryText: string,
-): Promise<ProfileStructure> => {
-  const payload = queryText
-    ? JSON.stringify({
-        q: queryText,
-        containerTag: containerTag,
-      })
-    : JSON.stringify({
-        containerTag: containerTag,
-      })
-
-  try {
-    const response = await fetch("https://api.supermemory.ai/v4/profile", {
-      method: "POST",
-      headers: {
-        "Content-Type": "application/json",
-        Authorization: `Bearer ${process.env.SUPERMEMORY_API_KEY}`,
-      },
-      body: payload,
-    })
-
-    if (!response.ok) {
-      const errorText = await response.text().catch(() => "Unknown error")
-      throw new Error(
-        `Supermemory profile search failed: ${response.status} ${response.statusText}. ${errorText}`,
-      )
-    }
-
-    return await response.json()
-  } catch (error) {
-    if (error instanceof Error) {
-      throw error
-    }
-    throw new Error(`Supermemory API request failed: ${error}`)
-  }
-}
-
-const addSystemPrompt = async (
-  params: LanguageModelV2CallOptions,
-  containerTag: string,
-  logger: Logger,
-  mode: "profile" | "query" | "full",
-) => {
-  const systemPromptExists = params.prompt.some(
-    (prompt) => prompt.role === "system",
-  )
-
-  const queryText =
-    mode !== "profile"
-      ? params.prompt
-          .slice().reverse()
-          .find((prompt) => prompt.role === "user")
-          ?.content?.filter((content) => content.type === "text")
-          ?.map((content) => (content.type === "text" ? content.text : ""))
-          ?.join(" ") || ""
-      : ""
-
-  const memoriesResponse = await supermemoryprofilesearch(
-    containerTag,
-    queryText,
-  )
-
-  const memoryCountStatic = memoriesResponse.profile.static?.length || 0
-  const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0
-
-  logger.info("Memory search completed", {
-    containerTag,
-    memoryCountStatic,
-    memoryCountDynamic,
-    queryText:
-      queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
-    mode,
-  })
-
-  const profileData =
-    mode !== "query" ? convertProfileToMarkdown(memoriesResponse) : ""
-  const searchResultsMemories =
-    mode !== "profile"
-      ? `Search results for user's recent message: \n${memoriesResponse.searchResults.results
-          .map((result) => `- ${result.memory}`)
-          .join("\n")}`
-      : ""
-
-  const memories = `${profileData}\n${searchResultsMemories}`.trim()
-  if (memories) {
-    logger.debug("Memory content preview", {
-      content: memories,
-      fullLength: memories.length,
-    })
-  }
-
-  if (systemPromptExists) {
-    logger.debug("Added memories to existing system prompt")
-    return {
-      ...params,
-      prompt: params.prompt.map((prompt) =>
-        prompt.role === "system"
-          ? { ...prompt, content: `${prompt.content} \n ${memories}` }
-          : prompt,
-      ),
-    }
-  }
-
-  logger.debug(
-    "System prompt does not exist, created system prompt with memories",
-  )
-  return {
-    ...params,
-    prompt: [{ role: "system" as const, content: memories }, ...params.prompt],
-  }
-}
+import {
+  type OutputContentItem,
+  getLastUserMessage,
+  filterOutSupermemories,
+} from "./util"
+import { addSystemPrompt } from "./memory-prompt"
 
 const getConversationContent = (params: LanguageModelV2CallOptions) => {
   return params.prompt
+    .filter((msg) => msg.role !== "system" && msg.role !== "tool")
     .map((msg) => {
       const role = msg.role === "user" ? "User" : "Assistant"
       if (typeof msg.content === "string") {
-        return `${role}: ${msg.content}`
+        return `${role}: ${filterOutSupermemories(msg.content)}`
       }
       const content = msg.content
         .filter((c) => c.type === "text")
-        .map((c) => (c.type === "text" ? c.text : ""))
+        .map((c) => (c.type === "text" ? filterOutSupermemories(c.text) : ""))
         .join(" ")
       return `${role}: ${content}`
     })
     .join("\n\n")
 }
-
 const addMemoryTool = async (
   client: Supermemory,
   containerTag: string,
-  content: string,
-  customId: string | undefined,
+  conversationId: string | undefined,
+  assistantResponseText: string,
+  params: LanguageModelV2CallOptions,
   logger: Logger,
 ): Promise<void> => {
+  const userMessage = getLastUserMessage(params)
+  const content = conversationId
+    ? `${getConversationContent(params)} \n\n Assistant: ${assistantResponseText}`
+    : `User: ${userMessage} \n\n Assistant: ${assistantResponseText}`
+  const customId = conversationId ? `conversation:${conversationId}` : undefined
+
   try {
     const response = await client.memories.add({
       content,
@@ -168,6 +55,7 @@ const addMemoryTool = async (
     logger.info("Memory saved successfully", {
       containerTag,
       customId,
+      content,
       contentLength: content.length,
       memoryId: response.id,
     })
@@ -183,7 +71,7 @@ export const createSupermemoryMiddleware = (
   conversationId?: string,
   verbose = false,
   mode: "profile" | "query" | "full" = "profile",
-  addMemory: "always" | "never" = "never"
+  addMemory: "always" | "never" = "never",
 ): LanguageModelV2Middleware => {
 
   const logger = createLogger(verbose)
@@ -195,17 +83,6 @@
     transformParams: async ({ params }) => {
       const userMessage = getLastUserMessage(params)
 
-      if (addMemory === "always" && userMessage && userMessage.trim()) {
-        const content = conversationId
-          ? getConversationContent(params)
-          : userMessage
-        const customId = conversationId
-          ? `conversation:${conversationId}`
-          : undefined
-
-        addMemoryTool(client, containerTag, content, customId, logger)
-      }
-
      if (mode !== "profile") {
        if (!userMessage) {
          logger.debug("No user message found, skipping memory search")
@@ -227,5 +104,84 @@
       )
       return transformedParams
     },
+    wrapGenerate: async ({ doGenerate, params }) => {
+      const userMessage = getLastUserMessage(params)
+
+      try {
+        const result = await doGenerate()
+        const assistantResponse = result.content
+        const assistantResponseText = assistantResponse
+          .map((content) => (content.type === "text" ? content.text : ""))
+          .join("")
+
+        if (addMemory === "always" && userMessage && userMessage.trim()) {
+          addMemoryTool(
+            client,
+            containerTag,
+            conversationId,
+            assistantResponseText,
+            params,
+            logger,
+          )
+        }
+
+        return result
+      } catch (error) {
+        logger.error("Error generating response", {
+          error: error instanceof Error ? error.message : "Unknown error",
+        })
+        throw error
+      }
+    },
+    wrapStream: async ({ doStream, params }) => {
+      const userMessage = getLastUserMessage(params)
+      let generatedText = ""
+
+      try {
+        const { stream, ...rest } = await doStream()
+        const transformStream = new TransformStream<
+          LanguageModelV2StreamPart,
+          LanguageModelV2StreamPart
+        >({
+          transform(chunk, controller) {
+            if (chunk.type === "text-delta") {
+              generatedText += chunk.delta
+            }
+
+            controller.enqueue(chunk)
+          },
+          flush: async () => {
+            const content: OutputContentItem[] = []
+            if (generatedText) {
+              content.push({
+                type: "text",
+                text: generatedText,
+              })
+            }
+
+            if (addMemory === "always" && userMessage && userMessage.trim()) {
+              addMemoryTool(
+                client,
+                containerTag,
+                conversationId,
+                generatedText,
+                params,
+                logger,
+              )
+            }
+          },
+        })
+
+        return {
+          stream: stream.pipeThrough(transformStream),
+          ...rest,
+        }
+      } catch (error) {
+        logger.error("Error streaming response", {
+          error: error instanceof Error ? error.message : "Unknown error",
+        })
+        throw error
+      }
+    },
   }
 }
diff --git a/packages/tools/src/vercel/util.ts b/packages/tools/src/vercel/util.ts
index 712a25eb..1c06ec16 100644
--- a/packages/tools/src/vercel/util.ts
+++ b/packages/tools/src/vercel/util.ts
@@ -1,17 +1,36 @@
+import type { LanguageModelV2CallOptions, LanguageModelV2Message } from "@ai-sdk/provider"
+
 export interface ProfileStructure {
   profile: {
     static?: string[]
     dynamic?: string[]
-  },
+  }
   searchResults: {
     results: [
       {
-        memory: string,
-      }
+        memory: string
+      },
     ]
   }
 }
 
+export type OutputContentItem =
+  | { type: "text"; text: string }
+  | { type: "reasoning"; text: string }
+  | {
+      type: "tool-call"
+      id: string
+      function: { name: string; arguments: string }
+    }
+  | { type: "file"; name: string; mediaType: string; data: string }
+  | {
+      type: "source"
+      sourceType: string
+      id: string
+      url: string
+      title: string
+    }
+
 /**
  * Convert ProfileStructure to markdown
  * based on profile.static and profile.dynamic properties
@@ -33,3 +52,21 @@ export function convertProfileToMarkdown(data: ProfileStructure): string {
 
   return sections.join("\n\n")
 }
+
+export const getLastUserMessage = (params: LanguageModelV2CallOptions) => {
+  const lastUserMessage = params.prompt
+    .slice()
+    .reverse()
+    .find((prompt: LanguageModelV2Message) => prompt.role === "user")
+  const memories = lastUserMessage?.content
+    .filter((content) => content.type === "text")
+    .map((content) => content.text)
+    .join(" ")
+  return memories
+}
+
+
+export const filterOutSupermemories = (content: string) => {
+  return content.split("User Supermemories: ")[0]
+}
+
diff --git a/packages/tools/test/chatapp/app/api/chat/route.ts b/packages/tools/test/chatapp/app/api/chat/route.ts
index 7fddcbec..dc7baf81 100644
--- a/packages/tools/test/chatapp/app/api/chat/route.ts
+++ b/packages/tools/test/chatapp/app/api/chat/route.ts
@@ -1,6 +1,6 @@
-import { generateText, type ModelMessage } from "ai"
+import { streamText, type ModelMessage } from "ai"
 import { openai } from "@ai-sdk/openai"
-import { withSupermemory } from "../../../../../src/vercel"
+import { withSupermemory } from "../../../../../src/vercel"
 
 const model = withSupermemory(openai("gpt-4"), "user-123", {
   mode: "full",
@@ -9,15 +9,23 @@ const model = withSupermemory(openai("gpt-4"), "user-123", {
   verbose: true,
 })
 
-
 export async function POST(req: Request) {
   const { messages }: { messages: ModelMessage[] } = await req.json()
 
-  const { response } = await generateText({
+  // Commented out generateText implementation
+  // const { response } = await generateText({
+  //   model,
+  //   system: "You are a helpful assistant.",
+  //   messages,
+  // })
+  // return Response.json({ messages: response.messages })
+
+  // New streaming implementation
+  const result = await streamText({
     model,
     system: "You are a helpful assistant.",
     messages,
   })
 
-  return Response.json({ messages: response.messages })
-}
\ No newline at end of file
+  return result.toUIMessageStreamResponse()
+}
diff --git a/packages/tools/test/chatapp/app/api/stream/route.ts b/packages/tools/test/chatapp/app/api/stream/route.ts
new file mode 100644
index 00000000..d7cbbbe0
--- /dev/null
+++ b/packages/tools/test/chatapp/app/api/stream/route.ts
@@ -0,0 +1,22 @@
+import { convertToModelMessages, streamText, type UIMessage } from "ai"
+import { openai } from "@ai-sdk/openai"
+import { withSupermemory } from "../../../../../src/vercel"
+
+const model = withSupermemory(openai("gpt-4"), "user-123", {
+  mode: "full",
+  addMemory: "always",
+  conversationId: "chat-session",
+  verbose: true,
+})
+
+export async function POST(req: Request) {
+  const { messages }: { messages: UIMessage[] } = await req.json()
+
+  const result = streamText({
+    model,
+    system: "You are a helpful assistant.",
+    messages: convertToModelMessages(messages),
+  })
+
+  return result.toUIMessageStreamResponse()
+}
diff --git a/packages/tools/test/chatapp/app/stream/page.tsx b/packages/tools/test/chatapp/app/stream/page.tsx
new file mode 100644
index 00000000..4bc209ed
--- /dev/null
+++ b/packages/tools/test/chatapp/app/stream/page.tsx
@@ -0,0 +1,108 @@
+"use client"
+
+import { DefaultChatTransport } from "ai"
+import { useChat } from "@ai-sdk/react"
+import { useState } from "react"
+
+export default function Page() {
+  const [input, setInput] = useState("")
+
+  const { messages, sendMessage, status } = useChat({
+    // @ts-expect-error - Type mismatch between ai and @ai-sdk/react versions
+    transport: new DefaultChatTransport({
+      api: "/api/stream",
+    }),
+  })
+
+  return (
+    <div className="min-h-screen bg-background">
+      <div className="max-w-4xl mx-auto p-4 h-screen flex flex-col">
+        {/* Header */}
+        <div className="mb-6">
+          <h1 className="text-2xl font-bold text-foreground">Chat App</h1>
+          <p className="text-muted-foreground">
+            Chat with AI using Supermemory
+          </p>
+        </div>
+
+        {/* Messages Container */}
+        <div className="flex-1 overflow-y-auto space-y-4 mb-4">
+          {messages.length === 0 && (
+            <div className="text-center text-muted-foreground py-8">
+              Start a conversation by typing a message below
+            </div>
+          )}
+          {messages.map((message, index) => (
+            <div
+              key={`${message.role}-${index}`}
+              className={`flex ${
+                message.role === "user" ? "justify-end" : "justify-start"
+              }`}
+            >
+              <div
+                className={`max-w-[80%] rounded-lg px-4 py-2 ${
+                  message.role === "user"
+                    ? "bg-blue-500 text-white"
+                    : "bg-gray-100 text-gray-900 dark:bg-gray-800 dark:text-gray-100"
+                }`}
+              >
+                <div className="text-sm font-medium mb-1 capitalize">
+                  {message.role}
+                </div>
+                <div>
+                  {message.parts.map((part) => {
+                    if (part.type === "text") {
+                      return <div key={`${message.id}-text`}>{part.text}</div>
+                    }
+                    return null
+                  })}
+                </div>
+              </div>
+            </div>
+          ))}
+          {status === "streaming" && (
+            <div className="flex justify-start">
+              <div className="bg-gray-100 dark:bg-gray-800 rounded-lg px-4 py-2">
+                <div className="flex items-center space-x-2">
+                  <div className="animate-spin rounded-full h-4 w-4 border-b-2 border-gray-600" />
+                  <span className="text-sm text-muted-foreground">
+                    AI is thinking...
+                  </span>
+                </div>
+              </div>
+            </div>
+          )}
+        </div>
+
+        {/* Input Container */}
+        <div className="flex gap-2">
+          <input
+            value={input}
+            onChange={(event) => setInput(event.target.value)}
+            onKeyDown={(event) => {
+              if (event.key === "Enter" && !event.shiftKey) {
+                event.preventDefault()
+                sendMessage({
+                  parts: [{ type: "text", text: input }],
+                })
+              }
+            }}
+            placeholder="Type your message here..."
+            disabled={status === "streaming"}
+            className="flex-1 px-4 py-2 border border-gray-300 dark:border-gray-600 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent bg-background text-foreground placeholder:text-muted-foreground disabled:opacity-50 disabled:cursor-not-allowed"
+          />
+          <button
+            type="button"
+            onClick={() =>
+              sendMessage({ parts: [{ type: "text", text: input }] })
+            }
+            disabled={!input.trim() || status === "streaming"}
+            className="px-6 py-2 bg-blue-500 text-white rounded-lg hover:bg-blue-600 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2 disabled:opacity-50 disabled:cursor-not-allowed transition-colors"
+          >
+            Send
+          </button>
+        </div>
+      </div>
+    </div>
+  )
+}
````
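
One detail worth calling out from the diff above: `getConversationContent` now runs every message through the new `filterOutSupermemories` helper before a conversation is saved, so injected memories are never re-ingested as new memories. A minimal illustration of the helper's behavior (the helper lives in `packages/tools/src/vercel/util.ts` and is only exported internally; the relative import assumes you are inside that module's directory):

```ts
import { filterOutSupermemories } from "./util"

// After the middleware runs, an enriched prompt looks like:
const enriched =
  "You are a helpful assistant. \n User Supermemories: \n- Prefers TypeScript"

// split("User Supermemories: ")[0] keeps only the text before the marker,
// dropping the injected memory block.
console.log(filterOutSupermemories(enriched))
// -> "You are a helpful assistant. \n "
```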