about summary refs log tree commit diff
path: root/packages
diff options
context:
space:
mode:
author Dhravya <[email protected]> 2025-12-20 00:46:13 +0000
committer Dhravya <[email protected]> 2025-12-20 00:46:13 +0000
commit 81e192e6168e99efa8055365184bb9fbf9afcd2b (patch)
tree abdee7813b3ca0ad1946a1b7475a7cf281da726c /packages
parent feat(docs): v4 hybrid search parameter and examples (#621) (diff)
download supermemory-81e192e6168e99efa8055365184bb9fbf9afcd2b.tar.xz
supermemory-81e192e6168e99efa8055365184bb9fbf9afcd2b.zip
Support for conversations in SDKs (#618) (branch: 12-15-support_for_conversations)
Diffstat (limited to 'packages')
-rw-r--r--packages/ai-sdk/package.json6
-rw-r--r--packages/tools/package.json2
-rw-r--r--packages/tools/src/conversations-client.ts100
-rw-r--r--packages/tools/src/openai/index.ts2
-rw-r--r--packages/tools/src/openai/middleware.ts181
-rw-r--r--packages/tools/src/vercel/index.ts5
-rw-r--r--packages/tools/src/vercel/memory-prompt.ts11
-rw-r--r--packages/tools/src/vercel/middleware.ts112
-rw-r--r--packages/tools/test-supermemory.ts53
-rw-r--r--packages/tools/test/chatapp/.gitignore1
-rw-r--r--packages/tools/test/chatapp/app/api/openai-chat/route.ts1
-rw-r--r--packages/tools/test/chatapp/app/api/stream/route.ts1
12 files changed, 432 insertions(+), 43 deletions(-)
diff --git a/packages/ai-sdk/package.json b/packages/ai-sdk/package.json
index 64fd2114..f3800c1e 100644
--- a/packages/ai-sdk/package.json
+++ b/packages/ai-sdk/package.json
@@ -12,9 +12,9 @@
"dependencies": {
"@ai-sdk/openai": "^2.0.22",
"@ai-sdk/provider": "^2.0.0",
- "ai": "^5.0.26",
+ "ai": "^5.0.113",
"supermemory": "^3.0.0-alpha.26",
- "zod": "^4.1.4"
+ "zod": "^4.1.8"
},
"devDependencies": {
"@total-typescript/tsconfig": "^1.0.4",
@@ -25,7 +25,7 @@
},
"main": "./dist/index.js",
"module": "./dist/index.js",
- "types": "./dist/index.d.ts",
+ "types": "./dist/index-CITmF79o.d.ts",
"exports": {
".": "./dist/index.js",
"./package.json": "./package.json"
diff --git a/packages/tools/package.json b/packages/tools/package.json
index 2e8550ad..4edd0a19 100644
--- a/packages/tools/package.json
+++ b/packages/tools/package.json
@@ -1,7 +1,7 @@
{
"name": "@supermemory/tools",
"type": "module",
- "version": "1.3.13",
+ "version": "1.3.51",
"description": "Memory tools for AI SDK and OpenAI function calling with supermemory",
"scripts": {
"build": "tsdown",
diff --git a/packages/tools/src/conversations-client.ts b/packages/tools/src/conversations-client.ts
new file mode 100644
index 00000000..c147265e
--- /dev/null
+++ b/packages/tools/src/conversations-client.ts
@@ -0,0 +1,100 @@
+/**
+ * Client for the Supermemory Conversations API
+ *
+ * This module provides a helper function to ingest conversations using the
+ * /v4/conversations endpoint, which supports structured messages with smart
+ * diffing and append detection on the backend.
+ */
+
+export interface ConversationMessage {
+ role: "user" | "assistant" | "system" | "tool"
+ content: string | ContentPart[]
+ name?: string
+ tool_calls?: ToolCall[]
+ tool_call_id?: string
+}
+
+export interface ContentPart {
+ type: "text" | "image_url"
+ text?: string
+ image_url?: { url: string }
+}
+
+export interface ToolCall {
+ id: string
+ type: "function"
+ function: {
+ name: string
+ arguments: string
+ }
+}
+
+export interface AddConversationParams {
+ conversationId: string
+ messages: ConversationMessage[]
+ containerTags?: string[]
+ metadata?: Record<string, string | number | boolean>
+ apiKey: string
+ baseUrl?: string
+}
+
+export interface AddConversationResponse {
+ id: string
+ conversationId: string
+ status: string
+}
+
+/**
+ * Adds a conversation to Supermemory using the /v4/conversations endpoint
+ *
+ * This endpoint supports:
+ * - Structured messages with roles (user, assistant, system, tool)
+ * - Multi-modal content (text, images)
+ * - Tool calls and responses
+ *
+ * @param params - Configuration for adding the conversation
+ * @returns Promise resolving to the conversation response
+ * @throws Error if the API request fails
+ *
+ * @example
+ * ```typescript
+ * const response = await addConversation({
+ * conversationId: "conv-123",
+ * messages: [
+ * { role: "user", content: "Hello!" },
+ * { role: "assistant", content: "Hi there!" }
+ * ],
+ * containerTags: ["user-456"],
+ * apiKey: process.env.SUPERMEMORY_API_KEY,
+ * })
+ * ```
+ */
+export async function addConversation(
+ params: AddConversationParams,
+): Promise<AddConversationResponse> {
+ const baseUrl = params.baseUrl || "https://api.supermemory.ai"
+ const url = `${baseUrl}/v4/conversations`
+
+ const response = await fetch(url, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${params.apiKey}`,
+ },
+ body: JSON.stringify({
+ conversationId: params.conversationId,
+ messages: params.messages,
+ containerTags: params.containerTags,
+ metadata: params.metadata,
+ }),
+ })
+
+ if (!response.ok) {
+ const errorText = await response.text().catch(() => "Unknown error")
+ throw new Error(
+ `Failed to add conversation: ${response.status} ${response.statusText}. ${errorText}`,
+ )
+ }
+
+ return await response.json()
+}
diff --git a/packages/tools/src/openai/index.ts b/packages/tools/src/openai/index.ts
index 54e98402..6200d5b1 100644
--- a/packages/tools/src/openai/index.ts
+++ b/packages/tools/src/openai/index.ts
@@ -71,6 +71,7 @@ export function withSupermemory(
const verbose = options?.verbose ?? false
const mode = options?.mode ?? "profile"
const addMemory = options?.addMemory ?? "never"
+ const baseUrl = options?.baseUrl
const openaiWithSupermemory = createOpenAIMiddleware(
openaiClient,
@@ -80,6 +81,7 @@ export function withSupermemory(
verbose,
mode,
addMemory,
+ baseUrl,
},
)
diff --git a/packages/tools/src/openai/middleware.ts b/packages/tools/src/openai/middleware.ts
index 3a851b31..29b66f70 100644
--- a/packages/tools/src/openai/middleware.ts
+++ b/packages/tools/src/openai/middleware.ts
@@ -1,13 +1,21 @@
import type OpenAI from "openai"
import Supermemory from "supermemory"
+import { addConversation } from "../conversations-client"
import { createLogger, type Logger } from "../vercel/logger"
import { convertProfileToMarkdown } from "../vercel/util"
+const normalizeBaseUrl = (url?: string): string => {
+ const defaultUrl = "https://api.supermemory.ai"
+ if (!url) return defaultUrl
+ return url.endsWith("/") ? url.slice(0, -1) : url
+}
+
export interface OpenAIMiddlewareOptions {
conversationId?: string
verbose?: boolean
mode?: "profile" | "query" | "full"
addMemory?: "always" | "never"
+ baseUrl?: string
}
interface SupermemoryProfileSearch {
@@ -78,6 +86,7 @@ const getLastUserMessage = (
const supermemoryProfileSearch = async (
containerTag: string,
queryText: string,
+ baseUrl: string,
): Promise<SupermemoryProfileSearch> => {
const payload = queryText
? JSON.stringify({
@@ -89,7 +98,7 @@ const supermemoryProfileSearch = async (
})
try {
- const response = await fetch("https://api.supermemory.ai/v4/profile", {
+ const response = await fetch(`${baseUrl}/v4/profile`, {
method: "POST",
headers: {
"Content-Type": "application/json",
@@ -147,18 +156,61 @@ const addSystemPrompt = async (
containerTag: string,
logger: Logger,
mode: "profile" | "query" | "full",
+ baseUrl: string,
) => {
const systemPromptExists = messages.some((msg) => msg.role === "system")
const queryText = mode !== "profile" ? getLastUserMessage(messages) : ""
- const memories = await searchAndFormatMemories(
+ const memoriesResponse = await supermemoryProfileSearch(
+ containerTag,
queryText,
+ baseUrl,
+ )
+
+ const memoryCountStatic = memoriesResponse.profile.static?.length || 0
+ const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0
+
+ logger.info("Memory search completed for chat API", {
containerTag,
- logger,
+ memoryCountStatic,
+ memoryCountDynamic,
+ queryText:
+ queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
mode,
- "chat",
- )
+ })
+
+ const profileData =
+ mode !== "query"
+ ? convertProfileToMarkdown({
+ profile: {
+ static: memoriesResponse.profile.static?.map((item) => item.memory),
+ dynamic: memoriesResponse.profile.dynamic?.map(
+ (item) => item.memory,
+ ),
+ },
+ searchResults: {
+ results: memoriesResponse.searchResults.results.map((item) => ({
+ memory: item.memory,
+ })) as [{ memory: string }],
+ },
+ })
+ : ""
+ const searchResultsMemories =
+ mode !== "profile"
+ ? `Search results for user's recent message: \n${memoriesResponse.searchResults.results
+ .map((result) => `- ${result.memory}`)
+ .join("\n")}`
+ : ""
+
+ const memories = `${profileData}\n${searchResultsMemories}`.trim()
+
+ if (memories) {
+ logger.debug("Memory content preview for chat API", {
+ content: memories,
+ fullLength: memories.length,
+ })
+ }
if (systemPromptExists) {
logger.debug("Added memories to existing system prompt")
@@ -215,11 +267,17 @@ const getConversationContent = (
* Saves the provided content as a memory with the specified container tag and
* optional custom ID. Logs success or failure information for debugging.
*
+ * If customId starts with "conversation:" and messages are provided, uses the
+ * /v4/conversations endpoint with structured messages instead of the memories endpoint.
+ *
* @param client - SuperMemory client instance
* @param containerTag - The container tag/identifier for the memory
- * @param content - The content to save as a memory
- * @param customId - Optional custom ID for the memory (e.g., conversation ID)
+ * @param content - The content to save as a memory (used for fallback)
+ * @param customId - Optional custom ID for the memory (e.g., conversation:456)
* @param logger - Logger instance for debugging and info output
+ * @param messages - Optional OpenAI messages array (for conversation endpoint)
+ * @param apiKey - API key for direct conversation endpoint calls
+ * @param baseUrl - Base URL for API calls
* @returns Promise that resolves when memory is saved (or fails silently)
*
* @example
@@ -227,9 +285,12 @@ const getConversationContent = (
* await addMemoryTool(
* supermemoryClient,
* "user-123",
- * "User prefers React with TypeScript",
- * "conversation-456",
- * logger
+ * "User: Hello\n\nAssistant: Hi!",
+ * "conversation:456",
+ * logger,
+ * messages, // OpenAI messages array
+ * apiKey,
+ * baseUrl
* )
* ```
*/
@@ -239,8 +300,51 @@ const addMemoryTool = async (
content: string,
customId: string | undefined,
logger: Logger,
+ messages?: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
+ apiKey?: string,
+ baseUrl?: string,
): Promise<void> => {
try {
+ if (customId && messages && apiKey) {
+ const conversationId = customId.replace("conversation:", "")
+
+ // Convert OpenAI messages to conversation format
+ const conversationMessages = messages.map((msg) => ({
+ role: msg.role as "user" | "assistant" | "system" | "tool",
+ content:
+ typeof msg.content === "string"
+ ? msg.content
+ : Array.isArray(msg.content)
+ ? msg.content
+ .filter((c) => c.type === "text")
+ .map((c) => ({
+ type: "text" as const,
+ text: (c as { type: "text"; text: string }).text,
+ }))
+ : "",
+ ...((msg as any).name && { name: (msg as any).name }),
+ ...((msg as any).tool_calls && { tool_calls: (msg as any).tool_calls }),
+ ...((msg as any).tool_call_id && { tool_call_id: (msg as any).tool_call_id }),
+ }))
+
+ const response = await addConversation({
+ conversationId,
+ messages: conversationMessages,
+ containerTags: [containerTag],
+ apiKey,
+ baseUrl,
+ })
+
+ logger.info("Conversation saved successfully via /v4/conversations", {
+ containerTag,
+ conversationId,
+ messageCount: messages.length,
+ responseId: response.id,
+ })
+ return
+ }
+
+ // Fallback to old behavior for non-conversation memories
const response = await client.memories.add({
content,
containerTags: [containerTag],
@@ -293,8 +397,10 @@ export function createOpenAIMiddleware(
options?: OpenAIMiddlewareOptions,
) {
const logger = createLogger(options?.verbose ?? false)
+ const baseUrl = normalizeBaseUrl(options?.baseUrl)
const client = new Supermemory({
apiKey: process.env.SUPERMEMORY_API_KEY,
+ ...(baseUrl !== "https://api.supermemory.ai" ? { baseURL: baseUrl } : {}),
})
const conversationId = options?.conversationId
@@ -327,6 +433,7 @@ export function createOpenAIMiddleware(
const memoriesResponse = await supermemoryProfileSearch(
containerTag,
queryText,
+ baseUrl,
)
const memoryCountStatic = memoriesResponse.profile.static?.length || 0
@@ -345,7 +452,9 @@ export function createOpenAIMiddleware(
mode !== "query"
? convertProfileToMarkdown({
profile: {
- static: memoriesResponse.profile.static?.map((item) => item.memory),
+ static: memoriesResponse.profile.static?.map(
+ (item) => item.memory,
+ ),
dynamic: memoriesResponse.profile.dynamic?.map(
(item) => item.memory,
),
@@ -380,7 +489,9 @@ export function createOpenAIMiddleware(
params: Parameters<typeof originalResponsesCreate>[0],
) => {
if (!originalResponsesCreate) {
- throw new Error("Responses API is not available in this OpenAI client version")
+ throw new Error(
+ "Responses API is not available in this OpenAI client version",
+ )
}
const input = typeof params.input === "string" ? params.input : ""
@@ -399,24 +510,26 @@ export function createOpenAIMiddleware(
const operations: Promise<any>[] = []
if (addMemory === "always" && input?.trim()) {
- const content = conversationId
- ? `Input: ${input}`
- : input
+ const content = conversationId ? `Input: ${input}` : input
const customId = conversationId
? `conversation:${conversationId}`
: undefined
- operations.push(addMemoryTool(client, containerTag, content, customId, logger))
+ operations.push(
+ addMemoryTool(client, containerTag, content, customId, logger),
+ )
}
const queryText = mode !== "profile" ? input : ""
- operations.push(searchAndFormatMemories(
- queryText,
- containerTag,
- logger,
- mode,
- "responses",
- ))
+ operations.push(
+ searchAndFormatMemories(
+ queryText,
+ containerTag,
+ logger,
+ mode,
+ "responses",
+ ),
+ )
const results = await Promise.all(operations)
const memories = results[results.length - 1] // Memory search result is always last
@@ -462,16 +575,24 @@ export function createOpenAIMiddleware(
? `conversation:${conversationId}`
: undefined
- operations.push(addMemoryTool(client, containerTag, content, customId, logger))
+ operations.push(
+ addMemoryTool(
+ client,
+ containerTag,
+ content,
+ customId,
+ logger,
+ messages,
+ process.env.SUPERMEMORY_API_KEY,
+ baseUrl,
+ ),
+ )
}
}
- operations.push(addSystemPrompt(
- messages,
- containerTag,
- logger,
- mode,
- ))
+ operations.push(
+ addSystemPrompt(messages, containerTag, logger, mode, baseUrl),
+ )
const results = await Promise.all(operations)
const enhancedMessages = results[results.length - 1] // Enhanced messages result is always last
diff --git a/packages/tools/src/vercel/index.ts b/packages/tools/src/vercel/index.ts
index 1d87c73c..5c74d9e5 100644
--- a/packages/tools/src/vercel/index.ts
+++ b/packages/tools/src/vercel/index.ts
@@ -8,6 +8,7 @@ interface WrapVercelLanguageModelOptions {
mode?: "profile" | "query" | "full";
addMemory?: "always" | "never";
apiKey?: string;
+ baseUrl?: string;
}
/**
@@ -26,6 +27,7 @@ interface WrapVercelLanguageModelOptions {
* @param options.mode - Optional mode for memory search: "profile", "query", or "full" (default: "profile")
* @param options.addMemory - Optional mode for memory search: "always", "never" (default: "never")
* @param options.apiKey - Optional Supermemory API key to use instead of the environment variable
+ * @param options.baseUrl - Optional base URL for the Supermemory API (default: "https://api.supermemory.ai")
*
* @returns A wrapped language model that automatically includes relevant memories in prompts
*
@@ -64,10 +66,11 @@ const wrapVercelLanguageModel = (
const verbose = options?.verbose ?? false
const mode = options?.mode ?? "profile"
const addMemory = options?.addMemory ?? "never"
+ const baseUrl = options?.baseUrl
const wrappedModel = wrapLanguageModel({
model,
- middleware: createSupermemoryMiddleware(containerTag, providedApiKey, conversationId, verbose, mode, addMemory),
+ middleware: createSupermemoryMiddleware(containerTag, providedApiKey, conversationId, verbose, mode, addMemory, baseUrl),
})
return wrappedModel
diff --git a/packages/tools/src/vercel/memory-prompt.ts b/packages/tools/src/vercel/memory-prompt.ts
index 8b2e302b..d6dff44a 100644
--- a/packages/tools/src/vercel/memory-prompt.ts
+++ b/packages/tools/src/vercel/memory-prompt.ts
@@ -2,9 +2,16 @@ import type { LanguageModelV2CallOptions } from "@ai-sdk/provider"
import type { Logger } from "./logger"
import { convertProfileToMarkdown, type ProfileStructure } from "./util"
+export const normalizeBaseUrl = (url?: string): string => {
+ const defaultUrl = "https://api.supermemory.ai"
+ if (!url) return defaultUrl
+ return url.endsWith("/") ? url.slice(0, -1) : url
+}
+
const supermemoryProfileSearch = async (
containerTag: string,
queryText: string,
+ baseUrl: string,
): Promise<ProfileStructure> => {
const payload = queryText
? JSON.stringify({
@@ -16,7 +23,7 @@ const supermemoryProfileSearch = async (
})
try {
- const response = await fetch("https://api.supermemory.ai/v4/profile", {
+ const response = await fetch(`${baseUrl}/v4/profile`, {
method: "POST",
headers: {
"Content-Type": "application/json",
@@ -46,6 +53,7 @@ export const addSystemPrompt = async (
containerTag: string,
logger: Logger,
mode: "profile" | "query" | "full",
+ baseUrl = "https://api.supermemory.ai",
) => {
const systemPromptExists = params.prompt.some(
(prompt) => prompt.role === "system",
@@ -65,6 +73,7 @@ export const addSystemPrompt = async (
const memoriesResponse = await supermemoryProfileSearch(
containerTag,
queryText,
+ baseUrl,
)
const memoryCountStatic = memoriesResponse.profile.static?.length || 0
diff --git a/packages/tools/src/vercel/middleware.ts b/packages/tools/src/vercel/middleware.ts
index bc107a0a..260718b2 100644
--- a/packages/tools/src/vercel/middleware.ts
+++ b/packages/tools/src/vercel/middleware.ts
@@ -4,13 +4,17 @@ import type {
LanguageModelV2StreamPart,
} from "@ai-sdk/provider"
import Supermemory from "supermemory"
+import {
+ addConversation,
+ type ConversationMessage,
+} from "../conversations-client"
import { createLogger, type Logger } from "./logger"
import {
type OutputContentItem,
getLastUserMessage,
filterOutSupermemories,
} from "./util"
-import { addSystemPrompt } from "./memory-prompt"
+import { addSystemPrompt, normalizeBaseUrl } from "./memory-prompt"
const getConversationContent = (params: LanguageModelV2CallOptions) => {
return params.prompt
@@ -31,6 +35,66 @@ const getConversationContent = (params: LanguageModelV2CallOptions) => {
.join("\n\n")
}
+const convertToConversationMessages = (
+ params: LanguageModelV2CallOptions,
+ assistantResponseText: string,
+): ConversationMessage[] => {
+ const messages: ConversationMessage[] = []
+
+ for (const msg of params.prompt) {
+ if (typeof msg.content === "string") {
+ const filteredContent = filterOutSupermemories(msg.content)
+ if (filteredContent) {
+ messages.push({
+ role: msg.role as "user" | "assistant" | "system" | "tool",
+ content: filteredContent,
+ })
+ }
+ } else {
+ const contentParts = msg.content
+ .map((c) => {
+ if (c.type === "text") {
+ const filteredText = filterOutSupermemories(c.text)
+ if (filteredText) {
+ return {
+ type: "text" as const,
+ text: filteredText,
+ }
+ }
+ }
+ if (
+ c.type === "file" &&
+ typeof c.data === "string" &&
+ c.mediaType.startsWith("image/")
+ ) {
+ return {
+ type: "image_url" as const,
+ image_url: { url: c.data },
+ }
+ }
+ return null
+ })
+ .filter((part) => part !== null)
+
+ if (contentParts.length > 0) {
+ messages.push({
+ role: msg.role as "user" | "assistant" | "system" | "tool",
+ content: contentParts,
+ })
+ }
+ }
+ }
+
+ if (assistantResponseText) {
+ messages.push({
+ role: "assistant",
+ content: assistantResponseText,
+ })
+ }
+
+ return messages
+}
+
const addMemoryTool = async (
client: Supermemory,
containerTag: string,
@@ -38,21 +102,47 @@ const addMemoryTool = async (
assistantResponseText: string,
params: LanguageModelV2CallOptions,
logger: Logger,
+ apiKey: string,
+ baseUrl: string,
): Promise<void> => {
- const userMessage = getLastUserMessage(params)
- const content = conversationId
- ? `${getConversationContent(params)} \n\n Assistant: ${assistantResponseText}`
- : `User: ${userMessage} \n\n Assistant: ${assistantResponseText}`
const customId = conversationId ? `conversation:${conversationId}` : undefined
try {
+ if (customId && conversationId) {
+ const conversationMessages = convertToConversationMessages(
+ params,
+ assistantResponseText,
+ )
+
+ const response = await addConversation({
+ conversationId,
+ messages: conversationMessages,
+ containerTags: [containerTag],
+ apiKey,
+ baseUrl,
+ })
+
+ logger.info("Conversation saved successfully via /v4/conversations", {
+ containerTag,
+ conversationId,
+ messageCount: conversationMessages.length,
+ responseId: response.id,
+ })
+ return
+ }
+
+ const userMessage = getLastUserMessage(params)
+ const content = conversationId
+ ? `${getConversationContent(params)} \n\n Assistant: ${assistantResponseText}`
+ : `User: ${userMessage} \n\n Assistant: ${assistantResponseText}`
+
const response = await client.memories.add({
content,
containerTags: [containerTag],
customId,
})
- logger.info("Memory saved successfully", {
+ logger.info("Memory saved successfully via /v3/documents", {
containerTag,
customId,
content,
@@ -73,11 +163,16 @@ export const createSupermemoryMiddleware = (
verbose = false,
mode: "profile" | "query" | "full" = "profile",
addMemory: "always" | "never" = "never",
+ baseUrl?: string,
): LanguageModelV2Middleware => {
const logger = createLogger(verbose)
+ const normalizedBaseUrl = normalizeBaseUrl(baseUrl)
const client = new Supermemory({
apiKey,
+ ...(normalizedBaseUrl !== "https://api.supermemory.ai"
+ ? { baseURL: normalizedBaseUrl }
+ : {}),
})
return {
@@ -102,6 +197,7 @@ export const createSupermemoryMiddleware = (
containerTag,
logger,
mode,
+ normalizedBaseUrl,
)
return transformedParams
},
@@ -123,6 +219,8 @@ export const createSupermemoryMiddleware = (
assistantResponseText,
params,
logger,
+ apiKey,
+ normalizedBaseUrl,
)
}
@@ -168,6 +266,8 @@ export const createSupermemoryMiddleware = (
generatedText,
params,
logger,
+ apiKey,
+ normalizedBaseUrl,
)
}
},
diff --git a/packages/tools/test-supermemory.ts b/packages/tools/test-supermemory.ts
new file mode 100644
index 00000000..5a96acc0
--- /dev/null
+++ b/packages/tools/test-supermemory.ts
@@ -0,0 +1,53 @@
+import { OpenAI } from "openai"
+import { withSupermemory } from "./src/openai"
+
+// Make sure to set these environment variables:
+// OPENAI_API_KEY=your_openai_api_key
+// SUPERMEMORY_API_KEY=your_supermemory_api_key
+
+const openai = new OpenAI({
+ apiKey: process.env.OPENAI_API_KEY,
+})
+
+// Wrap OpenAI client with supermemory
+const openaiWithSupermemory = withSupermemory(openai, "test_user_123", {
+ verbose: true, // Enable logging to see what's happening
+ mode: "full", // Search both profile and query memories
+ addMemory: "always", // Auto-save conversations as memories
+})
+
+// async function testChatCompletion() {
+// console.log("\n=== Testing Chat Completion ===")
+// const response = await openaiWithSupermemory.chat.completions.create({
+// model: "gpt-4o-mini",
+// messages: [
+// { role: "user", content: "My favorite color is blue" },
+// ],
+// })
+
+// console.log("Response:", response.choices[0]?.message.content)
+// }
+
+async function testResponses() {
+ console.log("\n=== Testing Responses API ===")
+ const response = await openaiWithSupermemory.chat.completions.create({
+ model: "gpt-4o",
+ messages: [
+ { role: "user", content: "what's my favoritge color?" },
+ ],
+ })
+
+ console.log("Response:", JSON.stringify(response.choices[0]?.message.content, null, 2))
+}
+
+// Run tests
+async function main() {
+ try {
+ // await testChatCompletion()
+ await testResponses()
+ } catch (error) {
+ console.error("Error:", error)
+ }
+}
+
+main()
diff --git a/packages/tools/test/chatapp/.gitignore b/packages/tools/test/chatapp/.gitignore
index 5ef6a520..785d2ca6 100644
--- a/packages/tools/test/chatapp/.gitignore
+++ b/packages/tools/test/chatapp/.gitignore
@@ -1,5 +1,4 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
-
# dependencies
/node_modules
/.pnp
diff --git a/packages/tools/test/chatapp/app/api/openai-chat/route.ts b/packages/tools/test/chatapp/app/api/openai-chat/route.ts
index 1787cc58..4e926fb0 100644
--- a/packages/tools/test/chatapp/app/api/openai-chat/route.ts
+++ b/packages/tools/test/chatapp/app/api/openai-chat/route.ts
@@ -19,6 +19,7 @@ export async function POST(req: Request) {
mode: "full",
addMemory: "always",
verbose: true,
+ baseUrl: process.env.SUPERMEMORY_BASE_URL,
})
const completion = await openaiWithSupermemory.chat.completions.create({
diff --git a/packages/tools/test/chatapp/app/api/stream/route.ts b/packages/tools/test/chatapp/app/api/stream/route.ts
index d7cbbbe0..175f4a65 100644
--- a/packages/tools/test/chatapp/app/api/stream/route.ts
+++ b/packages/tools/test/chatapp/app/api/stream/route.ts
@@ -7,6 +7,7 @@ const model = withSupermemory(openai("gpt-4"), "user-123", {
addMemory: "always",
conversationId: "chat-session",
verbose: true,
+ baseUrl: process.env.SUPERMEMORY_BASE_URL,
})
export async function POST(req: Request) {