From b3aab91489af0adb2e16215a02c20bc83259b42f Mon Sep 17 00:00:00 2001 From: MaheshtheDev <38828053+MaheshtheDev@users.noreply.github.com> Date: Mon, 27 Oct 2025 20:08:11 +0000 Subject: feat: withSupermemory for openai sdk (#531) ### TL;DR Added OpenAI SDK middleware support for SuperMemory integration, allowing direct memory injection without AI SDK dependency. ### What changed? - Added `withSupermemory` middleware for OpenAI SDK that automatically injects relevant memories into chat completions - Implemented memory search and injection functionality for OpenAI clients - Restructured the OpenAI module to separate tools and middleware functionality - Updated README with comprehensive documentation and examples for the new OpenAI middleware - Added test implementation with a Next.js API route example - Reorganized package exports to support the new structure --- packages/tools/README.md | 103 +++++- packages/tools/package.json | 4 +- packages/tools/src/index.ts | 3 +- packages/tools/src/openai.ts | 276 --------------- packages/tools/src/openai/index.ts | 92 +++++ packages/tools/src/openai/middleware.ts | 393 +++++++++++++++++++++ packages/tools/src/openai/tools.ts | 276 +++++++++++++++ .../test/chatapp/app/api/openai-chat/route.ts | 31 ++ .../test/chatapp/app/openai-chat/[chatId]/page.tsx | 109 ++++++ .../test/chatapp/app/openai-chat/new/page.tsx | 10 + packages/tools/test/chatapp/package.json | 1 + packages/tools/tsdown.config.ts | 2 +- 12 files changed, 1018 insertions(+), 282 deletions(-) delete mode 100644 packages/tools/src/openai.ts create mode 100644 packages/tools/src/openai/index.ts create mode 100644 packages/tools/src/openai/middleware.ts create mode 100644 packages/tools/src/openai/tools.ts create mode 100644 packages/tools/test/chatapp/app/api/openai-chat/route.ts create mode 100644 packages/tools/test/chatapp/app/openai-chat/[chatId]/page.tsx create mode 100644 packages/tools/test/chatapp/app/openai-chat/new/page.tsx (limited to 'packages') diff --git 
a/packages/tools/README.md b/packages/tools/README.md
index aae102f3..6db2f7f8 100644
--- a/packages/tools/README.md
+++ b/packages/tools/README.md
@@ -19,8 +19,8 @@ yarn add @supermemory/tools
 
 ## Usage
 
 The package provides two submodule imports:
 
-- `@supermemory/tools/ai-sdk` - For use with the AI SDK framework
-- `@supermemory/tools/openai` - For use with OpenAI's function calling
+- `@supermemory/tools/ai-sdk` - For use with the AI SDK framework (includes `withSupermemory` middleware)
+- `@supermemory/tools/openai` - For use with OpenAI SDK (includes `withSupermemory` middleware and function calling tools)
 
 ### AI SDK Usage
 
@@ -223,6 +223,105 @@ const modelWithOptions = withSupermemory(openai("gpt-4"), "user-123", {
 })
 ```
 
+### OpenAI SDK Usage
+
+#### OpenAI Middleware with Supermemory
+
+The `withSupermemory` function wraps an existing OpenAI client with SuperMemory middleware so memories are automatically injected:
+
+```typescript
+import OpenAI from "openai"
+import { withSupermemory } from "@supermemory/tools/openai"
+
+const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
+const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+  conversationId: "conversation-456",
+  mode: "full",
+  addMemory: "always",
+})
+
+// Use directly with chat completions - memories are automatically injected
+const completion = await openaiWithSupermemory.chat.completions.create({
+  model: "gpt-4o-mini",
+  messages: [
+    { role: "user", content: "What do you remember about my preferences?" }
+  ],
+})
+
+console.log(completion.choices[0]?.message?.content)
+```
+
+#### OpenAI Middleware Options
+
+The middleware supports the same configuration options as the AI SDK version:
+
+```typescript
+const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+  conversationId: "conversation-456", // Group messages for contextual memory
+  mode: "full", // "profile" | "query" | "full"
+  addMemory: "always", // "always" | "never"
+  verbose: true, // Enable detailed logging
+})
+```
+
+#### Advanced Usage with Custom OpenAI Options
+
+Custom OpenAI client options are configured on the `OpenAI` client you pass in:
+
+```typescript
+import OpenAI from "openai"
+import { withSupermemory } from "@supermemory/tools/openai"
+
+const openai = new OpenAI({
+  apiKey: "custom-api-key",
+  baseURL: "https://api.openai.com/v1",
+  organization: "org-123",
+})
+
+const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+  mode: "profile",
+  addMemory: "always",
+})
+
+const completion = await openaiWithSupermemory.chat.completions.create({
+  model: "gpt-4o-mini",
+  messages: [{ role: "user", content: "Tell me about my preferences" }],
+})
+```
+
+#### Next.js API Route Example
+
+Here's a complete example for a Next.js API route:
+
+```typescript
+// app/api/chat/route.ts
+import OpenAI from "openai"
+import { withSupermemory } from "@supermemory/tools/openai"
+
+export async function POST(req: Request) {
+  const { messages, conversationId } = (await req.json()) as {
+    messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[]
+    conversationId: string
+  }
+
+  const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
+  const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+    conversationId,
+    mode: "full",
+    addMemory: "always",
+  })
+
+  const completion = await openaiWithSupermemory.chat.completions.create({
+    model: "gpt-4o-mini",
+    messages,
+  })
+
+  const message = completion.choices?.[0]?.message
+  return Response.json({ message, usage: completion.usage })
+}
+```
+
 ### OpenAI Function Calling Usage
 
 ```typescript
diff --git
a/packages/tools/package.json b/packages/tools/package.json index 2a4f0a0a..371f865b 100644 --- a/packages/tools/package.json +++ b/packages/tools/package.json @@ -1,7 +1,7 @@ { "name": "@supermemory/tools", "type": "module", - "version": "1.2.16", + "version": "1.2.5", "description": "Memory tools for AI SDK and OpenAI function calling with supermemory", "scripts": { "build": "tsdown", @@ -38,7 +38,7 @@ ".": "./dist/index.js", "./ai-sdk": "./dist/ai-sdk.js", "./claude-memory": "./dist/claude-memory.js", - "./openai": "./dist/openai.js", + "./openai": "./dist/openai/index.js", "./package.json": "./package.json" }, "repository": { diff --git a/packages/tools/src/index.ts b/packages/tools/src/index.ts index 4f21246e..404e0943 100644 --- a/packages/tools/src/index.ts +++ b/packages/tools/src/index.ts @@ -1,2 +1,3 @@ -// Export shared types and utilities export type { SupermemoryToolsConfig } from "./types" + +export type { OpenAIMiddlewareOptions } from "./openai" diff --git a/packages/tools/src/openai.ts b/packages/tools/src/openai.ts deleted file mode 100644 index 5c79a9c1..00000000 --- a/packages/tools/src/openai.ts +++ /dev/null @@ -1,276 +0,0 @@ -import type OpenAI from "openai" -import Supermemory from "supermemory" -import { - DEFAULT_VALUES, - PARAMETER_DESCRIPTIONS, - TOOL_DESCRIPTIONS, - getContainerTags, -} from "./shared" -import type { SupermemoryToolsConfig } from "./types" - -/** - * Result types for memory operations - */ -export interface MemorySearchResult { - success: boolean - results?: Awaited>["results"] - count?: number - error?: string -} - -export interface MemoryAddResult { - success: boolean - memory?: Awaited> - error?: string -} - -/** - * Function schemas for OpenAI function calling - */ -export const memoryToolSchemas = { - searchMemories: { - name: "searchMemories", - description: TOOL_DESCRIPTIONS.searchMemories, - parameters: { - type: "object", - properties: { - informationToGet: { - type: "string", - description: 
PARAMETER_DESCRIPTIONS.informationToGet, - }, - includeFullDocs: { - type: "boolean", - description: PARAMETER_DESCRIPTIONS.includeFullDocs, - default: DEFAULT_VALUES.includeFullDocs, - }, - limit: { - type: "number", - description: PARAMETER_DESCRIPTIONS.limit, - default: DEFAULT_VALUES.limit, - }, - }, - required: ["informationToGet"], - }, - } satisfies OpenAI.FunctionDefinition, - - addMemory: { - name: "addMemory", - description: TOOL_DESCRIPTIONS.addMemory, - parameters: { - type: "object", - properties: { - memory: { - type: "string", - description: PARAMETER_DESCRIPTIONS.memory, - }, - }, - required: ["memory"], - }, - } satisfies OpenAI.FunctionDefinition, -} as const - -/** - * Create a Supermemory client with configuration - */ -function createClient(apiKey: string, config?: SupermemoryToolsConfig) { - const client = new Supermemory({ - apiKey, - ...(config?.baseUrl && { baseURL: config.baseUrl }), - }) - - const containerTags = getContainerTags(config) - - return { client, containerTags } -} - -/** - * Search memories function - */ -export function createSearchMemoriesFunction( - apiKey: string, - config?: SupermemoryToolsConfig, -) { - const { client, containerTags } = createClient(apiKey, config) - - return async function searchMemories({ - informationToGet, - includeFullDocs = DEFAULT_VALUES.includeFullDocs, - limit = DEFAULT_VALUES.limit, - }: { - informationToGet: string - includeFullDocs?: boolean - limit?: number - }): Promise { - try { - const response = await client.search.execute({ - q: informationToGet, - containerTags, - limit, - chunkThreshold: DEFAULT_VALUES.chunkThreshold, - includeFullDocs, - }) - - return { - success: true, - results: response.results, - count: response.results?.length || 0, - } - } catch (error) { - return { - success: false, - error: error instanceof Error ? 
error.message : "Unknown error", - } - } - } -} - -/** - * Add memory function - */ -export function createAddMemoryFunction( - apiKey: string, - config?: SupermemoryToolsConfig, -) { - const { client, containerTags } = createClient(apiKey, config) - - return async function addMemory({ - memory, - }: { - memory: string - }): Promise { - try { - const metadata: Record = {} - - const response = await client.memories.add({ - content: memory, - containerTags, - ...(Object.keys(metadata).length > 0 && { metadata }), - }) - - return { - success: true, - memory: response, - } - } catch (error) { - return { - success: false, - error: error instanceof Error ? error.message : "Unknown error", - } - } - } -} - -/** - * Create all memory tools functions - */ -export function supermemoryTools( - apiKey: string, - config?: SupermemoryToolsConfig, -) { - const searchMemories = createSearchMemoriesFunction(apiKey, config) - const addMemory = createAddMemoryFunction(apiKey, config) - - return { - searchMemories, - addMemory, - } -} - -/** - * Get OpenAI function definitions for all memory tools - */ -export function getToolDefinitions(): OpenAI.Chat.Completions.ChatCompletionTool[] { - return [ - { type: "function", function: memoryToolSchemas.searchMemories }, - { type: "function", function: memoryToolSchemas.addMemory }, - ] -} - -/** - * Execute a tool call based on the function name and arguments - */ -export function createToolCallExecutor( - apiKey: string, - config?: SupermemoryToolsConfig, -) { - const tools = supermemoryTools(apiKey, config) - - return async function executeToolCall( - toolCall: OpenAI.Chat.Completions.ChatCompletionMessageToolCall, - ): Promise { - const functionName = toolCall.function.name - const args = JSON.parse(toolCall.function.arguments) - - switch (functionName) { - case "searchMemories": - return JSON.stringify(await tools.searchMemories(args)) - case "addMemory": - return JSON.stringify(await tools.addMemory(args)) - default: - return 
JSON.stringify({ - success: false, - error: `Unknown function: ${functionName}`, - }) - } - } -} - -/** - * Execute tool calls from OpenAI function calling - */ -export function createToolCallsExecutor( - apiKey: string, - config?: SupermemoryToolsConfig, -) { - const executeToolCall = createToolCallExecutor(apiKey, config) - - return async function executeToolCalls( - toolCalls: OpenAI.Chat.Completions.ChatCompletionMessageToolCall[], - ): Promise { - const results = await Promise.all( - toolCalls.map(async (toolCall) => { - const result = await executeToolCall(toolCall) - return { - tool_call_id: toolCall.id, - role: "tool" as const, - content: result, - } - }), - ) - - return results - } -} - -/** - * Individual tool creators for more granular control - */ -export function createSearchMemoriesTool( - apiKey: string, - config?: SupermemoryToolsConfig, -) { - const searchMemories = createSearchMemoriesFunction(apiKey, config) - - return { - definition: { - type: "function" as const, - function: memoryToolSchemas.searchMemories, - }, - execute: searchMemories, - } -} - -export function createAddMemoryTool( - apiKey: string, - config?: SupermemoryToolsConfig, -) { - const addMemory = createAddMemoryFunction(apiKey, config) - - return { - definition: { - type: "function" as const, - function: memoryToolSchemas.addMemory, - }, - execute: addMemory, - } -} diff --git a/packages/tools/src/openai/index.ts b/packages/tools/src/openai/index.ts new file mode 100644 index 00000000..517fe282 --- /dev/null +++ b/packages/tools/src/openai/index.ts @@ -0,0 +1,92 @@ +import type OpenAI from "openai" +import { + createOpenAIMiddleware, + type OpenAIMiddlewareOptions, +} from "./middleware" + +/** + * Wraps an OpenAI client with SuperMemory middleware to automatically inject relevant memories + * into the system prompt based on the user's message content. 
+ *
+ * This middleware searches the supermemory API for relevant memories using the container tag
+ * and user message, then either appends memories to an existing system prompt or creates
+ * a new system prompt with the memories.
+ *
+ * @param openaiClient - The OpenAI client to wrap with SuperMemory middleware
+ * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
+ * @param options - Optional configuration options for the middleware
+ * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation
+ * @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false)
+ * @param options.mode - Optional mode for memory search: "profile" (default), "query", or "full"
+ * @param options.addMemory - Optional mode for memory addition: "always", "never" (default)
+ *
+ * @returns An OpenAI client with SuperMemory middleware injected
+ *
+ * @example
+ * ```typescript
+ * import { withSupermemory } from "@supermemory/tools/openai"
+ * import OpenAI from "openai"
+ *
+ * // Create OpenAI client with supermemory middleware
+ * const openai = new OpenAI({
+ *   apiKey: process.env.OPENAI_API_KEY,
+ * })
+ * const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+ *   conversationId: "conversation-456",
+ *   mode: "full",
+ *   addMemory: "always"
+ * })
+ *
+ * // Use normally - memories will be automatically injected
+ * const response = await openaiWithSupermemory.chat.completions.create({
+ *   model: "gpt-4",
+ *   messages: [
+ *     { role: "user", content: "What's my favorite programming language?"
} + * ] + * }) + * ``` + * + * @throws {Error} When SUPERMEMORY_API_KEY environment variable is not set + * @throws {Error} When supermemory API request fails + */ +export function withSupermemory( + openaiClient: OpenAI, + containerTag: string, + options?: OpenAIMiddlewareOptions, +) { + if (!process.env.SUPERMEMORY_API_KEY) { + throw new Error("SUPERMEMORY_API_KEY is not set") + } + + const conversationId = options?.conversationId + const verbose = options?.verbose ?? false + const mode = options?.mode ?? "profile" + const addMemory = options?.addMemory ?? "never" + + const openaiWithSupermemory = createOpenAIMiddleware( + openaiClient, + containerTag, + { + conversationId, + verbose, + mode, + addMemory, + }, + ) + + return openaiWithSupermemory +} + +export type { OpenAIMiddlewareOptions } +export type { MemorySearchResult, MemoryAddResult } from "./tools" +export { + createSearchMemoriesFunction, + createAddMemoryFunction, + supermemoryTools, + getToolDefinitions, + createToolCallExecutor, + createToolCallsExecutor, + createSearchMemoriesTool, + createAddMemoryTool, + memoryToolSchemas, +} from "./tools" diff --git a/packages/tools/src/openai/middleware.ts b/packages/tools/src/openai/middleware.ts new file mode 100644 index 00000000..8d83874d --- /dev/null +++ b/packages/tools/src/openai/middleware.ts @@ -0,0 +1,393 @@ +import type OpenAI from "openai" +import Supermemory from "supermemory" +import { createLogger, type Logger } from "../vercel/logger" +import { convertProfileToMarkdown } from "../vercel/util" + +export interface OpenAIMiddlewareOptions { + conversationId?: string + verbose?: boolean + mode?: "profile" | "query" | "full" + addMemory?: "always" | "never" +} + +interface SupermemoryProfileSearch { + profile: { + static?: Array<{ memory: string; metadata?: Record }> + dynamic?: Array<{ memory: string; metadata?: Record }> + } + searchResults: { + results: Array<{ memory: string; metadata?: Record }> + } +} + +/** + * Extracts the last user message 
from an array of chat completion messages. + * + * Searches through the messages array in reverse order to find the most recent + * message with role "user" and returns its content as a string. + * + * @param messages - Array of chat completion message parameters + * @returns The content of the last user message, or empty string if none found + * + * @example + * ```typescript + * const messages = [ + * { role: "system", content: "You are a helpful assistant." }, + * { role: "user", content: "Hello there!" }, + * { role: "assistant", content: "Hi! How can I help you?" }, + * { role: "user", content: "What's the weather like?" } + * ] + * + * const lastMessage = getLastUserMessage(messages) + * // Returns: "What's the weather like?" + * ``` + */ +const getLastUserMessage = ( + messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], +) => { + const lastUserMessage = messages + .slice() + .reverse() + .find((msg) => msg.role === "user") + + return typeof lastUserMessage?.content === "string" + ? lastUserMessage.content + : "" +} + +/** + * Searches for memories using the SuperMemory profile API. + * + * Makes a POST request to the SuperMemory API to retrieve user profile memories + * and search results based on the provided container tag and optional query text. + * + * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID) + * @param queryText - Optional query text to search for specific memories. 
If empty, returns all profile memories + * @returns Promise that resolves to the SuperMemory profile search response + * @throws {Error} When the API request fails or returns an error status + * + * @example + * ```typescript + * // Search with query + * const results = await supermemoryProfileSearch("user-123", "favorite programming language") + * + * // Get all profile memories + * const profile = await supermemoryProfileSearch("user-123", "") + * ``` + */ +const supermemoryProfileSearch = async ( + containerTag: string, + queryText: string, +): Promise => { + const payload = queryText + ? JSON.stringify({ + q: queryText, + containerTag: containerTag, + }) + : JSON.stringify({ + containerTag: containerTag, + }) + + try { + const response = await fetch("https://api.supermemory.ai/v4/profile", { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${process.env.SUPERMEMORY_API_KEY}`, + }, + body: payload, + }) + + if (!response.ok) { + const errorText = await response.text().catch(() => "Unknown error") + throw new Error( + `Supermemory profile search failed: ${response.status} ${response.statusText}. ${errorText}`, + ) + } + + return await response.json() + } catch (error) { + if (error instanceof Error) { + throw error + } + throw new Error(`Supermemory API request failed: ${error}`) + } +} + +/** + * Adds memory-enhanced system prompts to chat completion messages. + * + * Searches for relevant memories based on the specified mode and injects them + * into the conversation. If a system prompt already exists, memories are appended + * to it. Otherwise, a new system prompt is created with the memories. 
+ * + * @param messages - Array of chat completion message parameters + * @param containerTag - The container tag/identifier for memory search + * @param logger - Logger instance for debugging and info output + * @param mode - Memory search mode: "profile" (all memories), "query" (search-based), or "full" (both) + * @returns Promise that resolves to enhanced messages with memory-injected system prompt + * + * @example + * ```typescript + * const messages = [ + * { role: "user", content: "What's my favorite programming language?" } + * ] + * + * const enhancedMessages = await addSystemPrompt( + * messages, + * "user-123", + * logger, + * "full" + * ) + * // Returns messages with system prompt containing relevant memories + * ``` + */ +const addSystemPrompt = async ( + messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], + containerTag: string, + logger: Logger, + mode: "profile" | "query" | "full", +) => { + const systemPromptExists = messages.some((msg) => msg.role === "system") + + const queryText = mode !== "profile" ? getLastUserMessage(messages) : "" + + const memoriesResponse = await supermemoryProfileSearch( + containerTag, + queryText, + ) + + const memoryCountStatic = memoriesResponse.profile.static?.length || 0 + const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0 + + logger.info("Memory search completed", { + containerTag, + memoryCountStatic, + memoryCountDynamic, + queryText: + queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""), + mode, + }) + + const profileData = + mode !== "query" + ? convertProfileToMarkdown({ + profile: { + static: memoriesResponse.profile.static?.map((item) => item.memory), + dynamic: memoriesResponse.profile.dynamic?.map( + (item) => item.memory, + ), + }, + searchResults: { + results: memoriesResponse.searchResults.results.map((item) => ({ + memory: item.memory, + })) as [{ memory: string }], + }, + }) + : "" + const searchResultsMemories = + mode !== "profile" + ? 
`Search results for user's recent message: \n${memoriesResponse.searchResults.results + .map((result) => `- ${result.memory}`) + .join("\n")}` + : "" + + const memories = `${profileData}\n${searchResultsMemories}`.trim() + + if (memories) { + logger.debug("Memory content preview", { + content: memories, + fullLength: memories.length, + }) + } + + if (systemPromptExists) { + logger.debug("Added memories to existing system prompt") + return messages.map((msg) => + msg.role === "system" + ? { ...msg, content: `${msg.content} \n ${memories}` } + : msg, + ) + } + + logger.debug( + "System prompt does not exist, created system prompt with memories", + ) + return [{ role: "system" as const, content: memories }, ...messages] +} + +/** + * Converts an array of chat completion messages into a formatted conversation string. + * + * Transforms the messages array into a readable conversation format where each + * message is prefixed with its role (User/Assistant) and messages are separated + * by double newlines. + * + * @param messages - Array of chat completion message parameters + * @returns Formatted conversation string with role prefixes + * + * @example + * ```typescript + * const messages = [ + * { role: "user", content: "Hello!" }, + * { role: "assistant", content: "Hi there!" }, + * { role: "user", content: "How are you?" } + * ] + * + * const conversation = getConversationContent(messages) + * // Returns: "User: Hello!\n\nAssistant: Hi there!\n\nUser: How are you?" + * ``` + */ +const getConversationContent = ( + messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[], +) => { + return messages + .map((msg) => { + const role = msg.role === "user" ? "User" : "Assistant" + const content = typeof msg.content === "string" ? msg.content : "" + return `${role}: ${content}` + }) + .join("\n\n") +} + +/** + * Adds a new memory to the SuperMemory system. + * + * Saves the provided content as a memory with the specified container tag and + * optional custom ID. 
Logs success or failure information for debugging. + * + * @param client - SuperMemory client instance + * @param containerTag - The container tag/identifier for the memory + * @param content - The content to save as a memory + * @param customId - Optional custom ID for the memory (e.g., conversation ID) + * @param logger - Logger instance for debugging and info output + * @returns Promise that resolves when memory is saved (or fails silently) + * + * @example + * ```typescript + * await addMemoryTool( + * supermemoryClient, + * "user-123", + * "User prefers React with TypeScript", + * "conversation-456", + * logger + * ) + * ``` + */ +const addMemoryTool = async ( + client: Supermemory, + containerTag: string, + content: string, + customId: string | undefined, + logger: Logger, +): Promise => { + try { + const response = await client.memories.add({ + content, + containerTags: [containerTag], + customId, + }) + + logger.info("Memory saved successfully", { + containerTag, + customId, + contentLength: content.length, + memoryId: response.id, + }) + } catch (error) { + logger.error("Error saving memory", { + error: error instanceof Error ? error.message : "Unknown error", + }) + } +} + +/** + * Creates SuperMemory middleware for OpenAI clients. + * + * This function creates middleware that automatically injects relevant memories + * into OpenAI chat completions and optionally saves new memories. The middleware + * can wrap existing OpenAI clients or create new ones with SuperMemory capabilities. 
+ *
+ * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
+ * @param options - Optional configuration options for the middleware
+ * @param options.conversationId - Optional conversation ID to group messages for contextual memory generation
+ * @param options.verbose - Enable detailed logging of memory operations (default: false)
+ * @param options.mode - Memory search mode: "profile" (all memories), "query" (search-based), or "full" (both) (default: "profile")
+ * @param options.addMemory - Automatic memory storage mode: "always" or "never" (default: "never")
+ * @returns The OpenAI client with `chat.completions.create` wrapped to inject memories
+ * @throws {Error} When SUPERMEMORY_API_KEY environment variable is not set
+ *
+ * @example
+ * ```typescript
+ * const openaiWithSupermemory = createOpenAIMiddleware(openai, "user-123", {
+ *   conversationId: "conversation-456",
+ *   mode: "full",
+ *   addMemory: "always",
+ *   verbose: true
+ * })
+ *
+ * ```
+ */
+export function createOpenAIMiddleware(
+	openaiClient: OpenAI,
+	containerTag: string,
+	options?: OpenAIMiddlewareOptions,
+) {
+	const logger = createLogger(options?.verbose ?? false)
+	const client = new Supermemory({
+		apiKey: process.env.SUPERMEMORY_API_KEY,
+	})
+	const { conversationId, mode = "profile", addMemory = "never" } = options ?? {}
+	const originalCreate = openaiClient.chat.completions.create
+
+	const createWithMemory = async (
+		params: OpenAI.Chat.Completions.ChatCompletionCreateParams,
+	) => {
+		const messages = Array.isArray(params.messages) ? params.messages : []
+
+		if (addMemory === "always") {
+			const userMessage = getLastUserMessage(messages)
+			if (userMessage?.trim()) {
+				const content = conversationId
+					? getConversationContent(messages)
+					: userMessage
+				const customId = conversationId
+					?
`conversation:${conversationId}` + : undefined + + addMemoryTool(client, containerTag, content, customId, logger) + } + } + + if (mode !== "profile") { + const userMessage = getLastUserMessage(messages) + if (!userMessage) { + logger.debug("No user message found, skipping memory search") + return originalCreate.call(openaiClient.chat.completions, params) + } + } + + logger.info("Starting memory search", { + containerTag, + conversationId, + mode, + }) + + const enhancedMessages = await addSystemPrompt( + messages, + containerTag, + logger, + mode, + ) + + return originalCreate.call(openaiClient.chat.completions, { + ...params, + messages: enhancedMessages, + }) + } + + openaiClient.chat.completions.create = + createWithMemory as typeof originalCreate + + return openaiClient +} diff --git a/packages/tools/src/openai/tools.ts b/packages/tools/src/openai/tools.ts new file mode 100644 index 00000000..4078df09 --- /dev/null +++ b/packages/tools/src/openai/tools.ts @@ -0,0 +1,276 @@ +import type OpenAI from "openai" +import Supermemory from "supermemory" +import { + DEFAULT_VALUES, + PARAMETER_DESCRIPTIONS, + TOOL_DESCRIPTIONS, + getContainerTags, +} from "../shared" +import type { SupermemoryToolsConfig } from "../types" + +/** + * Result types for memory operations + */ +export interface MemorySearchResult { + success: boolean + results?: Awaited>["results"] + count?: number + error?: string +} + +export interface MemoryAddResult { + success: boolean + memory?: Awaited> + error?: string +} + +/** + * Function schemas for OpenAI function calling + */ +export const memoryToolSchemas = { + searchMemories: { + name: "searchMemories", + description: TOOL_DESCRIPTIONS.searchMemories, + parameters: { + type: "object", + properties: { + informationToGet: { + type: "string", + description: PARAMETER_DESCRIPTIONS.informationToGet, + }, + includeFullDocs: { + type: "boolean", + description: PARAMETER_DESCRIPTIONS.includeFullDocs, + default: DEFAULT_VALUES.includeFullDocs, + }, + 
limit: { + type: "number", + description: PARAMETER_DESCRIPTIONS.limit, + default: DEFAULT_VALUES.limit, + }, + }, + required: ["informationToGet"], + }, + } satisfies OpenAI.FunctionDefinition, + + addMemory: { + name: "addMemory", + description: TOOL_DESCRIPTIONS.addMemory, + parameters: { + type: "object", + properties: { + memory: { + type: "string", + description: PARAMETER_DESCRIPTIONS.memory, + }, + }, + required: ["memory"], + }, + } satisfies OpenAI.FunctionDefinition, +} as const + +/** + * Create a Supermemory client with configuration + */ +function createClient(apiKey: string, config?: SupermemoryToolsConfig) { + const client = new Supermemory({ + apiKey, + ...(config?.baseUrl && { baseURL: config.baseUrl }), + }) + + const containerTags = getContainerTags(config) + + return { client, containerTags } +} + +/** + * Search memories function + */ +export function createSearchMemoriesFunction( + apiKey: string, + config?: SupermemoryToolsConfig, +) { + const { client, containerTags } = createClient(apiKey, config) + + return async function searchMemories({ + informationToGet, + includeFullDocs = DEFAULT_VALUES.includeFullDocs, + limit = DEFAULT_VALUES.limit, + }: { + informationToGet: string + includeFullDocs?: boolean + limit?: number + }): Promise { + try { + const response = await client.search.execute({ + q: informationToGet, + containerTags, + limit, + chunkThreshold: DEFAULT_VALUES.chunkThreshold, + includeFullDocs, + }) + + return { + success: true, + results: response.results, + count: response.results?.length || 0, + } + } catch (error) { + return { + success: false, + error: error instanceof Error ? 
error.message : "Unknown error", + } + } + } +} + +/** + * Add memory function + */ +export function createAddMemoryFunction( + apiKey: string, + config?: SupermemoryToolsConfig, +) { + const { client, containerTags } = createClient(apiKey, config) + + return async function addMemory({ + memory, + }: { + memory: string + }): Promise { + try { + const metadata: Record = {} + + const response = await client.memories.add({ + content: memory, + containerTags, + ...(Object.keys(metadata).length > 0 && { metadata }), + }) + + return { + success: true, + memory: response, + } + } catch (error) { + return { + success: false, + error: error instanceof Error ? error.message : "Unknown error", + } + } + } +} + +/** + * Create all memory tools functions + */ +export function supermemoryTools( + apiKey: string, + config?: SupermemoryToolsConfig, +) { + const searchMemories = createSearchMemoriesFunction(apiKey, config) + const addMemory = createAddMemoryFunction(apiKey, config) + + return { + searchMemories, + addMemory, + } +} + +/** + * Get OpenAI function definitions for all memory tools + */ +export function getToolDefinitions(): OpenAI.Chat.Completions.ChatCompletionTool[] { + return [ + { type: "function", function: memoryToolSchemas.searchMemories }, + { type: "function", function: memoryToolSchemas.addMemory }, + ] +} + +/** + * Execute a tool call based on the function name and arguments + */ +export function createToolCallExecutor( + apiKey: string, + config?: SupermemoryToolsConfig, +) { + const tools = supermemoryTools(apiKey, config) + + return async function executeToolCall( + toolCall: OpenAI.Chat.Completions.ChatCompletionMessageToolCall, + ): Promise { + const functionName = toolCall.function.name + const args = JSON.parse(toolCall.function.arguments) + + switch (functionName) { + case "searchMemories": + return JSON.stringify(await tools.searchMemories(args)) + case "addMemory": + return JSON.stringify(await tools.addMemory(args)) + default: + return 
JSON.stringify({ + success: false, + error: `Unknown function: ${functionName}`, + }) + } + } +} + +/** + * Execute tool calls from OpenAI function calling + */ +export function createToolCallsExecutor( + apiKey: string, + config?: SupermemoryToolsConfig, +) { + const executeToolCall = createToolCallExecutor(apiKey, config) + + return async function executeToolCalls( + toolCalls: OpenAI.Chat.Completions.ChatCompletionMessageToolCall[], + ): Promise { + const results = await Promise.all( + toolCalls.map(async (toolCall) => { + const result = await executeToolCall(toolCall) + return { + tool_call_id: toolCall.id, + role: "tool" as const, + content: result, + } + }), + ) + + return results + } +} + +/** + * Individual tool creators for more granular control + */ +export function createSearchMemoriesTool( + apiKey: string, + config?: SupermemoryToolsConfig, +) { + const searchMemories = createSearchMemoriesFunction(apiKey, config) + + return { + definition: { + type: "function" as const, + function: memoryToolSchemas.searchMemories, + }, + execute: searchMemories, + } +} + +export function createAddMemoryTool( + apiKey: string, + config?: SupermemoryToolsConfig, +) { + const addMemory = createAddMemoryFunction(apiKey, config) + + return { + definition: { + type: "function" as const, + function: memoryToolSchemas.addMemory, + }, + execute: addMemory, + } +} diff --git a/packages/tools/test/chatapp/app/api/openai-chat/route.ts b/packages/tools/test/chatapp/app/api/openai-chat/route.ts new file mode 100644 index 00000000..1787cc58 --- /dev/null +++ b/packages/tools/test/chatapp/app/api/openai-chat/route.ts @@ -0,0 +1,31 @@ +import { OpenAI } from "openai" +//import { withSupermemory } from "@supermemory/tools/openai" +import { withSupermemory } from "../../../../../src/openai" + +export const runtime = "nodejs" + +export async function POST(req: Request) { + const { messages, conversationId } = (await req.json()) as { + messages: 
OpenAI.Chat.Completions.ChatCompletionMessageParam[] + conversationId: string + } + + const openai = new OpenAI({ + apiKey: process.env.OPENAI_API_KEY, + }) + + const openaiWithSupermemory = withSupermemory(openai, "user-123", { + conversationId, + mode: "full", + addMemory: "always", + verbose: true, + }) + + const completion = await openaiWithSupermemory.chat.completions.create({ + model: "gpt-4o-mini", + messages, + }) + + const message = completion.choices?.[0]?.message + return Response.json({ message, usage: completion.usage }) +} diff --git a/packages/tools/test/chatapp/app/openai-chat/[chatId]/page.tsx b/packages/tools/test/chatapp/app/openai-chat/[chatId]/page.tsx new file mode 100644 index 00000000..4ed094cf --- /dev/null +++ b/packages/tools/test/chatapp/app/openai-chat/[chatId]/page.tsx @@ -0,0 +1,109 @@ +"use client" + +import { useParams, useRouter } from "next/navigation" +import { useState } from "react" +import type { OpenAI as OpenAIType } from "openai" + +type ChatMessage = OpenAIType.Chat.Completions.ChatCompletionMessageParam + +export default function ChatPage() { + const { chatId } = useParams() as { chatId: string } + const router = useRouter() + const [input, setInput] = useState("") + const [messages, setMessages] = useState([ + { role: "system", content: "You are a helpful assistant." 
}, + ]) + const [isLoading, setIsLoading] = useState(false) + + async function send() { + if (!input.trim() || isLoading) return + + const userMessage: ChatMessage = { role: "user", content: input } + setMessages((prev) => [...prev, userMessage]) + setInput("") + setIsLoading(true) + + try { + const res = await fetch("/api/openai-chat", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + messages: [...messages, userMessage], + conversationId: chatId, + }), + }) + const data = await res.json() + const assistant = data.message as ChatMessage | undefined + if (assistant) { + setMessages((prev) => [...prev, assistant]) + } + } catch (error) { + console.error("Error sending message:", error) + } finally { + setIsLoading(false) + } + } + + return ( +
+
+
+

+ OpenAI + SuperMemory +

+ +
+ +
+ {messages.map((message, index) => ( +
+ + {typeof message.content === "string" ? message.content : ""} + +
+ ))} + {isLoading && ( +
+ + Thinking... + +
+ )} +
+ +
+ setInput(e.target.value)} + onKeyDown={(e) => { + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault() + send() + } + }} + disabled={isLoading} + /> + +
+
+
+ ) +} diff --git a/packages/tools/test/chatapp/app/openai-chat/new/page.tsx b/packages/tools/test/chatapp/app/openai-chat/new/page.tsx new file mode 100644 index 00000000..fb168085 --- /dev/null +++ b/packages/tools/test/chatapp/app/openai-chat/new/page.tsx @@ -0,0 +1,10 @@ +import { redirect } from "next/navigation" + +function generateId() { + return crypto.randomUUID?.() || Math.random().toString(36).slice(2) +} + +export default function NewChatPage() { + const chatId = generateId() + redirect(`/openai-chat/${chatId}`) +} \ No newline at end of file diff --git a/packages/tools/test/chatapp/package.json b/packages/tools/test/chatapp/package.json index 746ca050..2649035d 100644 --- a/packages/tools/test/chatapp/package.json +++ b/packages/tools/test/chatapp/package.json @@ -14,6 +14,7 @@ "next": "16.0.0", "ai": "^4.0.0", "@ai-sdk/openai": "^1.0.0", + "openai": "^4.104.0", "supermemory": "^1.0.0", "@supermemory/tools": "workspace:*" }, diff --git a/packages/tools/tsdown.config.ts b/packages/tools/tsdown.config.ts index cebdda44..48e11ae1 100644 --- a/packages/tools/tsdown.config.ts +++ b/packages/tools/tsdown.config.ts @@ -4,8 +4,8 @@ export default defineConfig({ entry: [ "src/index.ts", "src/ai-sdk.ts", - "src/openai.ts", "src/claude-memory.ts", + "src/openai/index.ts", ], format: "esm", sourcemap: false, -- cgit v1.2.3