Diffstat (limited to 'packages/tools/src/openai')
| -rw-r--r-- | packages/tools/src/openai/index.ts | 92 |
| -rw-r--r-- | packages/tools/src/openai/middleware.ts | 398 |
| -rw-r--r-- | packages/tools/src/openai/tools.ts | 276 |
3 files changed, 766 insertions, 0 deletions
diff --git a/packages/tools/src/openai/index.ts b/packages/tools/src/openai/index.ts
new file mode 100644
index 00000000..517fe282
--- /dev/null
+++ b/packages/tools/src/openai/index.ts
@@ -0,0 +1,92 @@
+import type OpenAI from "openai"
+import {
+	createOpenAIMiddleware,
+	type OpenAIMiddlewareOptions,
+} from "./middleware"
+
+/**
+ * Wraps an OpenAI client with Supermemory middleware to automatically inject relevant memories
+ * into the system prompt based on the user's message content.
+ *
+ * This middleware searches the Supermemory API for relevant memories using the container tag
+ * and user message, then either appends memories to an existing system prompt or creates
+ * a new system prompt with the memories.
+ *
+ * @param openaiClient - The OpenAI client to wrap with Supermemory middleware
+ * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
+ * @param options - Optional configuration options for the middleware
+ * @param options.conversationId - Optional conversation ID to group messages into a single document for contextual memory generation
+ * @param options.verbose - Optional flag to enable detailed logging of memory search and injection process (default: false)
+ * @param options.mode - Optional mode for memory search: "profile" (default), "query", or "full"
+ * @param options.addMemory - Optional mode for memory addition: "always" or "never" (default: "never")
+ *
+ * @returns An OpenAI client with Supermemory middleware injected
+ *
+ * @example
+ * ```typescript
+ * import { withSupermemory } from "@supermemory/tools/openai"
+ * import OpenAI from "openai"
+ *
+ * // Create OpenAI client with Supermemory middleware
+ * const openai = new OpenAI({
+ *   apiKey: process.env.OPENAI_API_KEY,
+ * })
+ * const openaiWithSupermemory = withSupermemory(openai, "user-123", {
+ *   conversationId: "conversation-456",
+ *   mode: "full",
+ *   addMemory: "always"
+ * })
+ *
+ * // Use normally - memories will be automatically injected
+ * const response = await openaiWithSupermemory.chat.completions.create({
+ *   model: "gpt-4",
+ *   messages: [
+ *     { role: "user", content: "What's my favorite programming language?" }
+ *   ]
+ * })
+ * ```
+ *
+ * @throws {Error} When SUPERMEMORY_API_KEY environment variable is not set
+ * @throws {Error} When the Supermemory API request fails
+ */
+export function withSupermemory(
+	openaiClient: OpenAI,
+	containerTag: string,
+	options?: OpenAIMiddlewareOptions,
+) {
+	if (!process.env.SUPERMEMORY_API_KEY) {
+		throw new Error("SUPERMEMORY_API_KEY is not set")
+	}
+
+	const conversationId = options?.conversationId
+	const verbose = options?.verbose ?? false
+	const mode = options?.mode ?? "profile"
+	const addMemory = options?.addMemory ?? "never"
+
+	const openaiWithSupermemory = createOpenAIMiddleware(
+		openaiClient,
+		containerTag,
+		{
+			conversationId,
+			verbose,
+			mode,
+			addMemory,
+		},
+	)
+
+	return openaiWithSupermemory
+}
+
+export type { OpenAIMiddlewareOptions }
+export type { MemorySearchResult, MemoryAddResult } from "./tools"
+export {
+	createSearchMemoriesFunction,
+	createAddMemoryFunction,
+	supermemoryTools,
+	getToolDefinitions,
+	createToolCallExecutor,
+	createToolCallsExecutor,
+	createSearchMemoriesTool,
+	createAddMemoryTool,
+	memoryToolSchemas,
+} from "./tools"
diff --git a/packages/tools/src/openai/middleware.ts b/packages/tools/src/openai/middleware.ts
new file mode 100644
index 00000000..8d83874d
--- /dev/null
+++ b/packages/tools/src/openai/middleware.ts
@@ -0,0 +1,398 @@
+import type OpenAI from "openai"
+import Supermemory from "supermemory"
+import { createLogger, type Logger } from "../vercel/logger"
+import { convertProfileToMarkdown } from "../vercel/util"
+
+export interface OpenAIMiddlewareOptions {
+	conversationId?: string
+	verbose?: boolean
+	mode?: "profile" | "query" | "full"
+	addMemory?: "always" | "never"
+}
+
+interface SupermemoryProfileSearch {
+	profile: {
+		static?: Array<{ memory: string; metadata?: Record<string, unknown> }>
+		dynamic?: Array<{ memory: string; metadata?: Record<string, unknown> }>
+	}
+	searchResults: {
+		results: Array<{ memory: string; metadata?: Record<string, unknown> }>
+	}
+}
+
+/**
+ * Extracts the last user message from an array of chat completion messages.
+ *
+ * Searches through the messages array in reverse order to find the most recent
+ * message with role "user" and returns its content as a string.
+ *
+ * @param messages - Array of chat completion message parameters
+ * @returns The content of the last user message, or an empty string if none is found
+ *
+ * @example
+ * ```typescript
+ * const messages = [
+ *   { role: "system", content: "You are a helpful assistant." },
+ *   { role: "user", content: "Hello there!" },
+ *   { role: "assistant", content: "Hi! How can I help you?" },
+ *   { role: "user", content: "What's the weather like?" }
+ * ]
+ *
+ * const lastMessage = getLastUserMessage(messages)
+ * // Returns: "What's the weather like?"
+ * ```
+ */
+const getLastUserMessage = (
+	messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
+) => {
+	const lastUserMessage = messages
+		.slice()
+		.reverse()
+		.find((msg) => msg.role === "user")
+
+	return typeof lastUserMessage?.content === "string"
+		? lastUserMessage.content
+		: ""
+}
+
+/**
+ * Searches for memories using the Supermemory profile API.
+ *
+ * Makes a POST request to the Supermemory API to retrieve user profile memories
+ * and search results based on the provided container tag and optional query text.
+ *
+ * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
+ * @param queryText - Optional query text to search for specific memories. If empty, returns all profile memories
+ * @returns Promise that resolves to the Supermemory profile search response
+ * @throws {Error} When the API request fails or returns an error status
+ *
+ * @example
+ * ```typescript
+ * // Search with query
+ * const results = await supermemoryProfileSearch("user-123", "favorite programming language")
+ *
+ * // Get all profile memories
+ * const profile = await supermemoryProfileSearch("user-123", "")
+ * ```
+ */
+const supermemoryProfileSearch = async (
+	containerTag: string,
+	queryText: string,
+): Promise<SupermemoryProfileSearch> => {
+	const payload = queryText
+		? JSON.stringify({
+				q: queryText,
+				containerTag: containerTag,
+			})
+		: JSON.stringify({
+				containerTag: containerTag,
+			})
+
+	try {
+		const response = await fetch("https://api.supermemory.ai/v4/profile", {
+			method: "POST",
+			headers: {
+				"Content-Type": "application/json",
+				Authorization: `Bearer ${process.env.SUPERMEMORY_API_KEY}`,
+			},
+			body: payload,
+		})
+
+		if (!response.ok) {
+			const errorText = await response.text().catch(() => "Unknown error")
+			throw new Error(
+				`Supermemory profile search failed: ${response.status} ${response.statusText}. ${errorText}`,
+			)
+		}
+
+		return await response.json()
+	} catch (error) {
+		if (error instanceof Error) {
+			throw error
+		}
+		throw new Error(`Supermemory API request failed: ${error}`)
+	}
+}
+
+/**
+ * Adds memory-enhanced system prompts to chat completion messages.
+ *
+ * Searches for relevant memories based on the specified mode and injects them
+ * into the conversation. If a system prompt already exists, memories are appended
+ * to it. Otherwise, a new system prompt is created with the memories.
+ *
+ * @param messages - Array of chat completion message parameters
+ * @param containerTag - The container tag/identifier for memory search
+ * @param logger - Logger instance for debugging and info output
+ * @param mode - Memory search mode: "profile" (all memories), "query" (search-based), or "full" (both)
+ * @returns Promise that resolves to enhanced messages with a memory-injected system prompt
+ *
+ * @example
+ * ```typescript
+ * const messages = [
+ *   { role: "user", content: "What's my favorite programming language?" }
+ * ]
+ *
+ * const enhancedMessages = await addSystemPrompt(
+ *   messages,
+ *   "user-123",
+ *   logger,
+ *   "full"
+ * )
+ * // Returns messages with system prompt containing relevant memories
+ * ```
+ */
+const addSystemPrompt = async (
+	messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
+	containerTag: string,
+	logger: Logger,
+	mode: "profile" | "query" | "full",
+) => {
+	const systemPromptExists = messages.some((msg) => msg.role === "system")
+
+	const queryText = mode !== "profile" ? getLastUserMessage(messages) : ""
+
+	const memoriesResponse = await supermemoryProfileSearch(
+		containerTag,
+		queryText,
+	)
+
+	const memoryCountStatic = memoriesResponse.profile.static?.length || 0
+	const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0
+
+	logger.info("Memory search completed", {
+		containerTag,
+		memoryCountStatic,
+		memoryCountDynamic,
+		queryText:
+			queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
+		mode,
+	})
+
+	const profileData =
+		mode !== "query"
+			? convertProfileToMarkdown({
+					profile: {
+						static: memoriesResponse.profile.static?.map((item) => item.memory),
+						dynamic: memoriesResponse.profile.dynamic?.map(
+							(item) => item.memory,
+						),
+					},
+					searchResults: {
+						results: memoriesResponse.searchResults.results.map((item) => ({
+							memory: item.memory,
+						})) as [{ memory: string }],
+					},
+				})
+			: ""
+	const searchResultsMemories =
+		mode !== "profile"
+			? `Search results for user's recent message: \n${memoriesResponse.searchResults.results
+					.map((result) => `- ${result.memory}`)
+					.join("\n")}`
+			: ""
+
+	const memories = `${profileData}\n${searchResultsMemories}`.trim()
+
+	if (memories) {
+		logger.debug("Memory content preview", {
+			content: memories,
+			fullLength: memories.length,
+		})
+	}
+
+	if (systemPromptExists) {
+		logger.debug("Added memories to existing system prompt")
+		return messages.map((msg) =>
+			msg.role === "system"
+				? { ...msg, content: `${msg.content} \n ${memories}` }
+				: msg,
+		)
+	}
+
+	logger.debug(
+		"System prompt does not exist, created system prompt with memories",
+	)
+	return [{ role: "system" as const, content: memories }, ...messages]
+}
+
+/**
+ * Converts an array of chat completion messages into a formatted conversation string.
+ *
+ * Transforms the messages array into a readable conversation format where each
+ * message is prefixed with its role (User/Assistant) and messages are separated
+ * by double newlines.
+ *
+ * @param messages - Array of chat completion message parameters
+ * @returns Formatted conversation string with role prefixes
+ *
+ * @example
+ * ```typescript
+ * const messages = [
+ *   { role: "user", content: "Hello!" },
+ *   { role: "assistant", content: "Hi there!" },
+ *   { role: "user", content: "How are you?" }
+ * ]
+ *
+ * const conversation = getConversationContent(messages)
+ * // Returns: "User: Hello!\n\nAssistant: Hi there!\n\nUser: How are you?"
+ * ```
+ */
+const getConversationContent = (
+	messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[],
+) => {
+	return messages
+		.map((msg) => {
+			const role = msg.role === "user" ? "User" : "Assistant"
+			const content = typeof msg.content === "string" ? msg.content : ""
+			return `${role}: ${content}`
+		})
+		.join("\n\n")
+}
+
+/**
+ * Adds a new memory to the Supermemory system.
+ *
+ * Saves the provided content as a memory with the specified container tag and
+ * optional custom ID. Logs success or failure information for debugging.
+ *
+ * @param client - Supermemory client instance
+ * @param containerTag - The container tag/identifier for the memory
+ * @param content - The content to save as a memory
+ * @param customId - Optional custom ID for the memory (e.g., conversation ID)
+ * @param logger - Logger instance for debugging and info output
+ * @returns Promise that resolves when the memory is saved (failures are logged, not thrown)
+ *
+ * @example
+ * ```typescript
+ * await addMemoryTool(
+ *   supermemoryClient,
+ *   "user-123",
+ *   "User prefers React with TypeScript",
+ *   "conversation-456",
+ *   logger
+ * )
+ * ```
+ */
+const addMemoryTool = async (
+	client: Supermemory,
+	containerTag: string,
+	content: string,
+	customId: string | undefined,
+	logger: Logger,
+): Promise<void> => {
+	try {
+		const response = await client.memories.add({
+			content,
+			containerTags: [containerTag],
+			customId,
+		})
+
+		logger.info("Memory saved successfully", {
+			containerTag,
+			customId,
+			contentLength: content.length,
+			memoryId: response.id,
+		})
+	} catch (error) {
+		logger.error("Error saving memory", {
+			error: error instanceof Error ? error.message : "Unknown error",
+		})
+	}
+}
+
+/**
+ * Creates Supermemory middleware for OpenAI clients.
+ *
+ * This function wraps an existing OpenAI client in place: chat completions
+ * automatically have relevant memories injected into the system prompt, and
+ * new memories can optionally be saved from the user's messages.
+ *
+ * @param openaiClient - The OpenAI client whose chat.completions.create method will be wrapped
+ * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID)
+ * @param options - Optional configuration options for the middleware
+ * @param options.conversationId - Optional conversation ID to group messages for contextual memory generation
+ * @param options.verbose - Enable detailed logging of memory operations (default: false)
+ * @param options.mode - Memory search mode: "profile" (all memories), "query" (search-based), or "full" (both) (default: "profile")
+ * @param options.addMemory - Automatic memory storage mode: "always" or "never" (default: "never")
+ * @returns The same OpenAI client with chat.completions.create wrapped to inject memories
+ * @throws {Error} When SUPERMEMORY_API_KEY environment variable is not set
+ *
+ * @example
+ * ```typescript
+ * const openaiWithSupermemory = createOpenAIMiddleware(openai, "user-123", {
+ *   conversationId: "conversation-456",
+ *   mode: "full",
+ *   addMemory: "always",
+ *   verbose: true
+ * })
+ * ```
+ */
+export function createOpenAIMiddleware(
+	openaiClient: OpenAI,
+	containerTag: string,
+	options?: OpenAIMiddlewareOptions,
+) {
+	const logger = createLogger(options?.verbose ?? false)
+	const client = new Supermemory({
+		apiKey: process.env.SUPERMEMORY_API_KEY,
+	})
+
+	const conversationId = options?.conversationId
+	const mode = options?.mode ?? "profile"
+	const addMemory = options?.addMemory ?? "never"
+
+	const originalCreate = openaiClient.chat.completions.create
+
+	const createWithMemory = async (
+		params: OpenAI.Chat.Completions.ChatCompletionCreateParams,
+	) => {
+		const messages = Array.isArray(params.messages) ? params.messages : []
+
+		if (addMemory === "always") {
+			const userMessage = getLastUserMessage(messages)
+			if (userMessage?.trim()) {
+				const content = conversationId
+					? getConversationContent(messages)
+					: userMessage
+				const customId = conversationId
+					? `conversation:${conversationId}`
+					: undefined
+
+				// Intentionally not awaited so saving memories never blocks the completion
+				addMemoryTool(client, containerTag, content, customId, logger)
+			}
+		}
+
+		if (mode !== "profile") {
+			const userMessage = getLastUserMessage(messages)
+			if (!userMessage) {
+				logger.debug("No user message found, skipping memory search")
+				return originalCreate.call(openaiClient.chat.completions, params)
+			}
+		}
+
+		logger.info("Starting memory search", {
+			containerTag,
+			conversationId,
+			mode,
+		})
+
+		const enhancedMessages = await addSystemPrompt(
+			messages,
+			containerTag,
+			logger,
+			mode,
+		)
+
+		return originalCreate.call(openaiClient.chat.completions, {
+			...params,
+			messages: enhancedMessages,
+		})
+	}
+
+	openaiClient.chat.completions.create =
+		createWithMemory as typeof originalCreate
+
+	return openaiClient
+}
diff --git a/packages/tools/src/openai/tools.ts b/packages/tools/src/openai/tools.ts
new file mode 100644
index 00000000..4078df09
--- /dev/null
+++ b/packages/tools/src/openai/tools.ts
@@ -0,0 +1,276 @@
+import type OpenAI from "openai"
+import Supermemory from "supermemory"
+import {
+	DEFAULT_VALUES,
+	PARAMETER_DESCRIPTIONS,
+	TOOL_DESCRIPTIONS,
+	getContainerTags,
+} from "../shared"
+import type { SupermemoryToolsConfig } from "../types"
+
+/**
+ * Result types for memory operations
+ */
+export interface MemorySearchResult {
+	success: boolean
+	results?: Awaited<ReturnType<Supermemory["search"]["execute"]>>["results"]
+	count?: number
+	error?: string
+}
+
+export interface MemoryAddResult {
+	success: boolean
+	memory?: Awaited<ReturnType<Supermemory["memories"]["add"]>>
+	error?: string
+}
+
+/**
+ * Function schemas for OpenAI function calling
+ */
+export const memoryToolSchemas = {
+	searchMemories: {
+		name: "searchMemories",
+		description: TOOL_DESCRIPTIONS.searchMemories,
+		parameters: {
+			type: "object",
+			properties: {
+				informationToGet: {
+					type: "string",
+					description: PARAMETER_DESCRIPTIONS.informationToGet,
+				},
+				includeFullDocs: {
+					type: "boolean",
+					description: PARAMETER_DESCRIPTIONS.includeFullDocs,
+					default: DEFAULT_VALUES.includeFullDocs,
+				},
+				limit: {
+					type: "number",
+					description: PARAMETER_DESCRIPTIONS.limit,
+					default: DEFAULT_VALUES.limit,
+				},
+			},
+			required: ["informationToGet"],
+		},
+	} satisfies OpenAI.FunctionDefinition,
+
+	addMemory: {
+		name: "addMemory",
+		description: TOOL_DESCRIPTIONS.addMemory,
+		parameters: {
+			type: "object",
+			properties: {
+				memory: {
+					type: "string",
+					description: PARAMETER_DESCRIPTIONS.memory,
+				},
+			},
+			required: ["memory"],
+		},
+	} satisfies OpenAI.FunctionDefinition,
+} as const
+
+/**
+ * Create a Supermemory client with configuration
+ */
+function createClient(apiKey: string, config?: SupermemoryToolsConfig) {
+	const client = new Supermemory({
+		apiKey,
+		...(config?.baseUrl && { baseURL: config.baseUrl }),
+	})
+
+	const containerTags = getContainerTags(config)
+
+	return { client, containerTags }
+}
+
+/**
+ * Search memories function
+ */
+export function createSearchMemoriesFunction(
+	apiKey: string,
+	config?: SupermemoryToolsConfig,
+) {
+	const { client, containerTags } = createClient(apiKey, config)
+
+	return async function searchMemories({
+		informationToGet,
+		includeFullDocs = DEFAULT_VALUES.includeFullDocs,
+		limit = DEFAULT_VALUES.limit,
+	}: {
+		informationToGet: string
+		includeFullDocs?: boolean
+		limit?: number
+	}): Promise<MemorySearchResult> {
+		try {
+			const response = await client.search.execute({
+				q: informationToGet,
+				containerTags,
+				limit,
+				chunkThreshold: DEFAULT_VALUES.chunkThreshold,
+				includeFullDocs,
+			})
+
+			return {
+				success: true,
+				results: response.results,
+				count: response.results?.length || 0,
+			}
+		} catch (error) {
+			return {
+				success: false,
+				error: error instanceof Error ? error.message : "Unknown error",
+			}
+		}
+	}
+}
+
+/**
+ * Add memory function
+ */
+export function createAddMemoryFunction(
+	apiKey: string,
+	config?: SupermemoryToolsConfig,
+) {
+	const { client, containerTags } = createClient(apiKey, config)
+
+	return async function addMemory({
+		memory,
+	}: {
+		memory: string
+	}): Promise<MemoryAddResult> {
+		try {
+			const metadata: Record<string, string | number | boolean> = {}
+
+			const response = await client.memories.add({
+				content: memory,
+				containerTags,
+				...(Object.keys(metadata).length > 0 && { metadata }),
+			})
+
+			return {
+				success: true,
+				memory: response,
+			}
+		} catch (error) {
+			return {
+				success: false,
+				error: error instanceof Error ? error.message : "Unknown error",
+			}
+		}
+	}
+}
+
+/**
+ * Create all memory tools functions
+ */
+export function supermemoryTools(
+	apiKey: string,
+	config?: SupermemoryToolsConfig,
+) {
+	const searchMemories = createSearchMemoriesFunction(apiKey, config)
+	const addMemory = createAddMemoryFunction(apiKey, config)
+
+	return {
+		searchMemories,
+		addMemory,
+	}
+}
+
+/**
+ * Get OpenAI function definitions for all memory tools
+ */
+export function getToolDefinitions(): OpenAI.Chat.Completions.ChatCompletionTool[] {
+	return [
+		{ type: "function", function: memoryToolSchemas.searchMemories },
+		{ type: "function", function: memoryToolSchemas.addMemory },
+	]
+}
+
+/**
+ * Execute a tool call based on the function name and arguments
+ */
+export function createToolCallExecutor(
+	apiKey: string,
+	config?: SupermemoryToolsConfig,
+) {
+	const tools = supermemoryTools(apiKey, config)
+
+	return async function executeToolCall(
+		toolCall: OpenAI.Chat.Completions.ChatCompletionMessageToolCall,
+	): Promise<string> {
+		const functionName = toolCall.function.name
+		const args = JSON.parse(toolCall.function.arguments)
+
+		switch (functionName) {
+			case "searchMemories":
+				return JSON.stringify(await tools.searchMemories(args))
+			case "addMemory":
+				return JSON.stringify(await tools.addMemory(args))
+			default:
+				return JSON.stringify({
+					success: false,
+					error: `Unknown function: ${functionName}`,
+				})
+		}
+	}
+}
+
+/**
+ * Execute tool calls from OpenAI function calling
+ */
+export function createToolCallsExecutor(
+	apiKey: string,
+	config?: SupermemoryToolsConfig,
+) {
+	const executeToolCall = createToolCallExecutor(apiKey, config)
+
+	return async function executeToolCalls(
+		toolCalls: OpenAI.Chat.Completions.ChatCompletionMessageToolCall[],
+	): Promise<OpenAI.Chat.Completions.ChatCompletionToolMessageParam[]> {
+		const results = await Promise.all(
+			toolCalls.map(async (toolCall) => {
+				const result = await executeToolCall(toolCall)
+				return {
+					tool_call_id: toolCall.id,
+					role: "tool" as const,
+					content: result,
+				}
+			}),
+		)
+
+		return results
+	}
+}
+
+/**
+ * Individual tool creators for more granular control
+ */
+export function createSearchMemoriesTool(
+	apiKey: string,
+	config?: SupermemoryToolsConfig,
+) {
+	const searchMemories = createSearchMemoriesFunction(apiKey, config)
+
+	return {
+		definition: {
+			type: "function" as const,
+			function: memoryToolSchemas.searchMemories,
+		},
+		execute: searchMemories,
+	}
+}
+
+export function createAddMemoryTool(
+	apiKey: string,
+	config?: SupermemoryToolsConfig,
+) {
+	const addMemory = createAddMemoryFunction(apiKey, config)
+
+	return {
+		definition: {
+			type: "function" as const,
+			function: memoryToolSchemas.addMemory,
+		},
+		execute: addMemory,
+	}
+}
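
While the middleware path in index.ts/middleware.ts is transparent, the helpers in tools.ts are meant for the manual OpenAI function-calling loop. As a minimal usage sketch (not part of this commit; the model name and the non-null assertion on the API key are illustrative assumptions), `getToolDefinitions` and `createToolCallsExecutor` compose into a standard tool-call round trip like this:

```typescript
import OpenAI from "openai"
import {
	createToolCallsExecutor,
	getToolDefinitions,
} from "@supermemory/tools/openai"

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
// Executes searchMemories/addMemory tool calls against Supermemory
const executeToolCalls = createToolCallsExecutor(
	process.env.SUPERMEMORY_API_KEY!, // assumes the key is set
)

const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
	{ role: "user", content: "Remember that I prefer TypeScript." },
]

// First pass: the model may decide to call a memory tool
const first = await openai.chat.completions.create({
	model: "gpt-4o", // illustrative model choice
	messages,
	tools: getToolDefinitions(),
})

const assistantMessage = first.choices[0].message
if (assistantMessage.tool_calls?.length) {
	// Run the tool calls, then feed the tool results back for a final answer
	const toolResults = await executeToolCalls(assistantMessage.tool_calls)
	const second = await openai.chat.completions.create({
		model: "gpt-4o",
		messages: [...messages, assistantMessage, ...toolResults],
	})
	console.log(second.choices[0].message.content)
}
```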