From 7b0baea0f28d98746bbfa3656f8a260caea935e2 Mon Sep 17 00:00:00 2001 From: Shoubhit Dash Date: Fri, 7 Nov 2025 20:09:44 +0530 Subject: add support for responses api in openai typescript sdk (#549) --- packages/tools/src/openai/index.ts | 23 ++- packages/tools/src/openai/middleware.ts | 224 +++++++++++++++++++-------- packages/tools/test/openai-responses-test.ts | 21 +++ 3 files changed, 200 insertions(+), 68 deletions(-) create mode 100644 packages/tools/test/openai-responses-test.ts diff --git a/packages/tools/src/openai/index.ts b/packages/tools/src/openai/index.ts index a13f5ed8..54e98402 100644 --- a/packages/tools/src/openai/index.ts +++ b/packages/tools/src/openai/index.ts @@ -6,11 +6,13 @@ import { /** * Wraps an OpenAI client with SuperMemory middleware to automatically inject relevant memories - * into the system prompt based on the user's message content. + * into both Chat Completions and Responses APIs based on the user's input content. * - * This middleware searches the supermemory API for relevant memories using the container tag - * and user message, then either appends memories to an existing system prompt or creates - * a new system prompt with the memories. + * For Chat Completions API: Searches for memories using the user message content and injects + * them into the system prompt (appends to existing or creates new system prompt). + * + * For Responses API: Searches for memories using the input parameter and injects them into + * the instructions parameter (appends to existing or creates new instructions). 
* * @param openaiClient - The OpenAI client to wrap with SuperMemory middleware * @param containerTag - The container tag/identifier for memory search (e.g., user ID, project ID) @@ -20,7 +22,7 @@ import { * @param options.mode - Optional mode for memory search: "profile" (default), "query", or "full" * @param options.addMemory - Optional mode for memory addition: "always", "never" (default) * - * @returns An OpenAI client with SuperMemory middleware injected + * @returns An OpenAI client with SuperMemory middleware injected for both Chat Completions and Responses APIs * * @example * ```typescript @@ -37,13 +39,20 @@ import { * addMemory: "always" * }) * - * // Use normally - memories will be automatically injected - * const response = await openaiWithSupermemory.chat.completions.create({ + * // Use with Chat Completions API - memories injected into system prompt + * const chatResponse = await openaiWithSupermemory.chat.completions.create({ * model: "gpt-4", * messages: [ * { role: "user", content: "What's my favorite programming language?" } * ] * }) + * + * // Use with Responses API - memories injected into instructions + * const response = await openaiWithSupermemory.responses.create({ + * model: "gpt-4o", + * instructions: "You are a helpful coding assistant", + * input: "What's my favorite programming language?" + * }) * ``` * * @throws {Error} When SUPERMEMORY_API_KEY environment variable is not set diff --git a/packages/tools/src/openai/middleware.ts b/packages/tools/src/openai/middleware.ts index 8d83874d..3a851b31 100644 --- a/packages/tools/src/openai/middleware.ts +++ b/packages/tools/src/openai/middleware.ts @@ -152,54 +152,13 @@ const addSystemPrompt = async ( const queryText = mode !== "profile" ? 
getLastUserMessage(messages) : "" - const memoriesResponse = await supermemoryProfileSearch( - containerTag, + const memories = await searchAndFormatMemories( queryText, - ) - - const memoryCountStatic = memoriesResponse.profile.static?.length || 0 - const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0 - - logger.info("Memory search completed", { containerTag, - memoryCountStatic, - memoryCountDynamic, - queryText: - queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""), + logger, mode, - }) - - const profileData = - mode !== "query" - ? convertProfileToMarkdown({ - profile: { - static: memoriesResponse.profile.static?.map((item) => item.memory), - dynamic: memoriesResponse.profile.dynamic?.map( - (item) => item.memory, - ), - }, - searchResults: { - results: memoriesResponse.searchResults.results.map((item) => ({ - memory: item.memory, - })) as [{ memory: string }], - }, - }) - : "" - const searchResultsMemories = - mode !== "profile" - ? `Search results for user's recent message: \n${memoriesResponse.searchResults.results - .map((result) => `- ${result.memory}`) - .join("\n")}` - : "" - - const memories = `${profileData}\n${searchResultsMemories}`.trim() - - if (memories) { - logger.debug("Memory content preview", { - content: memories, - fullLength: memories.length, - }) - } + "chat", + ) if (systemPromptExists) { logger.debug("Added memories to existing system prompt") @@ -338,27 +297,145 @@ export function createOpenAIMiddleware( apiKey: process.env.SUPERMEMORY_API_KEY, }) + const conversationId = options?.conversationId + const mode = options?.mode ?? "profile" + const addMemory = options?.addMemory ?? "never" + const originalCreate = openaiClient.chat.completions.create + const originalResponsesCreate = openaiClient.responses?.create + + /** + * Searches for memories and formats them for injection into API calls. 
+ * + * This shared function handles memory search and formatting for both Chat Completions + * and Responses APIs, reducing code duplication. + * + * @param queryText - The text to search for (empty string for profile-only mode) + * @param containerTag - The container tag for memory search + * @param logger - Logger instance + * @param mode - Memory search mode + * @param context - API context for logging differentiation + * @returns Formatted memories string + */ + const searchAndFormatMemories = async ( + queryText: string, + containerTag: string, + logger: Logger, + mode: "profile" | "query" | "full", + context: "chat" | "responses", + ) => { + const memoriesResponse = await supermemoryProfileSearch( + containerTag, + queryText, + ) - const createWithMemory = async ( - params: OpenAI.Chat.Completions.ChatCompletionCreateParams, + const memoryCountStatic = memoriesResponse.profile.static?.length || 0 + const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0 + + logger.info(`Memory search completed for ${context} API`, { + containerTag, + memoryCountStatic, + memoryCountDynamic, + queryText: + queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""), + mode, + }) + + const profileData = + mode !== "query" + ? convertProfileToMarkdown({ + profile: { + static: memoriesResponse.profile.static?.map((item) => item.memory), + dynamic: memoriesResponse.profile.dynamic?.map( + (item) => item.memory, + ), + }, + searchResults: { + results: memoriesResponse.searchResults.results.map((item) => ({ + memory: item.memory, + })) as [{ memory: string }], + }, + }) + : "" + const searchResultsMemories = + mode !== "profile" + ? `Search results for user's ${context === "chat" ? 
"recent message" : "input"}: \n${memoriesResponse.searchResults.results
+ .map((result) => `- ${result.memory}`)
+ .join("\n")}`
+ : ""
+
+ const memories = `${profileData}\n${searchResultsMemories}`.trim()
+
+ if (memories) {
+ logger.debug(`Memory content preview for ${context} API`, {
+ content: memories,
+ fullLength: memories.length,
+ })
+ }
+
+ return memories
+ }
+
+ const createResponsesWithMemory = async (
+ params: Parameters<typeof originalResponsesCreate>[0],
) => {
- const messages = Array.isArray(params.messages) ? params.messages : []
+ if (!originalResponsesCreate) {
+ throw new Error("Responses API is not available in this OpenAI client version")
+ }

- if (addMemory === "always") {
- const userMessage = getLastUserMessage(messages)
- if (userMessage?.trim()) {
- const content = conversationId
- ? getConversationContent(messages)
- : userMessage
- const customId = conversationId
- ? `conversation:${conversationId}`
- : undefined
+ const input = typeof params.input === "string" ? params.input : ""

- addMemoryTool(client, containerTag, content, customId, logger)
- }
+ if (mode !== "profile" && !input) {
+ logger.debug("No input found for Responses API, skipping memory search")
+ return originalResponsesCreate.call(openaiClient.responses, params)
}

+ logger.info("Starting memory search for Responses API", {
+ containerTag,
+ conversationId,
+ mode,
+ })
+
+ const operations: Promise<unknown>[] = []
+
+ if (addMemory === "always" && input?.trim()) {
+ const content = conversationId
+ ? `Input: ${input}`
+ : input
+ const customId = conversationId
+ ? `conversation:${conversationId}`
+ : undefined
+
+ operations.push(addMemoryTool(client, containerTag, content, customId, logger))
+ }
+
+ const queryText = mode !== "profile" ? 
input : ""
+ operations.push(searchAndFormatMemories(
+ queryText,
+ containerTag,
+ logger,
+ mode,
+ "responses",
+ ))
+
+ const results = await Promise.all(operations)
+ const memories = results[results.length - 1] // Memory search result is always last
+
+ const enhancedInstructions = memories
+ ? `${params.instructions || ""}\n\n${memories}`.trim()
+ : params.instructions
+
+ return originalResponsesCreate.call(openaiClient.responses, {
+ ...params,
+ instructions: enhancedInstructions,
+ })
+ }
+
+ const createWithMemory = async (
+ params: OpenAI.Chat.Completions.ChatCompletionCreateParams,
+ ) => {
+ const messages = Array.isArray(params.messages) ? params.messages : []
+
if (mode !== "profile") {
const userMessage = getLastUserMessage(messages)
if (!userMessage) {
@@ -373,12 +450,31 @@ export function createOpenAIMiddleware(
mode,
})
- const enhancedMessages = await addSystemPrompt(
+ const operations: Promise<unknown>[] = []
+
+ if (addMemory === "always") {
+ const userMessage = getLastUserMessage(messages)
+ if (userMessage?.trim()) {
+ const content = conversationId
+ ? getConversationContent(messages)
+ : userMessage
+ const customId = conversationId
+ ? 
`conversation:${conversationId}` + : undefined + + operations.push(addMemoryTool(client, containerTag, content, customId, logger)) + } + } + + operations.push(addSystemPrompt( messages, containerTag, logger, mode, - ) + )) + + const results = await Promise.all(operations) + const enhancedMessages = results[results.length - 1] // Enhanced messages result is always last return originalCreate.call(openaiClient.chat.completions, { ...params, @@ -389,5 +485,11 @@ export function createOpenAIMiddleware( openaiClient.chat.completions.create = createWithMemory as typeof originalCreate + // Wrap Responses API if available + if (originalResponsesCreate) { + openaiClient.responses.create = + createResponsesWithMemory as typeof originalResponsesCreate + } + return openaiClient } diff --git a/packages/tools/test/openai-responses-test.ts b/packages/tools/test/openai-responses-test.ts new file mode 100644 index 00000000..776e7574 --- /dev/null +++ b/packages/tools/test/openai-responses-test.ts @@ -0,0 +1,21 @@ +import { OpenAI } from "openai" +import { withSupermemory } from "../src/openai" + +const openai = new OpenAI({ + apiKey: process.env.OPENAI_API_KEY, +}) + +const openaiWithSupermemory = withSupermemory(openai, "user_id_life", { + verbose: true, + mode: "full", + addMemory: "always", +}) + +const response = await openaiWithSupermemory.responses.create({ + model: "gpt-4o", + instructions: "you are ai girlfriend", + input: "Where do i live?", +}) + +console.log("Response output:", JSON.stringify(response.output[0], null, 2)) +console.log("Usage:", response.usage) -- cgit v1.2.3