aboutsummaryrefslogtreecommitdiff
path: root/packages/tools/src/openai/middleware.ts
diff options
context:
space:
mode:
Diffstat (limited to 'packages/tools/src/openai/middleware.ts')
-rw-r--r--packages/tools/src/openai/middleware.ts224
1 file changed, 163 insertions, 61 deletions
diff --git a/packages/tools/src/openai/middleware.ts b/packages/tools/src/openai/middleware.ts
index 8d83874d..3a851b31 100644
--- a/packages/tools/src/openai/middleware.ts
+++ b/packages/tools/src/openai/middleware.ts
@@ -152,54 +152,13 @@ const addSystemPrompt = async (
const queryText = mode !== "profile" ? getLastUserMessage(messages) : ""
- const memoriesResponse = await supermemoryProfileSearch(
- containerTag,
+ const memories = await searchAndFormatMemories(
queryText,
- )
-
- const memoryCountStatic = memoriesResponse.profile.static?.length || 0
- const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0
-
- logger.info("Memory search completed", {
containerTag,
- memoryCountStatic,
- memoryCountDynamic,
- queryText:
- queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
+ logger,
mode,
- })
-
- const profileData =
- mode !== "query"
- ? convertProfileToMarkdown({
- profile: {
- static: memoriesResponse.profile.static?.map((item) => item.memory),
- dynamic: memoriesResponse.profile.dynamic?.map(
- (item) => item.memory,
- ),
- },
- searchResults: {
- results: memoriesResponse.searchResults.results.map((item) => ({
- memory: item.memory,
- })) as [{ memory: string }],
- },
- })
- : ""
- const searchResultsMemories =
- mode !== "profile"
- ? `Search results for user's recent message: \n${memoriesResponse.searchResults.results
- .map((result) => `- ${result.memory}`)
- .join("\n")}`
- : ""
-
- const memories = `${profileData}\n${searchResultsMemories}`.trim()
-
- if (memories) {
- logger.debug("Memory content preview", {
- content: memories,
- fullLength: memories.length,
- })
- }
+ "chat",
+ )
if (systemPromptExists) {
logger.debug("Added memories to existing system prompt")
@@ -338,27 +297,145 @@ export function createOpenAIMiddleware(
apiKey: process.env.SUPERMEMORY_API_KEY,
})
+ const conversationId = options?.conversationId
+ const mode = options?.mode ?? "profile"
+ const addMemory = options?.addMemory ?? "never"
+
const originalCreate = openaiClient.chat.completions.create
+ const originalResponsesCreate = openaiClient.responses?.create
+
+ /**
+ * Searches for memories and formats them for injection into API calls.
+ *
+ * This shared function handles memory search and formatting for both Chat Completions
+ * and Responses APIs, reducing code duplication.
+ *
+ * @param queryText - The text to search for (empty string for profile-only mode)
+ * @param containerTag - The container tag for memory search
+ * @param logger - Logger instance
+ * @param mode - Memory search mode
+ * @param context - API context for logging differentiation
+ * @returns Formatted memories string
+ */
+ const searchAndFormatMemories = async (
+ queryText: string,
+ containerTag: string,
+ logger: Logger,
+ mode: "profile" | "query" | "full",
+ context: "chat" | "responses",
+ ) => {
+ const memoriesResponse = await supermemoryProfileSearch(
+ containerTag,
+ queryText,
+ )
- const createWithMemory = async (
- params: OpenAI.Chat.Completions.ChatCompletionCreateParams,
+ const memoryCountStatic = memoriesResponse.profile.static?.length || 0
+ const memoryCountDynamic = memoriesResponse.profile.dynamic?.length || 0
+
+ logger.info(`Memory search completed for ${context} API`, {
+ containerTag,
+ memoryCountStatic,
+ memoryCountDynamic,
+ queryText:
+ queryText.substring(0, 100) + (queryText.length > 100 ? "..." : ""),
+ mode,
+ })
+
+ const profileData =
+ mode !== "query"
+ ? convertProfileToMarkdown({
+ profile: {
+ static: memoriesResponse.profile.static?.map((item) => item.memory),
+ dynamic: memoriesResponse.profile.dynamic?.map(
+ (item) => item.memory,
+ ),
+ },
+ searchResults: {
+ results: memoriesResponse.searchResults.results.map((item) => ({
+ memory: item.memory,
+ })) as [{ memory: string }],
+ },
+ })
+ : ""
+ const searchResultsMemories =
+ mode !== "profile"
+ ? `Search results for user's ${context === "chat" ? "recent message" : "input"}: \n${memoriesResponse.searchResults.results
+ .map((result) => `- ${result.memory}`)
+ .join("\n")}`
+ : ""
+
+ const memories = `${profileData}\n${searchResultsMemories}`.trim()
+
+ if (memories) {
+ logger.debug(`Memory content preview for ${context} API`, {
+ content: memories,
+ fullLength: memories.length,
+ })
+ }
+
+ return memories
+ }
+
+ const createResponsesWithMemory = async (
+ params: Parameters<typeof originalResponsesCreate>[0],
) => {
- const messages = Array.isArray(params.messages) ? params.messages : []
+ if (!originalResponsesCreate) {
+ throw new Error("Responses API is not available in this OpenAI client version")
+ }
- if (addMemory === "always") {
- const userMessage = getLastUserMessage(messages)
- if (userMessage?.trim()) {
- const content = conversationId
- ? getConversationContent(messages)
- : userMessage
- const customId = conversationId
- ? `conversation:${conversationId}`
- : undefined
+ const input = typeof params.input === "string" ? params.input : ""
- addMemoryTool(client, containerTag, content, customId, logger)
- }
+ if (mode !== "profile" && !input) {
+ logger.debug("No input found for Responses API, skipping memory search")
+ return originalResponsesCreate.call(openaiClient.responses, params)
}
+ logger.info("Starting memory search for Responses API", {
+ containerTag,
+ conversationId,
+ mode,
+ })
+
+ const operations: Promise<any>[] = []
+
+ if (addMemory === "always" && input?.trim()) {
+ const content = conversationId
+ ? `Input: ${input}`
+ : input
+ const customId = conversationId
+ ? `conversation:${conversationId}`
+ : undefined
+
+ operations.push(addMemoryTool(client, containerTag, content, customId, logger))
+ }
+
+ const queryText = mode !== "profile" ? input : ""
+ operations.push(searchAndFormatMemories(
+ queryText,
+ containerTag,
+ logger,
+ mode,
+ "responses",
+ ))
+
+ const results = await Promise.all(operations)
+ const memories = results[results.length - 1] // Memory search result is always last
+
+ const enhancedInstructions = memories
+ ? `${params.instructions || ""}\n\n${memories}`.trim()
+ : params.instructions
+
+ return originalResponsesCreate.call(openaiClient.responses, {
+ ...params,
+ instructions: enhancedInstructions,
+ })
+ }
+
+ const createWithMemory = async (
+ params: OpenAI.Chat.Completions.ChatCompletionCreateParams,
+ ) => {
+ const messages = Array.isArray(params.messages) ? params.messages : []
+
if (mode !== "profile") {
const userMessage = getLastUserMessage(messages)
if (!userMessage) {
@@ -373,12 +450,31 @@ export function createOpenAIMiddleware(
mode,
})
- const enhancedMessages = await addSystemPrompt(
+ const operations: Promise<any>[] = []
+
+ if (addMemory === "always") {
+ const userMessage = getLastUserMessage(messages)
+ if (userMessage?.trim()) {
+ const content = conversationId
+ ? getConversationContent(messages)
+ : userMessage
+ const customId = conversationId
+ ? `conversation:${conversationId}`
+ : undefined
+
+ operations.push(addMemoryTool(client, containerTag, content, customId, logger))
+ }
+ }
+
+ operations.push(addSystemPrompt(
messages,
containerTag,
logger,
mode,
- )
+ ))
+
+ const results = await Promise.all(operations)
+ const enhancedMessages = results[results.length - 1] // Enhanced messages result is always last
return originalCreate.call(openaiClient.chat.completions, {
...params,
@@ -389,5 +485,11 @@ export function createOpenAIMiddleware(
openaiClient.chat.completions.create =
createWithMemory as typeof originalCreate
+ // Wrap Responses API if available
+ if (originalResponsesCreate) {
+ openaiClient.responses.create =
+ createResponsesWithMemory as typeof originalResponsesCreate
+ }
+
return openaiClient
}