diff options
| author | Dhravya Shah <[email protected]> | 2025-10-03 02:41:49 -0700 |
|---|---|---|
| committer | Dhravya Shah <[email protected]> | 2025-10-03 02:41:49 -0700 |
| commit | 4d6fd37c99fd6af46d2f1aedbeb750e0029b3b8b (patch) | |
| tree | 869d415d87b152bfb5e55601311a2c8f6e64b6fc /apps/docs/memory-api | |
| parent | chore: fix docs again (diff) | |
| download | supermemory-4d6fd37c99fd6af46d2f1aedbeb750e0029b3b8b.tar.xz supermemory-4d6fd37c99fd6af46d2f1aedbeb750e0029b3b8b.zip | |
fix: model names
Diffstat (limited to 'apps/docs/memory-api')
| -rw-r--r-- | apps/docs/memory-api/sdks/openai-plugins.mdx | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/apps/docs/memory-api/sdks/openai-plugins.mdx b/apps/docs/memory-api/sdks/openai-plugins.mdx
index 6550cee3..635e0008 100644
--- a/apps/docs/memory-api/sdks/openai-plugins.mdx
+++ b/apps/docs/memory-api/sdks/openai-plugins.mdx
@@ -53,7 +53,7 @@ async def main():
     # Chat with memory tools
     response = await client.chat.completions.create(
-        model="gpt-4o",
+        model="gpt-5",
         messages=[
             {
                 "role": "system",
@@ -99,7 +99,7 @@ const executeToolCall = createToolCallExecutor(process.env.SUPERMEMORY_API_KEY!,
 // Use with OpenAI Chat Completions
 const completion = await client.chat.completions.create({
-  model: "gpt-4",
+  model: "gpt-5",
   messages: [
     {
       role: "user",
@@ -300,7 +300,7 @@ async def chat_with_memory():
         # Get AI response with tools
         response = await client.chat.completions.create(
-            model="gpt-4o",
+            model="gpt-5",
             messages=messages,
             tools=tools.get_tool_definitions()
         )
@@ -319,7 +319,7 @@ async def chat_with_memory():
         # Get final response after tool execution
         final_response = await client.chat.completions.create(
-            model="gpt-4o",
+            model="gpt-5",
             messages=messages
         )
@@ -370,7 +370,7 @@ async function chatWithMemory() {
   // Get AI response with tools
   const response = await client.chat.completions.create({
-    model: "gpt-4",
+    model: "gpt-5",
     messages,
     tools: getToolDefinitions(),
   })
@@ -391,7 +391,7 @@ async function chatWithMemory() {
   // Get final response after tool execution
   const finalResponse = await client.chat.completions.create({
-    model: "gpt-4",
+    model: "gpt-5",
     messages,
   })
@@ -433,7 +433,7 @@ async def safe_chat():
     tools = SupermemoryTools(api_key="your-api-key")
     response = await client.chat.completions.create(
-        model="gpt-4o",
+        model="gpt-5",
         messages=[{"role": "user", "content": "Hello"}],
         tools=tools.get_tool_definitions()
     )
@@ -453,7 +453,7 @@ async function safeChat() {
   const client = new OpenAI()
   const response = await client.chat.completions.create({
-    model: "gpt-4",
+    model: "gpt-5",
     messages: [{ role: "user", content: "Hello" }],
     tools: getToolDefinitions(),
   })