aboutsummaryrefslogtreecommitdiff
path: root/packages/openai-sdk-python/src
diff options
context:
space:
mode:
authorCodeWithShreyans <[email protected]>2025-08-27 23:34:49 +0000
committerCodeWithShreyans <[email protected]>2025-08-27 23:34:49 +0000
commit3a0e264b7eb18fe3b6d2de25e79879ada7c9f3ec (patch)
tree4f2be5731860abece575bf4946ef881972502d73 /packages/openai-sdk-python/src
parentfeat: support project-specific installation commands (#390) (diff)
downloadsupermemory-3a0e264b7eb18fe3b6d2de25e79879ada7c9f3ec.tar.xz
supermemory-3a0e264b7eb18fe3b6d2de25e79879ada7c9f3ec.zip
feat: openai js and python sdk utilities (#389)shreyans/08-27-feat_openai_js_and_python_sdk_utilities
needs testing
Diffstat (limited to 'packages/openai-sdk-python/src')
-rw-r--r--packages/openai-sdk-python/src/__init__.py54
-rw-r--r--packages/openai-sdk-python/src/infinite_chat.py268
-rw-r--r--packages/openai-sdk-python/src/tools.py366
3 files changed, 688 insertions, 0 deletions
diff --git a/packages/openai-sdk-python/src/__init__.py b/packages/openai-sdk-python/src/__init__.py
new file mode 100644
index 00000000..47a7569e
--- /dev/null
+++ b/packages/openai-sdk-python/src/__init__.py
@@ -0,0 +1,54 @@
+"""Supermemory OpenAI SDK - Enhanced OpenAI Python SDK with infinite context."""
+
+from .infinite_chat import (
+ SupermemoryOpenAI,
+ SupermemoryInfiniteChatConfig,
+ SupermemoryInfiniteChatConfigWithProviderName,
+ SupermemoryInfiniteChatConfigWithProviderUrl,
+ ProviderName,
+ PROVIDER_MAP,
+ create_supermemory_openai,
+)
+
+from .tools import (
+ SupermemoryTools,
+ SupermemoryToolsConfig,
+ MemoryObject,
+ MemorySearchResult,
+ MemoryAddResult,
+ SearchMemoriesTool,
+ AddMemoryTool,
+ MEMORY_TOOL_SCHEMAS,
+ create_supermemory_tools,
+ get_memory_tool_definitions,
+ execute_memory_tool_calls,
+ create_search_memories_tool,
+ create_add_memory_tool,
+)
+
__version__ = "0.1.0"

# Names re-exported from .infinite_chat
_INFINITE_CHAT_EXPORTS = [
    "SupermemoryOpenAI",
    "SupermemoryInfiniteChatConfig",
    "SupermemoryInfiniteChatConfigWithProviderName",
    "SupermemoryInfiniteChatConfigWithProviderUrl",
    "ProviderName",
    "PROVIDER_MAP",
    "create_supermemory_openai",
]

# Names re-exported from .tools
_TOOLS_EXPORTS = [
    "SupermemoryTools",
    "SupermemoryToolsConfig",
    "MemoryObject",
    "MemorySearchResult",
    "MemoryAddResult",
    "SearchMemoriesTool",
    "AddMemoryTool",
    "MEMORY_TOOL_SCHEMAS",
    "create_supermemory_tools",
    "get_memory_tool_definitions",
    "execute_memory_tool_calls",
    "create_search_memories_tool",
    "create_add_memory_tool",
]

# Public API of the package: the union of both submodules' exports.
__all__ = _INFINITE_CHAT_EXPORTS + _TOOLS_EXPORTS
diff --git a/packages/openai-sdk-python/src/infinite_chat.py b/packages/openai-sdk-python/src/infinite_chat.py
new file mode 100644
index 00000000..1d3890ae
--- /dev/null
+++ b/packages/openai-sdk-python/src/infinite_chat.py
@@ -0,0 +1,268 @@
+"""Enhanced OpenAI client with Supermemory infinite context integration."""
+
+from typing import Dict, List, Optional, Union, overload, Unpack
+from typing_extensions import Literal
+
+from openai import OpenAI, AsyncStream
+from openai.types.chat import (
+ ChatCompletion,
+ ChatCompletionMessageParam,
+ ChatCompletionToolParam,
+ ChatCompletionToolChoiceOptionParam,
+ CompletionCreateParams,
+)
+
+
# Base URLs for each supported upstream provider, keyed by short name.
PROVIDER_MAP = dict(
    openai="https://api.openai.com/v1",
    anthropic="https://api.anthropic.com/v1",
    openrouter="https://openrouter.ai/api/v1",
    deepinfra="https://api.deepinfra.com/v1/openai",
    groq="https://api.groq.com/openai/v1",
    google="https://generativelanguage.googleapis.com/v1beta/openai",
    cloudflare="https://gateway.ai.cloudflare.com/v1/*/unlimited-context/openai",
)

# Provider identifiers accepted by the named-provider configuration.
ProviderName = Literal[
    "openai",
    "anthropic",
    "openrouter",
    "deepinfra",
    "groq",
    "google",
    "cloudflare",
]
+
+
class SupermemoryInfiniteChatConfigBase:
    """Shared settings for every infinite-chat provider configuration.

    Holds the upstream provider's API key plus any extra HTTP headers to
    forward on every request.
    """

    def __init__(
        self,
        provider_api_key: str,
        headers: Optional[Dict[str, str]] = None,
    ):
        self.provider_api_key = provider_api_key
        # Normalize missing headers to an empty dict so consumers can
        # merge unconditionally.
        self.headers = headers or {}
+
+
class SupermemoryInfiniteChatConfigWithProviderName(SupermemoryInfiniteChatConfigBase):
    """Provider configuration addressed by a well-known provider name.

    Exactly one of ``provider_name`` / ``provider_url`` is ever set; the
    other stays ``None`` so the client can discriminate the config union.
    """

    def __init__(
        self,
        provider_name: ProviderName,
        provider_api_key: str,
        headers: Optional[Dict[str, str]] = None,
    ):
        super().__init__(provider_api_key, headers)
        self.provider_url: None = None
        self.provider_name = provider_name
+
+
class SupermemoryInfiniteChatConfigWithProviderUrl(SupermemoryInfiniteChatConfigBase):
    """Provider configuration addressed by an explicit base URL.

    Mirror image of the named-provider config: ``provider_name`` is kept
    ``None`` so callers can tell the two variants apart.
    """

    def __init__(
        self,
        provider_url: str,
        provider_api_key: str,
        headers: Optional[Dict[str, str]] = None,
    ):
        super().__init__(provider_api_key, headers)
        self.provider_name: None = None
        self.provider_url = provider_url
+
+
# Either a named provider or an explicit base URL — the client accepts both.
SupermemoryInfiniteChatConfig = Union[
    SupermemoryInfiniteChatConfigWithProviderUrl,
    SupermemoryInfiniteChatConfigWithProviderName,
]
+
+
class SupermemoryOpenAI(OpenAI):
    """Enhanced OpenAI client with supermemory integration.

    Only chat completions are supported - all other OpenAI API endpoints are
    disabled (accessing them raises RuntimeError).
    """

    def __init__(
        self,
        supermemory_api_key: str,
        config: Optional[SupermemoryInfiniteChatConfig] = None,
    ):
        """Initialize the SupermemoryOpenAI client.

        Args:
            supermemory_api_key: API key for Supermemory service
            config: Configuration for the AI provider; defaults to OpenAI
                with no provider key when omitted
        """
        # Resolve the upstream base URL and credentials from the config.
        if config is None:
            base_url = "https://api.openai.com/v1"
            api_key = None
            headers: Dict[str, str] = {}
        elif getattr(config, "provider_name", None):
            # Named provider: look up its canonical URL (KeyError here means
            # an unknown provider name slipped past type checking).
            base_url = PROVIDER_MAP[config.provider_name]
            api_key = config.provider_api_key
            headers = config.headers
        else:
            # Explicit URL variant.
            base_url = config.provider_url
            api_key = config.provider_api_key
            headers = config.headers

        # Supermemory authenticates via its own header, alongside the
        # provider Authorization header managed by the OpenAI client.
        default_headers = {
            "x-supermemory-api-key": supermemory_api_key,
            **headers,
        }

        super().__init__(
            api_key=api_key,
            base_url=base_url,
            default_headers=default_headers,
        )

        self._supermemory_api_key = supermemory_api_key

        self._disable_unsupported_endpoints()

    def _disable_unsupported_endpoints(self) -> None:
        """Disable all OpenAI endpoints except chat completions.

        BUGFIX: the previous implementation assigned ``property`` objects to
        the *instance*; properties only trigger as class attributes, so the
        endpoints were never actually disabled. Instead, shadow each endpoint
        attribute with a proxy that raises on any attribute access or call.
        """

        class _UnsupportedEndpoint:
            """Stand-in that raises RuntimeError on any use."""

            def _fail(self):
                raise RuntimeError(
                    "Supermemory only supports chat completions. "
                    "Use chat_completion() or chat.completions.create() instead."
                )

            def __getattr__(self, item):
                # Fires for e.g. client.embeddings.create
                self._fail()

            def __call__(self, *args, **kwargs):
                # Fires for e.g. client.embeddings(...)
                self._fail()

        # Endpoints intentionally unavailable through this client.
        unsupported = (
            "embeddings",
            "fine_tuning",
            "images",
            "audio",
            "models",
            "moderations",
            "files",
            "batches",
            "uploads",
            "beta",
        )
        for endpoint in unsupported:
            setattr(self, endpoint, _UnsupportedEndpoint())

    async def create_chat_completion(
        self,
        **params: Unpack[CompletionCreateParams],
    ) -> ChatCompletion:
        """Create chat completions with infinite context support.

        Args:
            **params: Parameters for chat completion

        Returns:
            ChatCompletion response
        """
        # BUGFIX: the parent OpenAI client is synchronous, so ``create``
        # returns the completion directly; awaiting it raised TypeError.
        # The method stays ``async`` so existing callers that await it
        # keep working.
        return self.chat.completions.create(**params)

    @overload
    async def chat_completion(
        self,
        messages: List[ChatCompletionMessageParam],
        *,
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        tools: Optional[List[ChatCompletionToolParam]] = None,
        tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
        stream: Literal[False] = False,
        **kwargs: Unpack[CompletionCreateParams],
    ) -> ChatCompletion: ...

    @overload
    async def chat_completion(
        self,
        messages: List[ChatCompletionMessageParam],
        *,
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        tools: Optional[List[ChatCompletionToolParam]] = None,
        tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
        stream: Literal[True],
        **kwargs: Unpack[CompletionCreateParams],
    ) -> AsyncStream[ChatCompletion]: ...

    async def chat_completion(
        self,
        messages: List[ChatCompletionMessageParam],
        *,
        model: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        tools: Optional[List[ChatCompletionToolParam]] = None,
        tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
        stream: bool = False,
        **kwargs: Unpack[CompletionCreateParams],
    ) -> Union[ChatCompletion, AsyncStream[ChatCompletion]]:
        """Create chat completions with simplified interface.

        Args:
            messages: List of chat messages
            model: Model to use (defaults to gpt-4o)
            temperature: Sampling temperature
            max_tokens: Maximum tokens to generate
            tools: Available tools for function calling
            tool_choice: Tool choice strategy
            stream: Whether to stream the response
            **kwargs: Additional parameters

        Returns:
            ChatCompletion response or stream
        """
        params: Dict[str, object] = {
            "model": model or "gpt-4o",
            "messages": messages,
            **kwargs,
        }

        # Only forward optional parameters the caller actually supplied.
        if temperature is not None:
            params["temperature"] = temperature
        if max_tokens is not None:
            params["max_tokens"] = max_tokens
        if tools is not None:
            params["tools"] = tools
        if tool_choice is not None:
            params["tool_choice"] = tool_choice
        # BUGFIX: ``stream`` is a plain bool with default False, so the old
        # ``if stream is not None`` check was always true; only request
        # streaming when explicitly asked for.
        if stream:
            params["stream"] = True

        # Sync parent client: no await on create (see create_chat_completion).
        return self.chat.completions.create(**params)
+
+
def create_supermemory_openai(
    supermemory_api_key: str,
    config: Optional[SupermemoryInfiniteChatConfig] = None,
) -> SupermemoryOpenAI:
    """Convenience factory around the SupermemoryOpenAI constructor.

    Args:
        supermemory_api_key: API key for Supermemory service
        config: Configuration for the AI provider

    Returns:
        SupermemoryOpenAI instance
    """
    client = SupermemoryOpenAI(supermemory_api_key, config)
    return client
diff --git a/packages/openai-sdk-python/src/tools.py b/packages/openai-sdk-python/src/tools.py
new file mode 100644
index 00000000..6dfe3d2f
--- /dev/null
+++ b/packages/openai-sdk-python/src/tools.py
@@ -0,0 +1,366 @@
+"""Supermemory tools for OpenAI function calling."""
+
+import json
+from typing import Dict, List, Optional, Union, TypedDict
+
+from openai.types.chat import (
+ ChatCompletionMessageToolCall,
+ ChatCompletionToolMessageParam,
+ ChatCompletionToolParam,
+)
+import supermemory
+from supermemory.types import (
+ MemoryAddResponse,
+ MemoryGetResponse,
+ SearchExecuteResponse,
+)
+from supermemory.types.search_execute_response import Result
+
+
class SupermemoryToolsConfig(TypedDict, total=False):
    """Configuration for Supermemory tools.

    Only one of `project_id` or `container_tags` can be provided.
    """

    # Override for the Supermemory API endpoint.
    base_url: Optional[str]
    # Explicit container tags; mutually exclusive with project_id.
    container_tags: Optional[List[str]]
    # Project shorthand, expanded to the tag "sm_project_<id>".
    project_id: Optional[str]
+
+
# A memory record as returned by either the get or the add endpoint of the
# supermemory SDK (types inferred from that package).
MemoryObject = Union[MemoryAddResponse, MemoryGetResponse]
+
+
class MemorySearchResult(TypedDict, total=False):
    """Result type for memory search operations."""

    # Whether the search completed without raising.
    success: bool
    # Matching records from the supermemory search endpoint (on success).
    results: Optional[List[Result]]
    # Number of entries in ``results`` (on success).
    count: Optional[int]
    # Stringified exception (on failure).
    error: Optional[str]
+
+
class MemoryAddResult(TypedDict, total=False):
    """Result type for memory add operations."""

    # Whether the add completed without raising.
    success: bool
    # The created memory record (on success).
    memory: Optional[MemoryAddResponse]
    # Stringified exception (on failure).
    error: Optional[str]
+
+
# Function schemas for OpenAI function calling, one per memory tool.
_SEARCH_MEMORIES_SCHEMA = {
    "name": "search_memories",
    "description": (
        "Search (recall) memories/details/information about the user or other facts or entities. Run when explicitly asked or when context about user's past choices would be helpful."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "information_to_get": {
                "type": "string",
                "description": "Terms to search for in the user's memories",
            },
            "include_full_docs": {
                "type": "boolean",
                "description": (
                    "Whether to include the full document content in the response. "
                    "Defaults to true for better AI context."
                ),
                "default": True,
            },
            "limit": {
                "type": "number",
                "description": "Maximum number of results to return",
                "default": 10,
            },
        },
        "required": ["information_to_get"],
    },
}

_ADD_MEMORY_SCHEMA = {
    "name": "add_memory",
    "description": (
        "Add (remember) memories/details/information about the user or other facts or entities. Run when explicitly asked or when the user mentions any information generalizable beyond the context of the current conversation."
    ),
    "parameters": {
        "type": "object",
        "properties": {
            "memory": {
                "type": "string",
                "description": (
                    "The text content of the memory to add. This should be a "
                    "single sentence or a short paragraph."
                ),
            },
        },
        "required": ["memory"],
    },
}

# Public registry keyed by tool name, consumed by the tool-definition helpers.
MEMORY_TOOL_SCHEMAS = {
    "search_memories": _SEARCH_MEMORIES_SCHEMA,
    "add_memory": _ADD_MEMORY_SCHEMA,
}
+
+
class SupermemoryTools:
    """Create memory tool handlers for OpenAI function calling."""

    def __init__(self, api_key: str, config: Optional[SupermemoryToolsConfig] = None):
        """Initialize SupermemoryTools.

        Args:
            api_key: Supermemory API key
            config: Optional configuration
        """
        config = config or {}

        client_kwargs = {"api_key": api_key}
        if config.get("base_url"):
            client_kwargs["base_url"] = config["base_url"]

        # BUGFIX: the tool methods are coroutines that ``await`` client calls,
        # so use the async client; the sync supermemory.Supermemory client
        # returned plain objects and every await raised TypeError.
        self.client = supermemory.AsyncSupermemory(**client_kwargs)

        # Resolve container tags: explicit project id wins, then explicit
        # tags, then a shared default bucket.
        if config.get("project_id"):
            self.container_tags = [f"sm_project_{config['project_id']}"]
        elif config.get("container_tags"):
            self.container_tags = config["container_tags"]
        else:
            self.container_tags = ["sm_project_default"]

    def get_tool_definitions(self) -> List[ChatCompletionToolParam]:
        """Get OpenAI function definitions for all memory tools.

        Returns:
            List of ChatCompletionToolParam definitions
        """
        return [
            {"type": "function", "function": MEMORY_TOOL_SCHEMAS["search_memories"]},
            {"type": "function", "function": MEMORY_TOOL_SCHEMAS["add_memory"]},
        ]

    async def execute_tool_call(self, tool_call: ChatCompletionMessageToolCall) -> str:
        """Execute a tool call based on the function name and arguments.

        Args:
            tool_call: The tool call from OpenAI

        Returns:
            JSON string result
        """
        function_name = tool_call.function.name
        args = json.loads(tool_call.function.arguments)

        if function_name == "search_memories":
            result = await self.search_memories(**args)
        elif function_name == "add_memory":
            result = await self.add_memory(**args)
        else:
            result = {
                "success": False,
                "error": f"Unknown function: {function_name}",
            }

        def _jsonable(value: object) -> object:
            # BUGFIX: results may contain pydantic models from the
            # supermemory SDK, which json.dumps cannot serialize natively.
            dump = getattr(value, "model_dump", None)
            return dump() if callable(dump) else str(value)

        return json.dumps(result, default=_jsonable)

    async def search_memories(
        self,
        information_to_get: str,
        include_full_docs: bool = True,
        limit: int = 10,
    ) -> MemorySearchResult:
        """Search memories.

        Args:
            information_to_get: Terms to search for
            include_full_docs: Whether to include full document content
            limit: Maximum number of results

        Returns:
            MemorySearchResult (success flag plus results/count or error)
        """
        try:
            response: SearchExecuteResponse = await self.client.search.execute(
                q=information_to_get,
                container_tags=self.container_tags,
                limit=limit,
                chunk_threshold=0.6,
                include_full_docs=include_full_docs,
            )

            # Guard against a missing results list so count never raises.
            found = response.results or []
            return MemorySearchResult(
                success=True,
                results=found,
                count=len(found),
            )
        except Exception as error:
            # Errors are reported back to the model, not raised.
            return MemorySearchResult(
                success=False,
                error=str(error),
            )

    async def add_memory(self, memory: str) -> MemoryAddResult:
        """Add a memory.

        Args:
            memory: The memory content to add

        Returns:
            MemoryAddResult (success flag plus the stored memory or error)
        """
        try:
            # NOTE: metadata support was dead code (always empty); reintroduce
            # it as a real parameter if callers ever need it.
            response: MemoryAddResponse = await self.client.memories.add(
                content=memory,
                container_tags=self.container_tags,
            )

            return MemoryAddResult(
                success=True,
                memory=response,
            )
        except Exception as error:
            return MemoryAddResult(
                success=False,
                error=str(error),
            )
+
+
def create_supermemory_tools(
    api_key: str, config: Optional[SupermemoryToolsConfig] = None
) -> SupermemoryTools:
    """Convenience factory around the SupermemoryTools constructor.

    Args:
        api_key: Supermemory API key
        config: Optional configuration

    Returns:
        SupermemoryTools instance
    """
    tools = SupermemoryTools(api_key, config)
    return tools
+
+
def get_memory_tool_definitions() -> List[ChatCompletionToolParam]:
    """Get OpenAI function definitions for memory tools.

    Returns:
        List of ChatCompletionToolParam definitions
    """
    # Same payloads as SupermemoryTools.get_tool_definitions, but usable
    # without constructing a client.
    return [
        {"type": "function", "function": MEMORY_TOOL_SCHEMAS[name]}
        for name in ("search_memories", "add_memory")
    ]
+
+
async def execute_memory_tool_calls(
    api_key: str,
    tool_calls: List[ChatCompletionMessageToolCall],
    config: Optional[SupermemoryToolsConfig] = None,
) -> List[ChatCompletionToolMessageParam]:
    """Execute tool calls from OpenAI function calling.

    Args:
        api_key: Supermemory API key
        tool_calls: List of tool calls from OpenAI
        config: Optional configuration

    Returns:
        List of tool message parameters, in the same order as ``tool_calls``
    """
    # Local import kept function-scoped to leave the module header untouched.
    import asyncio

    tools = SupermemoryTools(api_key, config)

    async def run_one(
        call: ChatCompletionMessageToolCall,
    ) -> ChatCompletionToolMessageParam:
        payload = await tools.execute_tool_call(call)
        return ChatCompletionToolMessageParam(
            tool_call_id=call.id,
            role="tool",
            content=payload,
        )

    # gather runs the calls concurrently and preserves input order.
    return await asyncio.gather(*(run_one(call) for call in tool_calls))
+
+
+# Individual tool creators for more granular control
class SearchMemoriesTool:
    """Individual search memories tool."""

    def __init__(self, api_key: str, config: Optional[SupermemoryToolsConfig] = None):
        # All real work is delegated to the aggregate tools object.
        self.tools = SupermemoryTools(api_key, config)
        # OpenAI function-calling definition for this single tool.
        self.definition: ChatCompletionToolParam = {
            "type": "function",
            "function": MEMORY_TOOL_SCHEMAS["search_memories"],
        }

    async def execute(
        self,
        information_to_get: str,
        include_full_docs: bool = True,
        limit: int = 10,
    ) -> MemorySearchResult:
        """Execute search memories."""
        return await self.tools.search_memories(
            information_to_get=information_to_get,
            include_full_docs=include_full_docs,
            limit=limit,
        )
+
+
class AddMemoryTool:
    """Individual add memory tool."""

    def __init__(self, api_key: str, config: Optional[SupermemoryToolsConfig] = None):
        # All real work is delegated to the aggregate tools object.
        self.tools = SupermemoryTools(api_key, config)
        # OpenAI function-calling definition for this single tool.
        self.definition: ChatCompletionToolParam = {
            "type": "function",
            "function": MEMORY_TOOL_SCHEMAS["add_memory"],
        }

    async def execute(self, memory: str) -> MemoryAddResult:
        """Execute add memory."""
        return await self.tools.add_memory(memory=memory)
+
+
def create_search_memories_tool(
    api_key: str, config: Optional[SupermemoryToolsConfig] = None
) -> SearchMemoriesTool:
    """Convenience factory for a standalone search-memories tool.

    Args:
        api_key: Supermemory API key
        config: Optional configuration

    Returns:
        SearchMemoriesTool instance
    """
    tool = SearchMemoriesTool(api_key, config)
    return tool
+
+
def create_add_memory_tool(
    api_key: str, config: Optional[SupermemoryToolsConfig] = None
) -> AddMemoryTool:
    """Convenience factory for a standalone add-memory tool.

    Args:
        api_key: Supermemory API key
        config: Optional configuration

    Returns:
        AddMemoryTool instance
    """
    tool = AddMemoryTool(api_key, config)
    return tool