aboutsummaryrefslogtreecommitdiff
path: root/packages/openai-sdk-python
diff options
context:
space:
mode:
authorCodeWithShreyans <[email protected]>2025-09-02 23:11:19 +0000
committerCodeWithShreyans <[email protected]>2025-09-02 23:11:19 +0000
commitcae7051d1a0547e78a8d32d865a89778456707ce (patch)
treeb5fbc018dc6e38b2618241046a8bf8129f34d4f2 /packages/openai-sdk-python
parentux: support integration (#405) (diff)
downloadsupermemory-cae7051d1a0547e78a8d32d865a89778456707ce.tar.xz
supermemory-cae7051d1a0547e78a8d32d865a89778456707ce.zip
feat: new tools package (#407)
Diffstat (limited to 'packages/openai-sdk-python')
-rw-r--r--packages/openai-sdk-python/README.md131
-rw-r--r--packages/openai-sdk-python/pyproject.toml2
-rw-r--r--packages/openai-sdk-python/src/__init__.py20
-rw-r--r--packages/openai-sdk-python/src/infinite_chat.py268
-rw-r--r--packages/openai-sdk-python/tests/test_infinite_chat.py387
5 files changed, 23 insertions, 785 deletions
diff --git a/packages/openai-sdk-python/README.md b/packages/openai-sdk-python/README.md
index fdc03b54..1f4154bc 100644
--- a/packages/openai-sdk-python/README.md
+++ b/packages/openai-sdk-python/README.md
@@ -1,17 +1,8 @@
# Supermemory OpenAI Python SDK
-Enhanced OpenAI Python SDK with Supermemory infinite context integration.
+Memory tools for OpenAI function calling with Supermemory integration.
-This package extends the official [OpenAI Python SDK](https://github.com/openai/openai-python) with [Supermemory](https://supermemory.ai) capabilities, enabling infinite context chat completions and memory management tools.
-
-## Features
-
-- 🚀 **Infinite Context**: Chat completions with unlimited conversation history
-- 🧠 **Memory Tools**: Search, add, and fetch user memories seamlessly
-- 🔌 **Multiple Providers**: Support for OpenAI, Anthropic, Groq, and more
-- 🛠 **Function Calling**: Built-in memory tools for OpenAI function calling
-- 🔒 **Type Safe**: Full TypeScript-style type hints for Python
- ⚡ **Async Support**: Full async/await support
+This package provides memory management tools for the official [OpenAI Python SDK](https://github.com/openai/openai-python) using [Supermemory](https://supermemory.ai) capabilities.
## Installation
@@ -29,58 +20,26 @@ pip install supermemory-openai
## Quick Start
-### Basic Chat Completion
+### Using Memory Tools with OpenAI
```python
import asyncio
-from supermemory_openai import SupermemoryOpenAI, SupermemoryInfiniteChatConfigWithProviderName
+import openai
+from supermemory_openai import SupermemoryTools, execute_memory_tool_calls
async def main():
- # Initialize client
- client = SupermemoryOpenAI(
- supermemory_api_key="your-supermemory-api-key",
- config=SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key="your-openai-api-key",
- )
- )
-
- # Create chat completion
- response = await client.chat_completion(
- messages=[
- {"role": "user", "content": "Hello, how are you?"}
- ],
- model="gpt-4o"
- )
-
- print(response.choices[0].message.content)
-
-asyncio.run(main())
-```
-
-### Using Memory Tools
-
-```python
-import asyncio
-from supermemory_openai import SupermemoryOpenAI, SupermemoryTools, SupermemoryInfiniteChatConfigWithProviderName
-
-async def main():
- # Initialize client and tools
- client = SupermemoryOpenAI(
- supermemory_api_key="your-supermemory-api-key",
- config=SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key="your-openai-api-key",
- )
- )
+ # Initialize OpenAI client
+ client = openai.AsyncOpenAI(api_key="your-openai-api-key")
+ # Initialize Supermemory tools
tools = SupermemoryTools(
api_key="your-supermemory-api-key",
config={"project_id": "my-project"}
)
# Chat with memory tools
- response = await client.chat_completion(
+ response = await client.chat.completions.create(
+ model="gpt-4o",
messages=[
{
"role": "system",
@@ -91,10 +50,18 @@ async def main():
"content": "Remember that I prefer tea over coffee"
}
],
- tools=tools.get_tool_definitions(),
- model="gpt-4o"
+ tools=tools.get_tool_definitions()
)
+ # Handle tool calls if present
+ if response.choices[0].message.tool_calls:
+ tool_results = await execute_memory_tool_calls(
+ api_key="your-supermemory-api-key",
+ tool_calls=response.choices[0].message.tool_calls,
+ config={"project_id": "my-project"}
+ )
+ print("Tool results:", tool_results)
+
print(response.choices[0].message.content)
asyncio.run(main())
@@ -102,42 +69,6 @@ asyncio.run(main())
## Configuration
-### Provider Configuration
-
-#### Using Provider Names
-
-```python
-from supermemory_openai import SupermemoryInfiniteChatConfigWithProviderName
-
-config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai", # or "anthropic", "groq", "openrouter", etc.
- provider_api_key="your-provider-api-key",
- headers={"custom-header": "value"} # optional
-)
-```
-
-#### Using Custom URLs
-
-```python
-from supermemory_openai import SupermemoryInfiniteChatConfigWithProviderUrl
-
-config = SupermemoryInfiniteChatConfigWithProviderUrl(
- provider_url="https://your-custom-endpoint.com/v1",
- provider_api_key="your-provider-api-key",
- headers={"custom-header": "value"} # optional
-)
-```
-
-### Supported Providers
-
-- `openai` - OpenAI API
-- `anthropic` - Anthropic Claude
-- `openrouter` - OpenRouter
-- `deepinfra` - DeepInfra
-- `groq` - Groq
-- `google` - Google AI
-- `cloudflare` - Cloudflare Workers AI
-
## Memory Tools
### SupermemoryTools Class
@@ -205,24 +136,6 @@ if response.choices[0].message.tool_calls:
## API Reference
-### SupermemoryOpenAI
-
-Enhanced OpenAI client with infinite context support.
-
-#### Constructor
-
-```python
-SupermemoryOpenAI(
- supermemory_api_key: str,
- config: Optional[SupermemoryInfiniteChatConfig] = None
-)
-```
-
-#### Methods
-
-- `chat_completion()` - Create chat completion with simplified interface
-- `create_chat_completion()` - Create chat completion with full OpenAI parameters
-
### SupermemoryTools
Memory management tools for function calling.
@@ -261,9 +174,7 @@ except Exception as e:
Set these environment variables for testing:
- `SUPERMEMORY_API_KEY` - Your Supermemory API key
-- `PROVIDER_API_KEY` - Your AI provider API key
-- `PROVIDER_NAME` - Provider name (default: "openai")
-- `PROVIDER_URL` - Custom provider URL (optional)
+- `OPENAI_API_KEY` - Your OpenAI API key
- `MODEL_NAME` - Model to use (default: "gpt-4o-mini")
- `SUPERMEMORY_BASE_URL` - Custom Supermemory base URL (optional)
diff --git a/packages/openai-sdk-python/pyproject.toml b/packages/openai-sdk-python/pyproject.toml
index 78ea3000..d674fec9 100644
--- a/packages/openai-sdk-python/pyproject.toml
+++ b/packages/openai-sdk-python/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "hatchling.build"
[project]
name = "supermemory-openai-sdk"
version = "1.0.0"
-description = "OpenAI SDK utilities for supermemory"
+description = "Memory tools for OpenAI function calling with supermemory"
readme = "README.md"
license = { text = "MIT" }
keywords = ["openai", "supermemory", "ai", "memory"]
diff --git a/packages/openai-sdk-python/src/__init__.py b/packages/openai-sdk-python/src/__init__.py
index 47a7569e..b8564471 100644
--- a/packages/openai-sdk-python/src/__init__.py
+++ b/packages/openai-sdk-python/src/__init__.py
@@ -1,14 +1,4 @@
-"""Supermemory OpenAI SDK - Enhanced OpenAI Python SDK with infinite context."""
-
-from .infinite_chat import (
- SupermemoryOpenAI,
- SupermemoryInfiniteChatConfig,
- SupermemoryInfiniteChatConfigWithProviderName,
- SupermemoryInfiniteChatConfigWithProviderUrl,
- ProviderName,
- PROVIDER_MAP,
- create_supermemory_openai,
-)
+"""Supermemory OpenAI SDK - Memory tools for OpenAI function calling."""
from .tools import (
SupermemoryTools,
@@ -29,14 +19,6 @@ from .tools import (
__version__ = "0.1.0"
__all__ = [
- # Infinite Chat
- "SupermemoryOpenAI",
- "SupermemoryInfiniteChatConfig",
- "SupermemoryInfiniteChatConfigWithProviderName",
- "SupermemoryInfiniteChatConfigWithProviderUrl",
- "ProviderName",
- "PROVIDER_MAP",
- "create_supermemory_openai",
# Tools
"SupermemoryTools",
"SupermemoryToolsConfig",
diff --git a/packages/openai-sdk-python/src/infinite_chat.py b/packages/openai-sdk-python/src/infinite_chat.py
deleted file mode 100644
index 1d3890ae..00000000
--- a/packages/openai-sdk-python/src/infinite_chat.py
+++ /dev/null
@@ -1,268 +0,0 @@
-"""Enhanced OpenAI client with Supermemory infinite context integration."""
-
-from typing import Dict, List, Optional, Union, overload, Unpack
-from typing_extensions import Literal
-
-from openai import OpenAI, AsyncStream
-from openai.types.chat import (
- ChatCompletion,
- ChatCompletionMessageParam,
- ChatCompletionToolParam,
- ChatCompletionToolChoiceOptionParam,
- CompletionCreateParams,
-)
-
-
-# Provider URL mapping
-PROVIDER_MAP = {
- "openai": "https://api.openai.com/v1",
- "anthropic": "https://api.anthropic.com/v1",
- "openrouter": "https://openrouter.ai/api/v1",
- "deepinfra": "https://api.deepinfra.com/v1/openai",
- "groq": "https://api.groq.com/openai/v1",
- "google": "https://generativelanguage.googleapis.com/v1beta/openai",
- "cloudflare": "https://gateway.ai.cloudflare.com/v1/*/unlimited-context/openai",
-}
-
-ProviderName = Literal[
- "openai", "anthropic", "openrouter", "deepinfra", "groq", "google", "cloudflare"
-]
-
-
-class SupermemoryInfiniteChatConfigBase:
- """Base configuration for Supermemory infinite chat."""
-
- def __init__(
- self,
- provider_api_key: str,
- headers: Optional[Dict[str, str]] = None,
- ):
- self.provider_api_key = provider_api_key
- self.headers = headers or {}
-
-
-class SupermemoryInfiniteChatConfigWithProviderName(SupermemoryInfiniteChatConfigBase):
- """Configuration using a predefined provider name."""
-
- def __init__(
- self,
- provider_name: ProviderName,
- provider_api_key: str,
- headers: Optional[Dict[str, str]] = None,
- ):
- super().__init__(provider_api_key, headers)
- self.provider_name = provider_name
- self.provider_url: None = None
-
-
-class SupermemoryInfiniteChatConfigWithProviderUrl(SupermemoryInfiniteChatConfigBase):
- """Configuration using a custom provider URL."""
-
- def __init__(
- self,
- provider_url: str,
- provider_api_key: str,
- headers: Optional[Dict[str, str]] = None,
- ):
- super().__init__(provider_api_key, headers)
- self.provider_url = provider_url
- self.provider_name: None = None
-
-
-SupermemoryInfiniteChatConfig = Union[
- SupermemoryInfiniteChatConfigWithProviderName,
- SupermemoryInfiniteChatConfigWithProviderUrl,
-]
-
-
-class SupermemoryOpenAI(OpenAI):
- """Enhanced OpenAI client with supermemory integration.
-
- Only chat completions are supported - all other OpenAI API endpoints are disabled.
- """
-
- def __init__(
- self,
- supermemory_api_key: str,
- config: Optional[SupermemoryInfiniteChatConfig] = None,
- ):
- """Initialize the SupermemoryOpenAI client.
-
- Args:
- supermemory_api_key: API key for Supermemory service
- config: Configuration for the AI provider
- """
- # Determine base URL
- if config is None:
- base_url = "https://api.openai.com/v1"
- api_key = None
- headers = {}
- elif hasattr(config, "provider_name") and config.provider_name:
- base_url = PROVIDER_MAP[config.provider_name]
- api_key = config.provider_api_key
- headers = config.headers
- else:
- base_url = config.provider_url
- api_key = config.provider_api_key
- headers = config.headers
-
- # Prepare default headers
- default_headers = {
- "x-supermemory-api-key": supermemory_api_key,
- **headers,
- }
-
- # Initialize the parent OpenAI client
- super().__init__(
- api_key=api_key,
- base_url=base_url,
- default_headers=default_headers,
- )
-
- self._supermemory_api_key = supermemory_api_key
-
- # Disable unsupported endpoints
- self._disable_unsupported_endpoints()
-
- def _disable_unsupported_endpoints(self) -> None:
- """Disable all OpenAI endpoints except chat completions."""
-
- def unsupported_error() -> None:
- raise RuntimeError(
- "Supermemory only supports chat completions. "
- "Use chat_completion() or chat.completions.create() instead."
- )
-
- # List of endpoints to disable
- endpoints = [
- "embeddings",
- "fine_tuning",
- "images",
- "audio",
- "models",
- "moderations",
- "files",
- "batches",
- "uploads",
- "beta",
- ]
-
- # Override endpoints with error function
- for endpoint in endpoints:
- setattr(self, endpoint, property(lambda self: unsupported_error()))
-
- async def create_chat_completion(
- self,
- **params: Unpack[CompletionCreateParams],
- ) -> ChatCompletion:
- """Create chat completions with infinite context support.
-
- Args:
- **params: Parameters for chat completion
-
- Returns:
- ChatCompletion response
- """
- return await self.chat.completions.create(**params)
-
- @overload
- async def chat_completion(
- self,
- messages: List[ChatCompletionMessageParam],
- *,
- model: Optional[str] = None,
- temperature: Optional[float] = None,
- max_tokens: Optional[int] = None,
- tools: Optional[List[ChatCompletionToolParam]] = None,
- tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
- stream: Literal[False] = False,
- **kwargs: Unpack[CompletionCreateParams],
- ) -> ChatCompletion: ...
-
- @overload
- async def chat_completion(
- self,
- messages: List[ChatCompletionMessageParam],
- *,
- model: Optional[str] = None,
- temperature: Optional[float] = None,
- max_tokens: Optional[int] = None,
- tools: Optional[List[ChatCompletionToolParam]] = None,
- tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
- stream: Literal[True],
- **kwargs: Unpack[CompletionCreateParams],
- ) -> AsyncStream[ChatCompletion]: ...
-
- async def chat_completion(
- self,
- messages: List[ChatCompletionMessageParam],
- *,
- model: Optional[str] = None,
- temperature: Optional[float] = None,
- max_tokens: Optional[int] = None,
- tools: Optional[List[ChatCompletionToolParam]] = None,
- tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None,
- stream: bool = False,
- **kwargs: Unpack[CompletionCreateParams],
- ) -> Union[ChatCompletion, AsyncStream[ChatCompletion]]:
- """Create chat completions with simplified interface.
-
- Args:
- messages: List of chat messages
- model: Model to use (defaults to gpt-4o)
- temperature: Sampling temperature
- max_tokens: Maximum tokens to generate
- tools: Available tools for function calling
- tool_choice: Tool choice strategy
- stream: Whether to stream the response
- **kwargs: Additional parameters
-
- Returns:
- ChatCompletion response or stream
- """
- params: Dict[
- str,
- Union[
- str,
- List[ChatCompletionMessageParam],
- List[ChatCompletionToolParam],
- ChatCompletionToolChoiceOptionParam,
- bool,
- float,
- int,
- ],
- ] = {
- "model": model or "gpt-4o",
- "messages": messages,
- **kwargs,
- }
-
- # Add optional parameters if provided
- if temperature is not None:
- params["temperature"] = temperature
- if max_tokens is not None:
- params["max_tokens"] = max_tokens
- if tools is not None:
- params["tools"] = tools
- if tool_choice is not None:
- params["tool_choice"] = tool_choice
- if stream is not None:
- params["stream"] = stream
-
- return await self.chat.completions.create(**params)
-
-
-def create_supermemory_openai(
- supermemory_api_key: str,
- config: Optional[SupermemoryInfiniteChatConfig] = None,
-) -> SupermemoryOpenAI:
- """Helper function to create a SupermemoryOpenAI instance.
-
- Args:
- supermemory_api_key: API key for Supermemory service
- config: Configuration for the AI provider
-
- Returns:
- SupermemoryOpenAI instance
- """
- return SupermemoryOpenAI(supermemory_api_key, config)
diff --git a/packages/openai-sdk-python/tests/test_infinite_chat.py b/packages/openai-sdk-python/tests/test_infinite_chat.py
deleted file mode 100644
index 9fdf52c5..00000000
--- a/packages/openai-sdk-python/tests/test_infinite_chat.py
+++ /dev/null
@@ -1,387 +0,0 @@
-"""Tests for infinite_chat module."""
-
-import os
-import pytest
-from typing import List
-
-from openai.types.chat import ChatCompletionMessageParam
-from ..src import (
- SupermemoryOpenAI,
- SupermemoryInfiniteChatConfigWithProviderName,
- SupermemoryInfiniteChatConfigWithProviderUrl,
- ProviderName,
-)
-
-
-# Test configuration
-PROVIDERS: List[ProviderName] = [
- "openai",
- "anthropic",
- "openrouter",
- "deepinfra",
- "groq",
- "google",
- "cloudflare",
-]
-
-
-def test_api_key() -> str:
- """Get test Supermemory API key from environment."""
- api_key = os.getenv("SUPERMEMORY_API_KEY")
- if not api_key:
- pytest.skip("SUPERMEMORY_API_KEY environment variable is required for tests")
- return api_key
-
-
-def test_provider_api_key() -> str:
- """Get test provider API key from environment."""
- api_key = os.getenv("PROVIDER_API_KEY")
- if not api_key:
- pytest.skip("PROVIDER_API_KEY environment variable is required for tests")
- return api_key
-
-
-def test_provider_name() -> ProviderName:
- """Get test provider name from environment."""
- provider_name = os.getenv("PROVIDER_NAME", "openai")
- if provider_name not in PROVIDERS:
- pytest.fail(f"Invalid provider name: {provider_name}")
- return provider_name # type: ignore
-
-
-def test_provider_url() -> str:
- """Get test provider URL from environment."""
- return os.getenv("PROVIDER_URL", "")
-
-
-def test_model_name() -> str:
- """Get test model name from environment."""
- return os.getenv("MODEL_NAME", "gpt-4o-mini")
-
-
-def test_headers() -> dict:
- """Get test headers."""
- return {"custom-header": "test-value"}
-
-
-def test_messages() -> List[List[ChatCompletionMessageParam]]:
- """Test message sets."""
- return [
- [{"role": "user", "content": "Hello!"}],
- [
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "What is AI?"},
- ],
- [
- {"role": "user", "content": "Tell me a joke"},
- {
- "role": "assistant",
- "content": "Why don't scientists trust atoms? Because they make up everything!",
- },
- {"role": "user", "content": "Tell me another one"},
- ],
- ]
-
-
-class TestClientCreation:
- """Test client creation."""
-
- def test_create_client_with_provider_name(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_headers: dict,
- ):
- """Test creating client with provider name configuration."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers=test_headers,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
- assert client.chat is not None
-
- def test_create_client_with_openai_provider(
- self, test_api_key: str, test_provider_api_key: str, test_headers: dict
- ):
- """Test creating client with OpenAI provider configuration."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key=test_provider_api_key,
- headers=test_headers,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
-
- def test_create_client_with_custom_provider_url(
- self, test_api_key: str, test_provider_api_key: str, test_headers: dict
- ):
- """Test creating client with custom provider URL."""
- custom_url = "https://custom-provider.com/v1"
- config = SupermemoryInfiniteChatConfigWithProviderUrl(
- provider_url=custom_url,
- provider_api_key=test_provider_api_key,
- headers=test_headers,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
-
-
-class TestChatCompletions:
- """Test chat completions functionality."""
-
- @pytest.mark.asyncio
- async def test_create_chat_completion_simple_message(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_model_name: str,
- test_messages: List[List[ChatCompletionMessageParam]],
- ):
- """Test creating chat completion with simple message."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- result = await client.create_chat_completion(
- model=test_model_name,
- messages=test_messages[0], # "Hello!"
- )
-
- assert result is not None
- assert hasattr(result, "choices")
- assert len(result.choices) > 0
- assert result.choices[0].message.content is not None
-
- @pytest.mark.asyncio
- async def test_chat_completion_convenience_method(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_model_name: str,
- test_messages: List[List[ChatCompletionMessageParam]],
- ):
- """Test chat completion using convenience method."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- result = await client.chat_completion(
- messages=test_messages[1], # System + user messages
- model=test_model_name,
- temperature=0.7,
- )
-
- assert result is not None
- assert hasattr(result, "choices")
- assert len(result.choices) > 0
- assert result.choices[0].message.content is not None
-
- @pytest.mark.asyncio
- async def test_handle_conversation_history(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_model_name: str,
- test_messages: List[List[ChatCompletionMessageParam]],
- ):
- """Test handling conversation history."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- result = await client.chat_completion(
- messages=test_messages[2], # Multi-turn conversation
- model=test_model_name,
- )
-
- assert result is not None
- assert hasattr(result, "choices")
- assert len(result.choices) > 0
- assert result.choices[0].message.content is not None
-
- @pytest.mark.asyncio
- async def test_custom_headers(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- test_model_name: str,
- ):
- """Test working with custom headers."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={"x-custom-header": "test-value"},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- result = await client.chat_completion(
- messages=[{"role": "user", "content": "Hello"}],
- model=test_model_name,
- )
-
- assert result is not None
- assert hasattr(result, "choices")
-
-
-class TestConfigurationValidation:
- """Test configuration validation."""
-
- def test_handle_empty_headers_object(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- ):
- """Test handling empty headers object."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- headers={},
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
-
- def test_handle_configuration_without_headers(
- self,
- test_api_key: str,
- test_provider_api_key: str,
- test_provider_name: ProviderName,
- ):
- """Test handling configuration without headers."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name=test_provider_name,
- provider_api_key=test_provider_api_key,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- assert client is not None
-
- def test_handle_different_api_keys(self):
- """Test handling different API keys."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key="different-provider-key",
- )
-
- client = SupermemoryOpenAI("different-sm-key", config)
-
- assert client is not None
-
-
-class TestDisabledEndpoints:
- """Test that non-chat endpoints are disabled."""
-
- def test_disabled_endpoints_throw_errors(
- self, test_api_key: str, test_provider_api_key: str
- ):
- """Test that all disabled endpoints throw appropriate errors."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key=test_provider_api_key,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- # Test that all disabled endpoints throw appropriate errors
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.embeddings
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.fine_tuning
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.images
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.audio
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.models
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.moderations
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.files
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.batches
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.uploads
-
- with pytest.raises(
- RuntimeError, match="Supermemory only supports chat completions"
- ):
- _ = client.beta
-
- def test_chat_completions_still_work(
- self, test_api_key: str, test_provider_api_key: str
- ):
- """Test that chat completions still work after disabling other endpoints."""
- config = SupermemoryInfiniteChatConfigWithProviderName(
- provider_name="openai",
- provider_api_key=test_provider_api_key,
- )
-
- client = SupermemoryOpenAI(test_api_key, config)
-
- # Chat completions should still be accessible
- assert client.chat is not None
- assert client.chat.completions is not None
- assert callable(client.create_chat_completion)
- assert callable(client.chat_completion)