Store user preferences, conversation history, and facts in your AI application's memory system for intelligent recall and personalization.

Basic Usage

from gravixlayer import GravixLayer

client = GravixLayer()

# Initialize memory with all required parameters
memory = client.memory(
    embedding_model="baai/bge-large-en-v1.5",
    inference_model="google/gemma-3-12b-it",
    index_name="my_memories",
    cloud_provider="AWS",
    region="us-east-1"
)

# Add simple text
result = memory.add("I love pizza", user_id="alice")
print(f"Added memory: {result['results'][0]['memory']}")
print(f"Memory ID: {result['results'][0]['id']}")

# Add with metadata
result = memory.add("User prefers dark mode", user_id="alice", metadata={"type": "preference"})
print(f"Added preference: {result['results'][0]['memory']}")
print(f"Metadata: {result['results'][0]['metadata']}")

# Get all memories to verify
all_memories = memory.get_all(user_id="alice")
print(f"\nTotal memories for alice: {len(all_memories['results'])}")
for i, mem in enumerate(all_memories['results'], 1):
    print(f"{i}. {mem['memory']}")
    if mem.get('metadata'):
        print(f"   Metadata: {mem['metadata']}")

Expected Output:
Added memory: I love pizza
Memory ID: b355d0d2-3eaa-4bc6-a61b-48ee615279bf
Added preference: User prefers dark mode
Metadata: {'type': 'preference'}

Total memories for alice: 2
1. I love pizza
2. User prefers dark mode
   Metadata: {'type': 'preference'}
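
You can also retrieve memories by semantic search instead of listing them all. A minimal sketch continuing the script above, using the search method demonstrated in the sections below (the query string is illustrative):

# Semantic search returns the stored memories most relevant to the query
results = memory.search("what does alice like?", user_id="alice")
for hit in results['results']:
    print(f"- {hit['memory']}")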

Add Conversations

Store entire conversations and let AI extract key memories:

from gravixlayer import GravixLayer

client = GravixLayer()

# Initialize memory
memory = client.memory(
    embedding_model="baai/bge-large-en-v1.5",
    inference_model="google/gemma-3-12b-it",
    index_name="conversations",
    cloud_provider="AWS",
    region="us-east-1"
)

# Store a conversation with AI inference
conversation = [
    {"role": "user", "content": "I'm planning to watch a movie tonight. Any recommendations?"},
    {"role": "assistant", "content": "How about thriller movies? They can be quite engaging."},
    {"role": "user", "content": "I'm not a big fan of thriller movies but I love sci-fi movies."},
    {"role": "assistant", "content": "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future."}
]

result = memory.add(conversation, user_id="alice", infer=True, metadata={"type": "conversation"})
print(f"AI extracted {len(result['results'])} memories from conversation:")

for i, extracted_memory in enumerate(result['results'], 1):
    print(f"{i}. {extracted_memory['memory']}")
    print(f"   ID: {extracted_memory['id']}")
    if extracted_memory.get('metadata'):
        print(f"   Metadata: {extracted_memory['metadata']}")

# Verify by searching for movie preferences
search_results = memory.search("movie preferences", user_id="alice")
print(f"\nFound {len(search_results['results'])} movie-related memories:")
for hit in search_results['results']:
    print(f"- {hit['memory']}")

Expected Output:
AI extracted 2 memories from conversation:
1. User prefers sci-fi movies
   ID: c455d0d2-3eaa-4bc6-a61b-48ee615279bf
   Metadata: {'type': 'conversation'}
2. User dislikes thriller movies
   ID: d755d0d2-3eaa-4bc6-a61b-48ee615279bf
   Metadata: {'type': 'conversation'}

Found 2 movie-related memories:
- User prefers sci-fi movies
- User dislikes thriller movies
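
As the conversation continues, keep calling memory.add with the new turns and the same user_id so the extracted memories accumulate for that user. A minimal sketch continuing the script above (the follow-up messages are illustrative):

# Store a later exchange; inference extracts any new facts
follow_up = [
    {"role": "user", "content": "By the way, Interstellar is my all-time favorite."},
    {"role": "assistant", "content": "Noted! I'll keep that in mind for future sci-fi picks."}
]
result = memory.add(follow_up, user_id="alice", infer=True, metadata={"type": "conversation"})
for extracted in result['results']:
    print(f"- {extracted['memory']}")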

Multilingual Support

Initialize memory with a multilingual embedding model for multi-language support.

Configuration Parameters

All parameters except delete_protection are required:
  • embedding_model - converts text into searchable vectors
  • inference_model - AI model that extracts memories from conversations
  • index_name - where memories are stored (like folders)
  • cloud_provider - where your data is hosted
  • region - cloud region
  • delete_protection - protects the index from deletion (optional, default: False; see the sketch below)
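
delete_protection is the only optional parameter; when enabled, it guards the index against accidental deletion. A minimal sketch of turning it on (the index name here is hypothetical; all other values match the examples above):

from gravixlayer import GravixLayer

client = GravixLayer()

# Initialize an index that is protected from deletion
memory = client.memory(
    embedding_model="baai/bge-large-en-v1.5",
    inference_model="google/gemma-3-12b-it",
    index_name="protected_memories",  # hypothetical index name
    cloud_provider="AWS",
    region="us-east-1",
    delete_protection=True
)

The complete multilingual example below leaves delete_protection at its default:
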
from gravixlayer import GravixLayer

client = GravixLayer()

# Initialize with multilingual support
memory = client.memory(
    embedding_model="microsoft/multilingual-e5-large",  # Supports 100+ languages
    inference_model="google/gemma-3-12b-it",
    index_name="user_preferences",
    cloud_provider="GCP",
    region="us-central1",
    delete_protection=False  # Optional, defaults to False
)

# Add memories in different languages
result1 = memory.add("El usuario prefiere pizza", user_id="alice")
result2 = memory.add("L'utilisateur aime le café", user_id="alice")
result3 = memory.add("用户喜欢寿司", user_id="alice")

print("Added multilingual memories:")
print(f"Spanish: {result1['results'][0]['memory']}")
print(f"French: {result2['results'][0]['memory']}")
print(f"Chinese: {result3['results'][0]['memory']}")

# Check current configuration
config = memory.get_current_configuration()
print(f"\nCurrent configuration:")
print(f"Embedding model: {config['embedding_model']}")
print(f"Inference model: {config['inference_model']}")
print(f"Index name: {config['index_name']}")

# Search works across all languages
search_results = memory.search("food preferences", user_id="alice")
print(f"\nFound {len(search_results['results'])} food-related memories:")
for result in search_results['results']:
    print(f"- {result['memory']}")

Expected Output:
Added multilingual memories:
Spanish: El usuario prefiere pizza
French: L'utilisateur aime le café
Chinese: 用户喜欢寿司

Current configuration:
Embedding model: microsoft/multilingual-e5-large
Inference model: google/gemma-3-12b-it
Index name: user_preferences

Found 3 food-related memories:
- El usuario prefiere pizza
- L'utilisateur aime le café
- 用户喜欢寿司
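
Because queries are embedded with the same multilingual model, you can also search in a language other than English. A minimal sketch continuing the script above (the Spanish query is illustrative; the expected hits follow from the memories added earlier):

# "preferencias de comida" is Spanish for "food preferences"
results = memory.search("preferencias de comida", user_id="alice")
for hit in results['results']:
    print(f"- {hit['memory']}")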