Configuration
Remina uses a unified configuration system powered by Pydantic.
Configuration Structure
from remina import Memory
config = {
# L1 Cache (Redis - fixed)
"cache": { ... },
# L2 Storage (pluggable)
"storage": { ... },
# Vector Store (pluggable)
"vector_store": { ... },
# Embeddings (pluggable)
"embedder": { ... },
# LLM (pluggable)
"llm": { ... },
# Scoring weights
"weight_recency": 0.4,
"weight_frequency": 0.3,
"weight_importance": 0.3,
# Thresholds
"consolidation_threshold": 0.85,
"dedup_threshold": 0.9,
"max_facts_per_conversation": 10,
}
memory = Memory(config)Cache Configuration
Redis L1 cache is fixed (not pluggable) for performance consistency.
"cache": {
"redis_url": "redis://localhost:6379",
"ttl_seconds": 3600,
"max_memories_per_user": 100,
"enabled": True,
"key_prefix": "remina",
}| Option | Type | Default | Description |
|---|---|---|---|
redis_url | str | redis://localhost:6379 | Redis connection URL |
ttl_seconds | int | 3600 | Cache TTL |
max_memories_per_user | int | 100 | Max cached per user |
enabled | bool | True | Enable/disable caching |
key_prefix | str | remina | Redis key prefix |
Storage Configuration
L2 persistent storage. Select one provider.
SQLite (Default)
"storage": {
"provider": "sqlite",
"config": {
"path": "~/.remina/memories.db",
"collection_name": "memories",
}
}PostgreSQL
"storage": {
"provider": "postgres",
"config": {
"host": "localhost",
"port": 5432,
"database": "remina",
"user": "postgres",
"password": "password",
"collection_name": "memories",
"min_connections": 1,
"max_connections": 10,
}
}MongoDB
"storage": {
"provider": "mongodb",
"config": {
"uri": "mongodb://localhost:27017",
"database": "remina",
"collection_name": "memories",
}
}Vector Store Configuration
Chroma (Default)
"vector_store": {
"provider": "chroma",
"config": {
"path": "~/.remina/chroma",
"collection_name": "remina",
}
}Qdrant
"vector_store": {
"provider": "qdrant",
"config": {
"url": "http://localhost:6333",
"api_key": None,
"collection_name": "remina",
"embedding_dims": 1536,
}
}Pinecone
"vector_store": {
"provider": "pinecone",
"config": {
"api_key": "your-api-key",
"index_name": "remina",
"namespace": "default",
"embedding_dims": 768,
"cloud": "aws",
"region": "us-east-1",
}
}Embedder Configuration
OpenAI (Default)
"embedder": {
"provider": "openai",
"config": {
"api_key": None, # Uses OPENAI_API_KEY env
"model": "text-embedding-3-small",
"dimensions": 1536,
"base_url": None,
}
}Google Gemini
"embedder": {
"provider": "gemini",
"config": {
"api_key": None, # Uses GOOGLE_API_KEY env
"model": "models/text-embedding-004",
"dimensions": 768,
}
}Cohere
"embedder": {
"provider": "cohere",
"config": {
"api_key": None, # Uses COHERE_API_KEY env
"model": "embed-english-v3.0",
}
}Ollama (Local)
"embedder": {
"provider": "ollama",
"config": {
"base_url": "http://localhost:11434",
"model": "nomic-embed-text",
}
}HuggingFace (Local)
"embedder": {
"provider": "huggingface",
"config": {
"model": "sentence-transformers/all-MiniLM-L6-v2",
"device": "cpu",
}
}LLM Configuration
OpenAI (Default)
"llm": {
"provider": "openai",
"config": {
"api_key": None, # Uses OPENAI_API_KEY env
"model": "gpt-4o-mini",
"temperature": 0.1,
"max_tokens": 2000,
"base_url": None,
}
}Google Gemini
"llm": {
"provider": "gemini",
"config": {
"api_key": None, # Uses GOOGLE_API_KEY env
"model": "gemini-2.0-flash",
"temperature": 0.1,
"max_tokens": 2000,
}
}Anthropic Claude
"llm": {
"provider": "anthropic",
"config": {
"api_key": None, # Uses ANTHROPIC_API_KEY env
"model": "claude-3-5-sonnet-20240620",
"temperature": 0.1,
"max_tokens": 2000,
}
}Ollama (Local)
"llm": {
"provider": "ollama",
"config": {
"base_url": "http://localhost:11434",
"model": "llama3.2",
"temperature": 0.1,
}
}Scoring Weights
Control memory ranking during search:
# Must sum to 1.0
"weight_recency": 0.4, # Temporal decay factor
"weight_frequency": 0.3, # Access count factor
"weight_importance": 0.3, # Base importance factorThresholds
# Consolidation: similarity threshold for merging
"consolidation_threshold": 0.85,
# Deduplication: similarity threshold for duplicate detection
"dedup_threshold": 0.9,
# Max facts extracted per conversation
"max_facts_per_conversation": 10,Environment Variables
| Variable | Provider |
|---|---|
OPENAI_API_KEY | OpenAI embeddings/LLM |
OPENAI_BASE_URL | Custom OpenAI endpoint |
GOOGLE_API_KEY | Gemini embeddings/LLM |
ANTHROPIC_API_KEY | Anthropic Claude |
COHERE_API_KEY | Cohere embeddings |
PINECONE_API_KEY | Pinecone vector store |
Complete Example
from remina import Memory
config = {
"cache": {
"redis_url": "redis://localhost:6379",
"ttl_seconds": 3600,
"enabled": True,
},
"storage": {
"provider": "postgres",
"config": {
"host": "localhost",
"port": 5432,
"database": "remina",
"user": "postgres",
"password": "password",
}
},
"vector_store": {
"provider": "qdrant",
"config": {
"url": "http://localhost:6333",
"collection_name": "memories",
"embedding_dims": 768,
}
},
"embedder": {
"provider": "gemini",
"config": {
"model": "models/text-embedding-004",
"dimensions": 768,
}
},
"llm": {
"provider": "gemini",
"config": {
"model": "gemini-2.0-flash",
"temperature": 0.1,
}
},
"weight_recency": 0.4,
"weight_frequency": 0.3,
"weight_importance": 0.3,
"dedup_threshold": 0.9,
}
memory = Memory(config)Pydantic Config Classes
For type safety:
from remina.configs.base import MemoryConfig
from remina.configs.storage import StorageConfig
from remina.configs.vectors import VectorStoreConfig
from remina.configs.embeddings import EmbedderConfig
from remina.configs.llms import LLMConfig
from remina.configs.cache import CacheConfig
config = MemoryConfig(
cache=CacheConfig(redis_url="redis://localhost:6379"),
storage=StorageConfig(provider="postgres", config={...}),
vector_store=VectorStoreConfig(provider="qdrant", config={...}),
embedder=EmbedderConfig(provider="gemini", config={...}),
llm=LLMConfig(provider="gemini", config={...}),
)
memory = Memory(config)Next Steps
- Providers — Detailed provider documentation
- Examples — Configuration examples
- Deployment — Production deployment