# LangChain Integration

Use Remina as a persistent memory backend for LangChain conversational agents.
## Prerequisites

```bash
pip install "remina-memory[gemini,sqlite,chroma,redis]" langchain langchain-google-genai
```

## Overview
This integration provides:
- Persistent memory that survives across sessions
- Semantic retrieval of relevant context for each interaction
- Automatic fact extraction from conversations
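Concretely, these features map onto two Remina primitives: `Memory.add`, which runs fact extraction over raw messages, and `Memory.search`, which performs scored semantic retrieval. A minimal sketch, with call shapes taken from the wrapper implementation below:

```python
from remina import Memory

memory = Memory(remina_config)  # remina_config is defined in the Usage section below

# Fact extraction: Remina distills raw messages into stored memories.
memory.add(
    messages=[{"role": "user", "content": "I'm Alex, a backend engineer."}],
    user_id="user_001",
)

# Semantic retrieval: scored matches for a natural-language query.
hits = memory.search(query="What does Alex do?", user_id="user_001", limit=5)
for m in hits["results"]:
    print(m["score"], m["memory"])
```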
## Implementation

### Memory Wrapper
```python
from typing import Any, Dict, List, Optional

from remina import Memory


class ReminaLangChainMemory:
    """Remina-backed memory for LangChain conversations."""

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        self.memory = Memory(config)

    def get_relevant_context(self, query: str, user_id: str, limit: int = 5) -> str:
        """Retrieve relevant memories for the current query."""
        results = self.memory.search(
            query=query,
            user_id=user_id,
            limit=limit
        )
        if not results["results"]:
            return ""
        # Keep only reasonably confident matches.
        memories = [m["memory"] for m in results["results"] if m["score"] > 0.3]
        if not memories:
            return ""
        return "Relevant context about this user:\n" + "\n".join(f"- {m}" for m in memories)

    def store_interaction(
        self,
        user_message: str,
        assistant_message: str,
        user_id: str,
        metadata: Optional[Dict[str, Any]] = None
    ):
        """Store a conversation interaction in Remina."""
        self.memory.add(
            messages=[
                {"role": "user", "content": user_message},
                {"role": "assistant", "content": assistant_message},
            ],
            user_id=user_id,
            metadata=metadata or {}
        )

    def get_all_memories(self, user_id: str, limit: int = 100) -> List[str]:
        """Retrieve all memories for a user."""
        results = self.memory.get_all(user_id=user_id, limit=limit)
        return [m["memory"] for m in results["results"]]

    def clear_memories(self, user_id: str):
        """Clear all memories for a user."""
        self.memory.delete_all(user_id=user_id)

    def close(self):
        """Close memory connections."""
        self.memory.close()
```
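The wrapper is also usable on its own, without the conversation chain that follows. A minimal sketch, reusing the `remina_config` dict defined in the Usage section:

```python
memory = ReminaLangChainMemory(remina_config)

# Store one interaction, then retrieve semantically related context later.
memory.store_interaction(
    user_message="I prefer dark roast coffee.",
    assistant_message="Noted, dark roast it is.",
    user_id="user_001",
)

context = memory.get_relevant_context("What coffee should I order?", user_id="user_001")
print(context or "No relevant memories yet.")

memory.close()
```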
### Conversation Chain

```python
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder


class ReminaConversationChain:
    """LangChain conversation chain with Remina memory integration."""

    def __init__(
        self,
        model: str = "gemini-2.0-flash",
        remina_config: Optional[Dict[str, Any]] = None,
        temperature: float = 0.7
    ):
        self.llm = ChatGoogleGenerativeAI(model=model, temperature=temperature)
        self.remina = ReminaLangChainMemory(remina_config)
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", "{system_prompt}"),
            MessagesPlaceholder(variable_name="history"),
            ("human", "{input}")
        ])
        self.chain = self.prompt | self.llm

    def invoke(
        self,
        user_input: str,
        user_id: str,
        history: Optional[List] = None,
        system_prompt: Optional[str] = None
    ) -> str:
        """Process user input with memory-augmented context."""
        history = history or []

        # Retrieve relevant memories
        memory_context = self.remina.get_relevant_context(user_input, user_id)

        # Build the system prompt with memory context
        base_prompt = system_prompt or "You are a helpful AI assistant with memory capabilities."
        if memory_context:
            full_system_prompt = f"{base_prompt}\n\n{memory_context}"
        else:
            full_system_prompt = base_prompt

        # Invoke the chain
        response = self.chain.invoke({
            "system_prompt": full_system_prompt,
            "history": history,
            "input": user_input
        })
        assistant_message = response.content

        # Store the interaction in Remina
        self.remina.store_interaction(
            user_message=user_input,
            assistant_message=assistant_message,
            user_id=user_id
        )
        return assistant_message

    def get_memories(self, user_id: str) -> List[str]:
        return self.remina.get_all_memories(user_id)

    def clear_memories(self, user_id: str):
        self.remina.clear_memories(user_id)

    def close(self):
        self.remina.close()
```
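For quick manual testing, the chain can be driven from a small REPL loop. This sketch (the `repl` helper is illustrative, not part of the integration) keeps the in-session transcript in a plain list while Remina handles cross-session persistence:

```python
def repl(chain: ReminaConversationChain, user_id: str) -> None:
    """Minimal interactive loop; type 'quit' to exit."""
    history = []
    while True:
        user_input = input("You: ")
        if user_input.strip().lower() == "quit":
            break
        response = chain.invoke(user_input, user_id, history)
        print(f"Assistant: {response}")
        # Carry the in-session transcript to the next turn; Remina
        # separately persists extracted facts across sessions.
        history.extend([HumanMessage(content=user_input), AIMessage(content=response)])
```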
## Usage

```python
# Configuration
remina_config = {
    "cache": {
        "redis_url": "redis://localhost:6379",
        "enabled": True,
    },
    "storage": {
        "provider": "sqlite",
        "config": {"path": "~/.remina/langchain_demo.db"}
    },
    "vector_store": {
        "provider": "chroma",
        "config": {"path": "~/.remina/langchain_chroma"}
    },
    "embedder": {
        "provider": "gemini",
        "config": {
            "model": "models/text-embedding-004",
            "dimensions": 768
        }
    },
    "llm": {
        "provider": "gemini",
        "config": {"model": "gemini-2.0-flash"}
    },
}
```
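If Redis isn't running, the cache layer can likely be skipped. A lighter variant, assuming `"enabled": False` disables caching (the flag above suggests it does; check the Remina configuration reference):

```python
# Variant without Redis. Assumes "enabled": False turns the cache off
# entirely; verify against the Remina configuration reference.
remina_config_no_cache = {
    **remina_config,
    "cache": {"enabled": False},
}
```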
```python
# Initialize
chain = ReminaConversationChain(
    model="gemini-2.0-flash",
    remina_config=remina_config
)

user_id = "user_001"
history = []

# Conversation
user_input = "Hi, I'm Alex. I'm a backend engineer specializing in Python and Go."
response = chain.invoke(user_input, user_id, history)
print(f"Assistant: {response}")
history.extend([HumanMessage(content=user_input), AIMessage(content=response)])

# Memory persists across sessions
user_input = "What do you remember about me?"
response = chain.invoke(user_input, user_id, history=[])  # New session
print(f"Assistant: {response}")

chain.close()
```
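Before calling `chain.close()`, you can inspect or reset what Remina extracted from the conversation:

```python
# List the facts Remina extracted for this user.
for fact in chain.get_memories(user_id):
    print(f"- {fact}")

# Optionally start fresh for this user.
chain.clear_memories(user_id)
```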
## Running the Example

```bash
# Set API key
export GOOGLE_API_KEY="your-api-key"

# Start Redis
docker run -d -p 6379:6379 redis

# Run
python examples/integrations/langchain_memory.py
```