Hypothetical research exploring how Jido's GenServer architecture could implement sophisticated AI agent memory patterns, including working memory, semantic memory, episodic memory, and procedural memory
AI agent memory research identifies several key patterns that mirror human cognition:
Memory Types by Function: working memory holds the active context of the current task; semantic memory stores lasting facts and knowledge; episodic memory records specific experiences; procedural memory captures learned skills and procedures.
Memory Operations: encoding (deciding what to store), retrieval (recalling what is relevant), consolidation (strengthening, compressing, and forgetting over time), and sharing (exchanging memories between agents).
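One way to read these categories in Elixir terms, as a hedged sketch (the module and type names here are illustrative, not part of Jido's API):

  defmodule MyAgent.MemoryTypes do
    # Illustrative typespecs only - one plausible Elixir shape per memory type.

    @typedoc "Working memory: a bounded queue of recent messages"
    @type working :: :queue.queue(map())

    @typedoc "Semantic memory: a map of lasting facts"
    @type semantic :: %{optional(String.t()) => term()}

    @typedoc "Episodic memory: timestamped experience records"
    @type episodic :: [%{timestamp: DateTime.t(), data: term()}]

    @typedoc "Procedural memory: named, learned procedures"
    @type procedural :: %{optional(atom()) => (term() -> term())}
  end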
Jido’s GenServer architecture can implement sliding-window working memory using Erlang’s :queue module:
defmodule MyAgent.Memory do
  use Jido.Agent, name: "memory_agent"

  defstruct messages: :queue.new(),  # Working memory - recent messages
            max_messages: 50,        # Limit on working memory size
            semantic_facts: %{},     # Long-term semantic knowledge
            episode_store: []        # Episodic memory for experiences

  def init(opts) do
    {:ok, %__MODULE__{max_messages: opts[:max_messages] || 50}}
  end

  def handle_call({:add_message, message}, _from, state) do
    # Add the message to the working memory queue
    messages = :queue.in(message, state.messages)

    # Enforce the message limit - evict the oldest message when over it
    {final_messages, removed} =
      if :queue.len(messages) > state.max_messages do
        {{:value, old_msg}, remaining} = :queue.out(messages)
        {remaining, old_msg}
      else
        {messages, nil}
      end

    # Extract semantic facts from the evicted message for long-term storage
    semantic_updates =
      case removed do
        nil -> state.semantic_facts
        old_msg -> extract_semantic_facts(old_msg, state.semantic_facts)
      end

    new_state = %{state | messages: final_messages, semantic_facts: semantic_updates}
    {:reply, :ok, new_state}
  end

  defp extract_semantic_facts(message, current_facts) do
    # Use an LLM to extract lasting facts from the message,
    # e.g. "User likes vegetarian food" -> semantic memory
    case extract_facts_via_llm(message) do
      {:ok, new_facts} -> Map.merge(current_facts, new_facts)
      _ -> current_facts
    end
  end

  # Placeholder - wire a real LLM extraction call in here
  defp extract_facts_via_llm(_message), do: {:ok, %{}}
end
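A minimal usage sketch, assuming use Jido.Agent delegates to a standard GenServer (start_link options and process registration are elided):

  # Start the memory agent with a smaller window and push a message into it
  {:ok, pid} = GenServer.start_link(MyAgent.Memory, max_messages: 20)
  :ok = GenServer.call(pid, {:add_message, %{role: :user, content: "I'm vegetarian"}})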
Jido’s signal-based architecture allows selective memory encoding based on importance:
defmodule MyAgent.ToolMemory do
  use Jido.Agent

  def handle_signal(%{type: "jido.ai.tool.response"} = signal, state) do
    tool_result = signal.data

    # Decision: should this tool call be remembered at all?
    case evaluate_tool_importance(tool_result) do
      :important ->
        # Store in episodic memory with full context
        episode = %{
          timestamp: DateTime.utc_now(),
          tool: tool_result.name,
          input: tool_result.input,
          output: tool_result.output,
          context: get_current_context(state),
          importance: :high
        }

        {:ok, %{state | episode_store: [episode | state.episode_store]}}

      :semantic_only ->
        # Extract facts, but don't store the full interaction
        facts = extract_semantic_info(tool_result)
        {:ok, %{state | semantic_facts: Map.merge(state.semantic_facts, facts)}}

      :ignore ->
        # Don't store routine operations (e.g., weather checks)
        {:ok, state}
    end
  end

  defp evaluate_tool_importance(tool_result) do
    cond do
      # User preference changes -> important episodic memory
      String.contains?(tool_result.name, "preference") -> :important
      # Error conditions -> important for learning
      tool_result.status == :error -> :important
      # Factual lookups -> extract semantics only
      tool_result.name in ["search", "lookup", "calculate"] -> :semantic_only
      # Routine operations -> ignore
      true -> :ignore
    end
  end

  # Placeholders - swap in real context capture and fact extraction
  defp get_current_context(_state), do: %{}
  defp extract_semantic_info(_tool_result), do: %{}
end
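A hedged dispatch sketch: the signal shape mirrors the Jido.Signal.new calls used below, and invoking handle_signal/2 directly stands in for whatever subscription mechanism actually delivers tool responses:

  {:ok, signal} =
    Jido.Signal.new(%{
      type: "jido.ai.tool.response",
      data: %{name: "get_weather", input: %{city: "Berlin"}, output: "12°C", status: :ok}
    })

  # A routine weather check evaluates to :ignore, so state comes back unchanged
  {:ok, unchanged_state} = MyAgent.ToolMemory.handle_signal(signal, state)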
Jido can integrate with vector databases for semantic memory retrieval:
defmodule MyAgent.SemanticMemory do
  use Jido.Agent

  def handle_call({:semantic_recall, query}, _from, state) do
    # Use vector similarity for semantic retrieval
    case search_semantic_memory(query, state.vector_store) do
      {:ok, relevant_facts} ->
        # Add semantic context to working memory
        semantic_context = format_semantic_context(relevant_facts)

        # Emit a signal carrying the recalled semantic context
        {:ok, signal} =
          Jido.Signal.new(%{
            type: "memory.semantic.recalled",
            data: %{context: semantic_context, query: query}
          })

        Jido.PubSub.emit(signal)
        {:reply, {:ok, semantic_context}, state}

      {:error, reason} ->
        {:reply, {:error, reason}, state}
    end
  end

  defp search_semantic_memory(query, vector_store_config) do
    # Call out to an external vector database (Chroma, Pinecone, etc.),
    # typically an HTTP call to a Python service
    request =
      HTTPoison.post(
        "#{vector_store_config.url}/search",
        Jason.encode!(%{query: query, k: 5}),
        [{"Content-Type", "application/json"}]
      )

    case request do
      {:ok, %HTTPoison.Response{status_code: 200, body: body}} ->
        {:ok, Jason.decode!(body)}

      error ->
        {:error, error}
    end
  end

  # Placeholder - turn raw search hits into prompt-ready context
  defp format_semantic_context(relevant_facts), do: relevant_facts
end
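Usage is a plain synchronous call; the vector_store field on state (holding the service URL) is an assumption of this sketch:

  # Hypothetical recall of facts relevant to the current conversation
  {:ok, context} = GenServer.call(agent_pid, {:semantic_recall, "user dietary preferences"})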
Jido’s schema validation can implement structured working memory:
defmodule MyAgent.WorkingMemory do
  use Jido.Agent

  # Define the working memory structure
  defstruct current_task: nil,          # Active task context
            user_intent: nil,           # Parsed user intention
            available_tools: [],        # Tools relevant to the current task
            conversation_state: :idle,  # State machine for the conversation
            focus_entities: [],         # Currently relevant entities
            pending_confirmations: []   # Actions awaiting user confirmation

  def handle_call({:update_working_memory, updates}, _from, state) do
    # Validate updates against the schema
    case validate_working_memory_update(updates) do
      {:ok, validated_updates} ->
        new_state = struct(state, validated_updates)

        # Emit working memory change event
        {:ok, signal} =
          Jido.Signal.new(%{
            type: "memory.working.updated",
            data: %{previous: state, current: new_state, changes: validated_updates}
          })

        Jido.PubSub.emit(signal)
        {:reply, :ok, new_state}

      {:error, validation_errors} ->
        {:reply, {:error, validation_errors}, state}
    end
  end

  defp validate_working_memory_update(updates) do
    # Ecto-style validation via a schemaless changeset
    types = %{
      current_task: :string,
      user_intent: :string,
      conversation_state:
        Ecto.ParameterizedType.init(Ecto.Enum, values: [:idle, :processing, :awaiting_input]),
      focus_entities: {:array, :string}
    }

    changeset =
      {%__MODULE__{}, types}
      |> Ecto.Changeset.cast(updates, Map.keys(types))
      |> Ecto.Changeset.validate_length(:focus_entities, max: 5) # Cognitive load limit

    if changeset.valid? do
      {:ok, changeset.changes}
    else
      {:error, changeset.errors}
    end
  end
end
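A hedged usage sketch (agent_pid is assumed to be the running agent process):

  # Valid update: advances the conversation state machine
  :ok = GenServer.call(agent_pid, {:update_working_memory, %{conversation_state: :processing}})

  # Invalid update: rejected by the changeset, existing state is kept
  {:error, _errors} = GenServer.call(agent_pid, {:update_working_memory, %{conversation_state: :confused}})

Consolidation can then run as a periodic background pass that decays stale episodes and promotes them to semantic knowledge before forgetting: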
defmodule MyAgent.MemoryConsolidation do
  use Jido.Agent

  # Assumes init/1 schedules the first :consolidate_memory message
  def handle_info(:consolidate_memory, state) do
    # Run periodic memory consolidation
    consolidated_episodes = consolidate_episodic_memory(state.episode_store)
    updated_semantics = strengthen_semantic_memories(state.semantic_facts)

    # Schedule the next consolidation pass
    Process.send_after(self(), :consolidate_memory, :timer.hours(1))

    {:noreply, %{state | episode_store: consolidated_episodes, semantic_facts: updated_semantics}}
  end

  defp consolidate_episodic_memory(episodes) do
    now = DateTime.utc_now()

    episodes
    |> Enum.map(fn episode ->
      # Time-based memory strength with a one-week half-life:
      # strength halves every 168 hours
      age_hours = DateTime.diff(now, episode.timestamp, :second) / 3600
      decay_factor = :math.pow(2, -age_hours / 168)

      # Strengthen frequently accessed memories
      access_boost = length(Map.get(episode, :access_history, [])) * 0.1
      final_strength = decay_factor + access_boost

      if final_strength > 0.3 do
        episode
      else
        # Convert to semantic knowledge before forgetting the episode
        extract_and_convert_to_semantic(episode)
        nil
      end
    end)
    |> Enum.reject(&is_nil/1)
  end

  # Placeholders for the consolidation helpers
  defp strengthen_semantic_memories(semantic_facts), do: semantic_facts
  defp extract_and_convert_to_semantic(_episode), do: :ok
end
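Because each agent is an ordinary BEAM process, selected memory types can also be shared between agents over PubSub: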
defmodule MyAgent.SharedMemory do
  use Jido.Agent

  def handle_call({:share_memory, target_agent, memory_type}, _from, state) do
    # Share specific memory types between agents
    shared_data =
      case memory_type do
        :semantic -> state.semantic_facts
        :procedural -> state.learned_procedures
        :recent_episodes -> Enum.take(state.episode_store, 10)
      end

    # Send the memory to the target agent via PubSub
    {:ok, signal} =
      Jido.Signal.new(%{
        type: "memory.shared.received",
        data: %{memory_type: memory_type, data: shared_data, from_agent: state.agent_id}
      })

    Phoenix.PubSub.broadcast(MyApp.PubSub, "agent:#{target_agent}", signal)
    {:reply, :ok, state}
  end
end
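A hedged usage sketch (agent ids and topic naming are illustrative):

  # Agent A pushes its semantic facts onto agent B's PubSub topic
  :ok = GenServer.call(agent_a_pid, {:share_memory, "agent_b", :semantic})

This design yields several practical benefits: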
Process Isolation: Each agent’s memory lives in its own GenServer, so one agent cannot corrupt another’s memory
Fault Tolerance: If an agent crashes, its supervisor restarts it and the memory state can be reconstructed (for example, from a persisted snapshot)
Distributed Memory: Agents can share memory across nodes via PubSub and distributed Erlang
Memory Efficiency: A lightweight footprint of roughly 25KB per agent process allows thousands of memory-enabled agents on a single node
Schema Validation: Jido’s action schemas keep memory consistent and type-safe
Jido’s GenServer foundation provides a solid basis for implementing sophisticated agent memory patterns while leveraging Elixir’s concurrency and fault-tolerance benefits. The framework’s signal-based architecture and schema validation make it particularly well-suited for structured memory management that can scale to production agent deployments.