Merged

57 commits
8ffdf4e
feat: delete custom_logger_handler (#289)
CarltonXiang Sep 10, 2025
541191f
fix: change env model name (#292)
fridayL Sep 10, 2025
99d8e19
fix:#286:https://github.com/MemTensor/MemOS/issues/286 (#293)
kakack Sep 11, 2025
6b8cf85
Feat:add self defined memcube id for reg user (#295)
fridayL Sep 11, 2025
709f87c
update: test branch ci
fridayL Sep 15, 2025
5285829
Feat/add opentelmetry (#298)
CarltonXiang Sep 15, 2025
0b0cad8
feat: add orginal context for reranking (#284)
fridayL Sep 15, 2025
7b245b9
revert: nebular require_python (#300)
CarltonXiang Sep 15, 2025
8f87b33
feat: chat bot api (#294)
CaralHsi Sep 15, 2025
c688ead
feat: chat bot api (#302)
CaralHsi Sep 16, 2025
778c3b4
feat: chat bot api, add reranker filter; fix pydantic bug (#303)
CaralHsi Sep 16, 2025
30cfdbf
fix: bug in internet pydantic error (#304)
CaralHsi Sep 16, 2025
065a378
Feat/add opentelmetry (#307)
CarltonXiang Sep 16, 2025
02b0983
feat: update nebula to nebula 5.1.1 (#311)
CaralHsi Sep 17, 2025
9be4cb5
fix: nebula multi db bug (#313)
CaralHsi Sep 17, 2025
05dac26
Feat/memos client (#312)
CarltonXiang Sep 18, 2025
663c157
Feat: add time log for threaddict and change openai packacge singleto…
fridayL Sep 18, 2025
6304368
rebase to address conflicts
tangg555 Jul 25, 2025
6f8963a
fix bugs: fix a bug in retriever, and add new auth info for neo4j db
tangg555 Jul 29, 2025
d62ff33
fix bugs & new feat: fix bugs in mem_scheduler examples, and remove i…
tangg555 Jul 31, 2025
945c44b
fix bugs: modify configs, examples, schedule handlers of mem_schedule…
tangg555 Aug 6, 2025
f930506
new feat: allow load auth config from env
tangg555 Aug 7, 2025
9deb368
finish the first verion code of orm, but it still has some problems w…
tangg555 Aug 28, 2025
e4fa4f2
new version of scheduler: 1. orm support for monitors 2. refined sear…
tangg555 Sep 10, 2025
44f0138
refactor: refactor the eval function of the scheduler
tangg555 Sep 11, 2025
cb9519d
fix bugs caused by auth config in tests
tangg555 Sep 11, 2025
e024fea
modify scheduler evaluation codes
tangg555 Sep 16, 2025
020e6c6
add the first version of scheduler test by creating temporal locomo b…
tangg555 Sep 16, 2025
391b422
fix bugs in temporal locomo codes in evaluation
tangg555 Sep 18, 2025
fcdb21c
fix bugs in text mem with neo4j backend, and set huggingface backend …
tangg555 Sep 19, 2025
c407987
Feat/add timerlog (#317)
fridayL Sep 19, 2025
f8e972d
Feat/add opentelmetry (#315)
CarltonXiang Sep 19, 2025
1dc230a
feat: add api client (#316)
CarltonXiang Sep 19, 2025
267a0c1
Feat: add segment lock dict (#319)
fridayL Sep 20, 2025
1dc3b2e
fix:fix dump parallel for dumps cubes (#320)
fridayL Sep 21, 2025
beb0e07
feat: add sinlgleton (#321)
fridayL Sep 22, 2025
301178d
feat: nebula&reorganize update (#322)
CaralHsi Sep 22, 2025
5639a91
fix: nebula reset bug (#323)
CaralHsi Sep 22, 2025
5b27384
feat: add default processing in mem-reader (#325)
CaralHsi Sep 22, 2025
ee89e68
feat:add time step (#326)
fridayL Sep 23, 2025
bb63d7a
Feat:add time step (#327)
fridayL Sep 23, 2025
a4de6bd
docker start (#324)
pursues Sep 23, 2025
46406ea
Feat: remove json (#328)
fridayL Sep 23, 2025
b25f68b
feat: remove (#329)
fridayL Sep 23, 2025
312223c
fix: not include embedding (#330)
CaralHsi Sep 23, 2025
3646712
Feat/add time step (#331)
fridayL Sep 23, 2025
8c1e4ee
Feat/add time step (#332)
fridayL Sep 23, 2025
94ec427
new feat: add a rule-based baseline which uses historical evidences t…
tangg555 Sep 22, 2025
4fa115b
new feat: add eval and metric codes into the pipeline, and fix the bu…
tangg555 Sep 23, 2025
7119091
Fix/default add (#333)
CaralHsi Sep 23, 2025
4a4abca
fix the bugs in rule-based baselines, and change the temporal data so…
tangg555 Sep 23, 2025
73c9fa1
feat: recall and searcher use parallel (#337)
lijicode Sep 24, 2025
1f57a62
feat: api client (#334)
CarltonXiang Sep 24, 2025
a75af07
Merge branch 'dev' into test
CaralHsi Sep 24, 2025
f8d8c60
feat: API 1.0 (#339)
CaralHsi Sep 24, 2025
62cce47
fix: format (#341)
CaralHsi Sep 24, 2025
6f1159f
Merge branch 'main' into dev
CaralHsi Sep 24, 2025
finish the first version code of orm, but it still has some problems when running scheduler
tangg555 committed Sep 19, 2025
commit 9deb36865e94913fe83e51384f0fd9a69c1a5162
1 change: 1 addition & 0 deletions .gitignore
@@ -48,6 +48,7 @@ share/python-wheels/
.installed.cfg
*.egg
MANIFEST
.run

# PyInstaller
# Usually these files are written by a python script from a template
9 changes: 0 additions & 9 deletions .vscode/settings.json

This file was deleted.

2 changes: 1 addition & 1 deletion examples/mem_scheduler/memos_w_scheduler.py
@@ -15,7 +15,7 @@


if TYPE_CHECKING:
from memos.mem_scheduler.schemas import (
from memos.mem_scheduler.schemas.message_schemas import (
ScheduleLogForWebItem,
)

21 changes: 13 additions & 8 deletions src/memos/configs/mem_scheduler.py
@@ -64,6 +64,19 @@ class GeneralSchedulerConfig(BaseSchedulerConfig):
default=20, description="Capacity of the activation memory monitor"
)

# Database configuration for ORM persistence
db_path: str | None = Field(
default=None,
description="Path to SQLite database file for ORM persistence. If None, uses default scheduler_orm.db",
)
db_url: str | None = Field(
default=None,
description="Database URL for ORM persistence (e.g., mysql://user:pass@host/db). Takes precedence over db_path",
)
enable_orm_persistence: bool = Field(
default=True, description="Whether to enable ORM-based persistence for monitors"
)


class SchedulerConfigFactory(BaseConfig):
"""Factory class for creating scheduler configurations."""
@@ -111,10 +124,6 @@ class RabbitMQConfig(
le=65535, # Port must be <= 65535
)

@classmethod
def get_env_prefix(cls) -> str:
return "RABBITMQ_"


class GraphDBAuthConfig(BaseConfig, DictConversionMixin, EnvConfigMixin):
uri: str = Field(
@@ -132,10 +141,6 @@ class GraphDBAuthConfig(BaseConfig, DictConversionMixin, EnvConfigMixin):
default=True, description="Whether to automatically create the database if it doesn't exist"
)

@classmethod
def get_env_prefix(cls) -> str:
return "GRAPH_DB_"


class OpenAIConfig(BaseConfig, DictConversionMixin, EnvConfigMixin):
api_key: str = Field(default="", description="API key for OpenAI service")
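The new GeneralSchedulerConfig fields in this file (db_path, db_url, enable_orm_persistence) give two ways to point the monitors' ORM layer at a database. A minimal sketch of the documented precedence (db_url wins over db_path, with scheduler_orm.db as the fallback); the helper itself is illustrative, not part of this diff:

def resolve_orm_url(db_url: str | None, db_path: str | None) -> str:
    # db_url (e.g. mysql://user:pass@host/db) takes precedence over db_path;
    # with neither set, fall back to the default SQLite file scheduler_orm.db.
    if db_url:
        return db_url
    return f"sqlite:///{db_path or 'scheduler_orm.db'}"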
140 changes: 88 additions & 52 deletions src/memos/mem_scheduler/base_scheduler.py
@@ -5,6 +5,8 @@
from datetime import datetime
from pathlib import Path

from sqlalchemy.engine import Engine

from memos.configs.mem_scheduler import AuthConfig, BaseSchedulerConfig
from memos.llms.base import BaseLLM
from memos.log import get_logger
@@ -62,6 +64,7 @@ def __init__(self, config: BaseSchedulerConfig):
)

self.retriever: SchedulerRetriever | None = None
self.db_engine: Engine | None = None
self.monitor: SchedulerGeneralMonitor | None = None
self.dispatcher_monitor: SchedulerDispatcherMonitor | None = None
self.dispatcher = SchedulerDispatcher(
@@ -70,12 +73,12 @@
)

# internal message queue
self.max_internal_messae_queue_size = 100
self.max_internal_message_queue_size = 100
self.memos_message_queue: Queue[ScheduleMessageItem] = Queue(
maxsize=self.max_internal_messae_queue_size
maxsize=self.max_internal_message_queue_size
)
self._web_log_message_queue: Queue[ScheduleLogForWebItem] = Queue(
maxsize=self.max_internal_messae_queue_size
maxsize=self.max_internal_message_queue_size
)
self._consumer_thread = None # Reference to our consumer thread
self._running = False
@@ -92,34 +95,56 @@ def __init__(self, config: BaseSchedulerConfig):
self.auth_config = None
self.rabbitmq_config = None

def initialize_modules(self, chat_llm: BaseLLM, process_llm: BaseLLM | None = None):
def initialize_modules(
self,
chat_llm: BaseLLM,
process_llm: BaseLLM | None = None,
db_engine: Engine | None = None,
):
if process_llm is None:
process_llm = chat_llm

# initialize submodules
self.chat_llm = chat_llm
self.process_llm = process_llm
self.monitor = SchedulerGeneralMonitor(process_llm=self.process_llm, config=self.config)
self.dispatcher_monitor = SchedulerDispatcherMonitor(config=self.config)
self.retriever = SchedulerRetriever(process_llm=self.process_llm, config=self.config)
try:
# initialize submodules
self.chat_llm = chat_llm
self.process_llm = process_llm
self.db_engine = db_engine
self.monitor = SchedulerGeneralMonitor(
process_llm=self.process_llm, config=self.config, db_engine=self.db_engine
)
self.dispatcher_monitor = SchedulerDispatcherMonitor(config=self.config)
self.retriever = SchedulerRetriever(process_llm=self.process_llm, config=self.config)

if self.enable_parallel_dispatch:
self.dispatcher_monitor.initialize(dispatcher=self.dispatcher)
self.dispatcher_monitor.start()

# initialize with auth_config
if self.auth_config_path is not None and Path(self.auth_config_path).exists():
self.auth_config = AuthConfig.from_local_config(config_path=self.auth_config_path)
elif AuthConfig.default_config_exists():
self.auth_config = AuthConfig.from_local_config()
else:
self.auth_config = AuthConfig.from_local_env()

if self.enable_parallel_dispatch:
self.dispatcher_monitor.initialize(dispatcher=self.dispatcher)
self.dispatcher_monitor.start()

# initialize with auth_cofig
if self.auth_config_path is not None and Path(self.auth_config_path).exists():
self.auth_config = AuthConfig.from_local_config(config_path=self.auth_config_path)
elif AuthConfig.default_config_exists():
self.auth_config = AuthConfig.from_local_config()
else:
self.auth_config = None
if self.auth_config is not None:
self.rabbitmq_config = self.auth_config.rabbitmq
self.initialize_rabbitmq(config=self.rabbitmq_config)

if self.auth_config is not None:
self.rabbitmq_config = self.auth_config.rabbitmq
self.initialize_rabbitmq(config=self.rabbitmq_config)
logger.debug("GeneralScheduler has been initialized")
except Exception as e:
logger.error(f"Failed to initialize scheduler modules: {e}", exc_info=True)
# Clean up any partially initialized resources
self._cleanup_on_init_failure()
raise

logger.debug("GeneralScheduler has been initialized")
def _cleanup_on_init_failure(self):
"""Clean up resources if initialization fails."""
try:
if hasattr(self, "dispatcher_monitor") and self.dispatcher_monitor is not None:
self.dispatcher_monitor.stop()
except Exception as e:
logger.warning(f"Error during cleanup: {e}")
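initialize_modules now threads an optional SQLAlchemy Engine through to the monitors. A sketch of the assumed call-site wiring (the scheduler and llm objects are presumed to exist already; this is not code from the diff):

from sqlalchemy import create_engine

engine = create_engine("sqlite:///scheduler_orm.db")
scheduler.initialize_modules(chat_llm=llm, process_llm=llm, db_engine=engine)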

@property
def mem_cube(self) -> GeneralMemCube:
@@ -200,8 +225,10 @@ def replace_working_memory(
text_mem_base: TreeTextMemory = text_mem_base

# process rerank memories with llm
query_monitor = self.monitor.query_monitors[user_id][mem_cube_id]
query_history = query_monitor.get_queries_with_timesort()
query_db_manager = self.monitor.query_monitors[user_id][mem_cube_id]
# Sync with database to get latest query history
query_db_manager.sync_with_orm()
query_history = query_db_manager.obj.get_queries_with_timesort()
memories_with_new_order, rerank_success_flag = (
self.retriever.process_and_rerank_memories(
queries=query_history,
@@ -212,7 +239,7 @@
)

# update working memory monitors
query_keywords = query_monitor.get_keywords_collections()
query_keywords = query_db_manager.obj.get_keywords_collections()
logger.info(
f"Processing {len(memories_with_new_order)} memories with {len(query_keywords)} query keywords"
)
@@ -235,7 +262,7 @@

mem_monitors: list[MemoryMonitorItem] = self.monitor.working_memory_monitors[user_id][
mem_cube_id
].get_sorted_mem_monitors(reverse=True)
].obj.get_sorted_mem_monitors(reverse=True)
new_working_memories = [mem_monitor.tree_memory_item for mem_monitor in mem_monitors]

text_mem_base.replace_working_memory(memories=new_working_memories)
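Monitor entries are now DB-backed managers rather than bare monitor objects: sync_with_orm() refreshes state from the database, and the live object is reached through .obj. The access pattern introduced in this hunk, condensed (names as in the diff):

query_db_manager = self.monitor.query_monitors[user_id][mem_cube_id]
query_db_manager.sync_with_orm()  # pull the latest state from the ORM first
query_history = query_db_manager.obj.get_queries_with_timesort()  # then read the live object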
@@ -278,6 +305,7 @@ def update_activation_memory(
new_text_memories = new_memories
else:
logger.error("Not Implemented.")
return

try:
if isinstance(mem_cube.act_mem, VLLMKVCacheMemory):
@@ -333,7 +361,9 @@
)

except Exception as e:
logger.warning(f"MOS-based activation memory update failed: {e}", exc_info=True)
logger.error(f"MOS-based activation memory update failed: {e}", exc_info=True)
# Re-raise the exception if it's critical for the operation
# For now, we'll continue execution but this should be reviewed

def update_activation_memory_periodically(
self,
@@ -358,7 +388,8 @@ def update_activation_memory_periodically(
if (
user_id not in self.monitor.working_memory_monitors
or mem_cube_id not in self.monitor.working_memory_monitors[user_id]
or len(self.monitor.working_memory_monitors[user_id][mem_cube_id].memories) == 0
or len(self.monitor.working_memory_monitors[user_id][mem_cube_id].obj.memories)
== 0
):
logger.warning(
"No memories found in working_memory_monitors, activation memory update is skipped"
@@ -369,9 +400,13 @@
user_id=user_id, mem_cube_id=mem_cube_id, mem_cube=mem_cube
)

# Sync with database to get latest activation memories
activation_db_manager = self.monitor.activation_memory_monitors[user_id][
mem_cube_id
]
activation_db_manager.sync_with_orm()
new_activation_memories = [
m.memory_text
for m in self.monitor.activation_memory_monitors[user_id][mem_cube_id].memories
m.memory_text for m in activation_db_manager.obj.memories
]

logger.info(
@@ -461,25 +496,26 @@ def _message_consumer(self) -> None:
"""
while self._running: # Use a running flag for graceful shutdown
try:
# Check if queue has messages (non-blocking)
if not self.memos_message_queue.empty():
# Get all available messages at once
messages = []
while not self.memos_message_queue.empty():
try:
messages.append(self.memos_message_queue.get_nowait())
except queue.Empty:
break

if messages:
try:
self.dispatcher.dispatch(messages)
except Exception as e:
logger.error(f"Error dispatching messages: {e!s}")
finally:
# Mark all messages as processed
for _ in messages:
self.memos_message_queue.task_done()
# Get all available messages at once (thread-safe approach)
messages = []
while True:
try:
# Use get_nowait() directly without empty() check to avoid race conditions
message = self.memos_message_queue.get_nowait()
messages.append(message)
except queue.Empty:
# No more messages available
break

if messages:
try:
self.dispatcher.dispatch(messages)
except Exception as e:
logger.error(f"Error dispatching messages: {e!s}")
finally:
# Mark all messages as processed
for _ in messages:
self.memos_message_queue.task_done()

# Sleep briefly to prevent busy waiting
time.sleep(self._consume_interval) # Adjust interval as needed
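The consumer rewrite drops the empty() pre-check: between empty() and get_nowait() another consumer could drain the queue, so the new loop simply calls get_nowait() until queue.Empty is raised. The same pattern in isolation, as a self-contained sketch (not code from the diff):

import queue

def drain_all(q: queue.Queue) -> list:
    """Drain every currently queued item without an empty() pre-check,
    avoiding the check-then-act race between competing consumers."""
    items = []
    while True:
        try:
            items.append(q.get_nowait())
        except queue.Empty:
            break
    return items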
33 changes: 32 additions & 1 deletion src/memos/mem_scheduler/general_modules/misc.py
@@ -20,6 +20,8 @@
class EnvConfigMixin(Generic[T]):
"""Abstract base class for environment variable configuration."""

ENV_PREFIX = "MEMSCHEDULER_"

@classmethod
def get_env_prefix(cls) -> str:
"""Automatically generates environment variable prefix from class name.
@@ -37,7 +39,8 @@ def get_env_prefix(cls) -> str:
if class_name.endswith("Config"):
class_name = class_name[:-6]
# Convert to uppercase and add trailing underscore
return f"{class_name.upper()}_"

return f"{cls.ENV_PREFIX}{class_name.upper()}_"

@classmethod
def from_env(cls: type[T]) -> T:
@@ -111,6 +114,26 @@ def to_dict(self) -> dict:
dump_data["timestamp"] = self.serialize_datetime(self.timestamp, None)
return dump_data

def to_json(self, **kwargs) -> str:
"""
Convert model instance to a JSON string.
- Accepts the same kwargs as json.dumps (e.g., indent, ensure_ascii)
- Default settings make JSON human-readable and UTF-8 safe
"""
return json.dumps(self.to_dict(), ensure_ascii=False, default=lambda o: str(o), **kwargs)

@classmethod
def from_json(cls: type[BaseModelType], json_str: str) -> BaseModelType:
"""
Create model instance from a JSON string.
- Parses JSON into a dictionary and delegates to from_dict
"""
try:
data = json.loads(json_str)
except json.JSONDecodeError as e:
raise ValueError(f"Invalid JSON string: {e}") from e
return cls.from_dict(data)
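to_json and from_json round-trip through the existing to_dict/from_dict. A hypothetical usage sketch (item stands in for any model that mixes in DictConversionMixin; its fields are assumed):

payload = item.to_json(indent=2)          # human-readable, UTF-8 safe
restored = type(item).from_json(payload)  # raises ValueError on malformed JSON
assert restored.to_dict() == item.to_dict()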

@classmethod
def from_dict(cls: type[BaseModelType], data: dict) -> BaseModelType:
"""
@@ -169,3 +192,11 @@ def put(self, item: T, block: bool = False, timeout: float | None = None) -> None
def get_queue_content_without_pop(self) -> list[T]:
"""Return a copy of the queue's contents without modifying it."""
return list(self.queue)

def clear(self) -> None:
"""Remove all items from the queue.

This operation is thread-safe.
"""
with self.mutex:
self.queue.clear()
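clear() pairs with get_queue_content_without_pop() for inspect-then-reset flows, holding the queue's internal mutex while it empties the underlying deque. A usage sketch; the class name AutoDroppingQueue is an assumption, not confirmed by this diff:

q = AutoDroppingQueue(maxsize=100)
q.put("hello")
snapshot = q.get_queue_content_without_pop()  # non-destructive read
q.clear()                                     # thread-safe reset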