refactor(meshchat): clean up code formatting and enhance version retrieval for LXST; improve log handling and anomaly detection logic

2026-01-03 18:42:50 -06:00
parent b51d04953f
commit 392fe50f82
6 changed files with 85 additions and 52 deletions

View File

@@ -86,7 +86,6 @@ from meshchatx.src.backend.sideband_commands import SidebandCommands
 from meshchatx.src.backend.telemetry_utils import Telemeter
 from meshchatx.src.version import __version__ as app_version
-import collections
 import logging

@@ -495,7 +494,7 @@ class ReticulumMeshChat:
         self.contexts[identity_hash] = context
         self.current_context = context
         context.setup()

         # Link database to memory log handler
         memory_log_handler.set_database(context.database)

@@ -1166,6 +1165,12 @@ class ReticulumMeshChat:
     def get_app_version() -> str:
         return app_version

+    def get_lxst_version(self) -> str:
+        try:
+            return importlib.metadata.version("lxst")
+        except Exception:
+            return getattr(LXST, "__version__", "unknown")
+
     # automatically announces based on user config
     async def announce_loop(self, session_id, context=None):
         ctx = context or self.current_context

@@ -3095,7 +3100,7 @@ class ReticulumMeshChat:
             "version": self.get_app_version(),
             "lxmf_version": LXMF.__version__,
             "rns_version": RNS.__version__,
-            "lxst_version": getattr(LXST, "__version__", "unknown"),
+            "lxst_version": self.get_lxst_version(),
             "python_version": platform.python_version(),
             "dependencies": {
                 "aiohttp": importlib.metadata.version("aiohttp"),

@@ -3925,8 +3930,13 @@ class ReticulumMeshChat:
             remote_identity_hash = d.get("remote_identity_hash")
             if remote_identity_hash:
                 # try to resolve name if unknown or missing
-                if not d.get("remote_identity_name") or d.get("remote_identity_name") == "Unknown":
-                    resolved_name = self.get_name_for_identity_hash(remote_identity_hash)
+                if (
+                    not d.get("remote_identity_name")
+                    or d.get("remote_identity_name") == "Unknown"
+                ):
+                    resolved_name = self.get_name_for_identity_hash(
+                        remote_identity_hash
+                    )
                     if resolved_name:
                         d["remote_identity_name"] = resolved_name

View File

@@ -23,7 +23,9 @@ class DebugLogsDAO:
             ),
         )

-    def get_logs(self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None):
+    def get_logs(
+        self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None
+    ):
         sql = "SELECT * FROM debug_logs WHERE 1=1"
         params = []

@@ -80,8 +82,9 @@ class DebugLogsDAO:
         row = self.provider.fetchone(sql, (max_logs - 1,))
         if row:
             cutoff_ts = row["timestamp"]
-            self.provider.execute("DELETE FROM debug_logs WHERE timestamp < ?", (cutoff_ts,))
+            self.provider.execute(
+                "DELETE FROM debug_logs WHERE timestamp < ?", (cutoff_ts,)
+            )

     def get_anomalies(self, limit=50):
         return self.get_logs(limit=limit, is_anomaly=True)
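get_logs() builds its query with the WHERE 1=1 idiom so that each optional filter can simply append a clause and a bound parameter. A hypothetical, condensed sketch of that pattern; the clause text and LIKE handling are illustrative, and only the column names visible in this diff (message, module, level, is_anomaly, timestamp) are taken from the real schema:

def build_logs_query(search=None, level=None, module=None, is_anomaly=None,
                     limit=100, offset=0):
    # Start from a base query that every filter can extend with "AND ...".
    sql = "SELECT * FROM debug_logs WHERE 1=1"
    params = []
    if search:
        sql += " AND (message LIKE ? OR module LIKE ?)"
        params += [f"%{search}%", f"%{search}%"]
    if level:
        sql += " AND level = ?"
        params.append(level)
    if module:
        sql += " AND module = ?"
        params.append(module)
    if is_anomaly is not None:
        sql += " AND is_anomaly = ?"
        params.append(1 if is_anomaly else 0)
    sql += " ORDER BY timestamp DESC LIMIT ? OFFSET ?"
    params += [limit, offset]
    return sql, params

Because every value is passed as a bound parameter, user-supplied search strings never end up concatenated into the SQL text.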

View File

@@ -107,11 +107,13 @@ class DatabaseProvider:
     def fetchone(self, query, params=None):
         cursor = self.execute(query, params)
-        return cursor.fetchone()
+        row = cursor.fetchone()
+        return dict(row) if row else None

     def fetchall(self, query, params=None):
         cursor = self.execute(query, params)
-        return cursor.fetchall()
+        rows = cursor.fetchall()
+        return [dict(row) for row in rows]

     def close(self):
         if hasattr(self._local, "connection"):

View File

@@ -86,10 +86,9 @@ class DocsManager:
                 dest_path = os.path.join(self.meshchatx_docs_dir, file)

                 # Only copy if source and destination are different
-                if (
-                    os.path.abspath(src_path) != os.path.abspath(dest_path)
-                    and os.access(self.meshchatx_docs_dir, os.W_OK)
-                ):
+                if os.path.abspath(src_path) != os.path.abspath(
+                    dest_path
+                ) and os.access(self.meshchatx_docs_dir, os.W_OK):
                     shutil.copy2(src_path, dest_path)

                 # Also pre-render to HTML for easy sharing/viewing

View File

@@ -69,10 +69,16 @@ class IntegrityManager:
             m_date = manifest.get("date", "Unknown")
             m_time = manifest.get("time", "Unknown")
             m_id = manifest.get("identity", "Unknown")
-            issues.insert(0, f"Last integrity snapshot: {m_date} {m_time} (Identity: {m_id})")
+            issues.insert(
+                0, f"Last integrity snapshot: {m_date} {m_time} (Identity: {m_id})"
+            )

             # Check if identity matches
-            if self.identity_hash and m_id != "Unknown" and self.identity_hash != m_id:
+            if (
+                self.identity_hash
+                and m_id != "Unknown"
+                and self.identity_hash != m_id
+            ):
                 issues.append(f"Identity mismatch! Manifest belongs to: {m_id}")

         self.issues = issues

View File

@@ -14,11 +14,11 @@ class PersistentLogHandler(logging.Handler):
         self.last_flush_time = time.time()
         self.lock = threading.RLock()
         self.flush_lock = threading.Lock()

         # Anomaly detection state
         self.recent_messages = collections.deque(maxlen=100)
         self.flooding_threshold = 20  # messages per second
         self.repeat_threshold = 5  # identical messages in a row
         self.message_counts = collections.defaultdict(int)
         self.last_reset_time = time.time()

@@ -30,9 +30,9 @@ class PersistentLogHandler(logging.Handler):
         try:
             msg = self.format(record)
             timestamp = datetime.now(UTC).timestamp()

            is_anomaly, anomaly_type = self._detect_anomaly(record, msg, timestamp)

             log_entry = {
                 "timestamp": timestamp,
                 "level": record.levelname,

@@ -41,29 +41,35 @@ class PersistentLogHandler(logging.Handler):
                 "is_anomaly": 1 if is_anomaly else 0,
                 "anomaly_type": anomaly_type,
             }

             with self.lock:
                 self.logs_buffer.append(log_entry)

                 # Periodically flush to database if available
-                if self.database and (time.time() - self.last_flush_time > self.flush_interval):
+                if self.database and (
+                    time.time() - self.last_flush_time > self.flush_interval
+                ):
                     self._flush_to_db()
         except Exception:
             self.handleError(record)

     def _detect_anomaly(self, record, message, timestamp):
+        # Only detect anomalies for WARNING level and above
+        if record.levelno < logging.WARNING:
+            return False, None
+
         now = time.time()

         # 1. Detect Log Flooding
         if now - self.last_reset_time > 1.0:
             self.message_counts.clear()
             self.last_reset_time = now

         self.message_counts["total"] += 1
         if self.message_counts["total"] > self.flooding_threshold:
             return True, "flooding"

         # 2. Detect Repeats
         if len(self.recent_messages) > 0:
             repeat_count = 0

@@ -72,17 +78,17 @@ class PersistentLogHandler(logging.Handler):
                     repeat_count += 1
                 else:
                     break

             if repeat_count >= self.repeat_threshold:
                 return True, "repeat"

         self.recent_messages.append(message)
         return False, None

     def _flush_to_db(self):
         if not self.database:
             return

         # Ensure only one thread flushes at a time
         if not self.flush_lock.acquire(blocking=False):
             return

@@ -92,7 +98,7 @@ class PersistentLogHandler(logging.Handler):
         with self.lock:
             while self.logs_buffer:
                 items_to_flush.append(self.logs_buffer.popleft())

         if not items_to_flush:
             return
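The _flush_to_db() path (unchanged here apart from formatting) relies on a non-blocking lock acquire so only one thread writes the buffered entries at a time; other callers skip the flush and their entries stay in the shared buffer for the next writer. A stripped-down sketch of that pattern with hypothetical names, not the full handler:

import threading

flush_lock = threading.Lock()

def flush(buffer, write):
    # Only one thread flushes at a time; losers of the race return
    # immediately instead of blocking behind the current writer.
    if not flush_lock.acquire(blocking=False):
        return
    try:
        while buffer:
            write(buffer.popleft())
    finally:
        flush_lock.release()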
@@ -104,49 +110,60 @@ class PersistentLogHandler(logging.Handler):
                         module=entry["module"],
                         message=entry["message"],
                         is_anomaly=entry["is_anomaly"],
-                        anomaly_type=entry["anomaly_type"]
+                        anomaly_type=entry["anomaly_type"],
                     )
                 except Exception as e:
                     print(f"Error inserting log: {e}")

             # Periodic cleanup of old logs (only every 100 flushes or similar?
             # for now let's just keep it here but it should be fast)
             try:
                 self.database.debug_logs.cleanup_old_logs()
             except Exception as e:
                 print(f"Error cleaning up logs: {e}")

             self.last_flush_time = time.time()
         except Exception as e:
             print(f"Failed to flush logs to database: {e}")
         finally:
             self.flush_lock.release()

-    def get_logs(self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None):
+    def get_logs(
+        self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None
+    ):
         if self.database:
             # Flush current buffer first to ensure we have latest logs
             self._flush_to_db()

         with self.lock:
             if self.database:
                 return self.database.debug_logs.get_logs(
                     limit=limit,
                     offset=offset,
                     search=search,
                     level=level,
                     module=module,
-                    is_anomaly=is_anomaly
+                    is_anomaly=is_anomaly,
                 )
             else:
                 # Fallback to in-memory buffer if DB not yet available
                 logs = list(self.logs_buffer)
                 if search:
-                    logs = [l for l in logs if search.lower() in l["message"].lower() or search.lower() in l["module"].lower()]
+                    logs = [
+                        log
+                        for log in logs
+                        if search.lower() in log["message"].lower()
+                        or search.lower() in log["module"].lower()
+                    ]
                 if level:
-                    logs = [l for l in logs if l["level"] == level]
+                    logs = [log for log in logs if log["level"] == level]
                 if is_anomaly is not None:
-                    logs = [l for l in logs if l["is_anomaly"] == (1 if is_anomaly else 0)]
+                    logs = [
+                        log
+                        for log in logs
+                        if log["is_anomaly"] == (1 if is_anomaly else 0)
+                    ]

                 # Sort descending
                 logs.sort(key=lambda x: x["timestamp"], reverse=True)
                 return logs[offset : offset + limit]

@@ -155,11 +172,7 @@ class PersistentLogHandler(logging.Handler):
         with self.lock:
             if self.database:
                 return self.database.debug_logs.get_total_count(
-                    search=search,
-                    level=level,
-                    module=module,
-                    is_anomaly=is_anomaly
+                    search=search, level=level, module=module, is_anomaly=is_anomaly
                 )
             else:
                 return len(self.logs_buffer)
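The new guard at the top of _detect_anomaly() means records below WARNING are ignored, so routine INFO/DEBUG chatter can no longer trip the flooding or repeat heuristics. A condensed, standalone sketch of the detection flow with the same thresholds; the handler plumbing (buffering, flushing, database) is omitted and the class name is hypothetical:

import collections
import logging
import time

class AnomalyDetector:
    def __init__(self):
        self.recent_messages = collections.deque(maxlen=100)
        self.flooding_threshold = 20  # messages per second
        self.repeat_threshold = 5     # identical messages in a row
        self.message_counts = collections.defaultdict(int)
        self.last_reset_time = time.time()

    def detect(self, levelno, message):
        # Only WARNING and above are considered for anomaly detection.
        if levelno < logging.WARNING:
            return False, None
        now = time.time()
        # Flooding: count messages within a rolling one-second window.
        if now - self.last_reset_time > 1.0:
            self.message_counts.clear()
            self.last_reset_time = now
        self.message_counts["total"] += 1
        if self.message_counts["total"] > self.flooding_threshold:
            return True, "flooding"
        # Repeats: the same message arriving several times in a row.
        repeats = 0
        for previous in reversed(self.recent_messages):
            if previous == message:
                repeats += 1
            else:
                break
        if repeats >= self.repeat_threshold:
            return True, "repeat"
        self.recent_messages.append(message)
        return False, None

As in the diff, a record flagged as an anomaly is not appended to recent_messages, so a burst that trips a threshold does not further inflate the repeat count.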