refactor(meshchat): clean up code formatting and enhance version retrieval for LXST; improve log handling and anomaly detection logic
@@ -86,7 +86,6 @@ from meshchatx.src.backend.sideband_commands import SidebandCommands
from meshchatx.src.backend.telemetry_utils import Telemeter
from meshchatx.src.version import __version__ as app_version

import collections
import logging
@@ -495,7 +494,7 @@ class ReticulumMeshChat:
        self.contexts[identity_hash] = context
        self.current_context = context
        context.setup()

        # Link database to memory log handler
        memory_log_handler.set_database(context.database)
@@ -1166,6 +1165,12 @@ class ReticulumMeshChat:
    def get_app_version() -> str:
        return app_version

    def get_lxst_version(self) -> str:
        try:
            return importlib.metadata.version("lxst")
        except Exception:
            return getattr(LXST, "__version__", "unknown")

    # automatically announces based on user config
    async def announce_loop(self, session_id, context=None):
        ctx = context or self.current_context
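Note: the new get_lxst_version() prefers the installed distribution metadata and only falls back to the module's __version__ attribute. A minimal standalone sketch of that lookup pattern follows; the resolve_version helper name and its generic signature are illustrative, not part of this commit.

# Sketch only: resolve a package version from installed metadata, falling back
# to the imported module's __version__ attribute, then to "unknown".
import importlib
import importlib.metadata
from typing import Optional


def resolve_version(dist_name: str, module_name: Optional[str] = None) -> str:
    try:
        # Preferred source: the version recorded by the package installer
        return importlib.metadata.version(dist_name)
    except Exception:
        pass
    try:
        module = importlib.import_module(module_name or dist_name)
        return getattr(module, "__version__", "unknown")
    except Exception:
        return "unknown"


print(resolve_version("lxst"))  # e.g. "0.x.y", or "unknown" if not installed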
@@ -3095,7 +3100,7 @@ class ReticulumMeshChat:
            "version": self.get_app_version(),
            "lxmf_version": LXMF.__version__,
            "rns_version": RNS.__version__,
            "lxst_version": getattr(LXST, "__version__", "unknown"),
            "lxst_version": self.get_lxst_version(),
            "python_version": platform.python_version(),
            "dependencies": {
                "aiohttp": importlib.metadata.version("aiohttp"),
@@ -3925,8 +3930,13 @@ class ReticulumMeshChat:
            remote_identity_hash = d.get("remote_identity_hash")
            if remote_identity_hash:
                # try to resolve name if unknown or missing
                if not d.get("remote_identity_name") or d.get("remote_identity_name") == "Unknown":
                    resolved_name = self.get_name_for_identity_hash(remote_identity_hash)
                if (
                    not d.get("remote_identity_name")
                    or d.get("remote_identity_name") == "Unknown"
                ):
                    resolved_name = self.get_name_for_identity_hash(
                        remote_identity_hash
                    )
                    if resolved_name:
                        d["remote_identity_name"] = resolved_name
@@ -23,7 +23,9 @@ class DebugLogsDAO:
            ),
        )

    def get_logs(self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None):
    def get_logs(
        self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None
    ):
        sql = "SELECT * FROM debug_logs WHERE 1=1"
        params = []
@@ -80,8 +82,9 @@ class DebugLogsDAO:
        row = self.provider.fetchone(sql, (max_logs - 1,))
        if row:
            cutoff_ts = row["timestamp"]
            self.provider.execute("DELETE FROM debug_logs WHERE timestamp < ?", (cutoff_ts,))
            self.provider.execute(
                "DELETE FROM debug_logs WHERE timestamp < ?", (cutoff_ts,)
            )

    def get_anomalies(self, limit=50):
        return self.get_logs(limit=limit, is_anomaly=True)
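For context, cleanup_old_logs keeps the table bounded by finding the timestamp of the oldest row it wants to keep and deleting everything older. A rough standalone sketch of that retention pattern against plain sqlite3 follows; the exact SELECT is not shown in the hunk, so the ORDER BY/OFFSET form below is one plausible reading, and the free-standing function is hypothetical (the DAO goes through its DatabaseProvider instead).

# Sketch of the retention pattern behind cleanup_old_logs(): keep the newest
# `max_logs` rows of debug_logs and delete anything older than the cutoff.
import sqlite3


def cleanup_old_logs(conn: sqlite3.Connection, max_logs: int = 1000) -> None:
    # Assumed query shape: the timestamp of the max_logs-th newest row
    row = conn.execute(
        "SELECT timestamp FROM debug_logs ORDER BY timestamp DESC LIMIT 1 OFFSET ?",
        (max_logs - 1,),
    ).fetchone()
    if row:
        cutoff_ts = row[0]
        # Everything strictly older than the cutoff falls outside the window
        conn.execute("DELETE FROM debug_logs WHERE timestamp < ?", (cutoff_ts,))
        conn.commit()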
@@ -107,11 +107,13 @@ class DatabaseProvider:

    def fetchone(self, query, params=None):
        cursor = self.execute(query, params)
        return cursor.fetchone()
        row = cursor.fetchone()
        return dict(row) if row else None

    def fetchall(self, query, params=None):
        cursor = self.execute(query, params)
        return cursor.fetchall()
        rows = cursor.fetchall()
        return [dict(row) for row in rows]

    def close(self):
        if hasattr(self._local, "connection"):
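The fetchone/fetchall change converts rows into plain dicts before they leave the provider, so callers and JSON serialization no longer depend on the cursor's row type. A minimal stand-in sketch follows; it assumes the connection uses row_factory = sqlite3.Row, which the dict(row) conversion in the provider implies, and the MiniProvider class is illustrative, not the real DatabaseProvider.

# Sketch: return plain dicts from a sqlite3 connection whose row_factory is
# sqlite3.Row (dict(row) works because Row exposes keys() and item access).
import sqlite3


class MiniProvider:
    def __init__(self, path: str = ":memory:"):
        self.connection = sqlite3.connect(path)
        self.connection.row_factory = sqlite3.Row  # enables dict(row)

    def execute(self, query, params=None):
        return self.connection.execute(query, params or [])

    def fetchone(self, query, params=None):
        row = self.execute(query, params).fetchone()
        return dict(row) if row else None

    def fetchall(self, query, params=None):
        return [dict(row) for row in self.execute(query, params).fetchall()]


db = MiniProvider()
db.execute("CREATE TABLE t (a INTEGER, b TEXT)")
db.execute("INSERT INTO t VALUES (1, 'x')")
print(db.fetchone("SELECT * FROM t"))  # {'a': 1, 'b': 'x'}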
@@ -86,10 +86,9 @@ class DocsManager:
            dest_path = os.path.join(self.meshchatx_docs_dir, file)

            # Only copy if source and destination are different
            if (
                os.path.abspath(src_path) != os.path.abspath(dest_path)
                and os.access(self.meshchatx_docs_dir, os.W_OK)
            ):
            if os.path.abspath(src_path) != os.path.abspath(
                dest_path
            ) and os.access(self.meshchatx_docs_dir, os.W_OK):
                shutil.copy2(src_path, dest_path)

            # Also pre-render to HTML for easy sharing/viewing
@@ -69,10 +69,16 @@ class IntegrityManager:
            m_date = manifest.get("date", "Unknown")
            m_time = manifest.get("time", "Unknown")
            m_id = manifest.get("identity", "Unknown")
            issues.insert(0, f"Last integrity snapshot: {m_date} {m_time} (Identity: {m_id})")
            issues.insert(
                0, f"Last integrity snapshot: {m_date} {m_time} (Identity: {m_id})"
            )

            # Check if identity matches
            if self.identity_hash and m_id != "Unknown" and self.identity_hash != m_id:
            if (
                self.identity_hash
                and m_id != "Unknown"
                and self.identity_hash != m_id
            ):
                issues.append(f"Identity mismatch! Manifest belongs to: {m_id}")

        self.issues = issues
@@ -14,11 +14,11 @@ class PersistentLogHandler(logging.Handler):
        self.last_flush_time = time.time()
        self.lock = threading.RLock()
        self.flush_lock = threading.Lock()

        # Anomaly detection state
        self.recent_messages = collections.deque(maxlen=100)
        self.flooding_threshold = 20  # messages per second
        self.repeat_threshold = 5  # identical messages in a row
        self.repeat_threshold = 5  # identical messages in a row
        self.message_counts = collections.defaultdict(int)
        self.last_reset_time = time.time()
@@ -30,9 +30,9 @@ class PersistentLogHandler(logging.Handler):
        try:
            msg = self.format(record)
            timestamp = datetime.now(UTC).timestamp()

            is_anomaly, anomaly_type = self._detect_anomaly(record, msg, timestamp)

            log_entry = {
                "timestamp": timestamp,
                "level": record.levelname,
@@ -41,29 +41,35 @@ class PersistentLogHandler(logging.Handler):
                "is_anomaly": 1 if is_anomaly else 0,
                "anomaly_type": anomaly_type,
            }

            with self.lock:
                self.logs_buffer.append(log_entry)

            # Periodically flush to database if available
            if self.database and (time.time() - self.last_flush_time > self.flush_interval):
            if self.database and (
                time.time() - self.last_flush_time > self.flush_interval
            ):
                self._flush_to_db()

        except Exception:
            self.handleError(record)

    def _detect_anomaly(self, record, message, timestamp):
        # Only detect anomalies for WARNING level and above
        if record.levelno < logging.WARNING:
            return False, None

        now = time.time()

        # 1. Detect Log Flooding
        if now - self.last_reset_time > 1.0:
            self.message_counts.clear()
            self.last_reset_time = now

        self.message_counts["total"] += 1
        if self.message_counts["total"] > self.flooding_threshold:
            return True, "flooding"

        # 2. Detect Repeats
        if len(self.recent_messages) > 0:
            repeat_count = 0
@@ -72,17 +78,17 @@ class PersistentLogHandler(logging.Handler):
                    repeat_count += 1
                else:
                    break

            if repeat_count >= self.repeat_threshold:
                return True, "repeat"

        self.recent_messages.append(message)
        return False, None
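The detector only runs for WARNING and above, flags flooding when more than flooding_threshold messages arrive within a one-second window, and flags a repeat once the same message has been seen repeat_threshold times in a row. A condensed standalone sketch of the same logic follows; the _detect_anomaly body above is the authoritative version, and the reversed() scan over recent messages is an assumption, since that loop header falls between the hunks shown.

# Condensed sketch of the flooding / repeat detection in _detect_anomaly().
import collections
import logging
import time


class AnomalyDetector:
    def __init__(self, flooding_threshold=20, repeat_threshold=5):
        self.recent_messages = collections.deque(maxlen=100)
        self.flooding_threshold = flooding_threshold  # messages per second
        self.repeat_threshold = repeat_threshold      # identical messages in a row
        self.message_counts = collections.defaultdict(int)
        self.last_reset_time = time.time()

    def detect(self, levelno, message):
        if levelno < logging.WARNING:
            return False, None

        now = time.time()

        # 1. Flooding: count messages in a rolling one-second window
        if now - self.last_reset_time > 1.0:
            self.message_counts.clear()
            self.last_reset_time = now
        self.message_counts["total"] += 1
        if self.message_counts["total"] > self.flooding_threshold:
            return True, "flooding"

        # 2. Repeats: count how many of the most recent messages are identical
        repeat_count = 0
        for previous in reversed(self.recent_messages):  # assumed loop shape
            if previous == message:
                repeat_count += 1
            else:
                break
        if repeat_count >= self.repeat_threshold:
            return True, "repeat"

        self.recent_messages.append(message)
        return False, None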
    def _flush_to_db(self):
        if not self.database:
            return

        # Ensure only one thread flushes at a time
        if not self.flush_lock.acquire(blocking=False):
            return
@@ -92,7 +98,7 @@ class PersistentLogHandler(logging.Handler):
        with self.lock:
            while self.logs_buffer:
                items_to_flush.append(self.logs_buffer.popleft())

        if not items_to_flush:
            return
@@ -104,49 +110,60 @@ class PersistentLogHandler(logging.Handler):
                        module=entry["module"],
                        message=entry["message"],
                        is_anomaly=entry["is_anomaly"],
                        anomaly_type=entry["anomaly_type"]
                        anomaly_type=entry["anomaly_type"],
                    )
                except Exception as e:
                    print(f"Error inserting log: {e}")

            # Periodic cleanup of old logs (only every 100 flushes or similar?
            # Periodic cleanup of old logs (only every 100 flushes or similar?
            # for now let's just keep it here but it should be fast)
            try:
                self.database.debug_logs.cleanup_old_logs()
            except Exception as e:
                print(f"Error cleaning up logs: {e}")

            self.last_flush_time = time.time()
        except Exception as e:
            print(f"Failed to flush logs to database: {e}")
        finally:
            self.flush_lock.release()
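The flush path is guarded by flush_lock.acquire(blocking=False), so concurrent emit() calls never queue up behind a database write: whoever loses the race returns immediately and lets the lock holder drain the buffer. A compact sketch of that drain-and-release shape follows; the BufferedFlusher class and the sink callable are stand-ins for the handler and the debug_logs DAO, not the real classes.

# Sketch of the single-flusher pattern used by _flush_to_db(): a non-blocking
# lock elects one flusher, the shared deque is drained under the buffer lock,
# and the writes happen outside it.
import collections
import threading


class BufferedFlusher:
    def __init__(self, sink):
        self.sink = sink  # stand-in for the debug_logs DAO insert call
        self.logs_buffer = collections.deque()
        self.lock = threading.RLock()
        self.flush_lock = threading.Lock()

    def add(self, entry):
        with self.lock:
            self.logs_buffer.append(entry)

    def flush(self):
        # Only one thread flushes at a time; others return immediately
        if not self.flush_lock.acquire(blocking=False):
            return
        try:
            items = []
            with self.lock:
                while self.logs_buffer:
                    items.append(self.logs_buffer.popleft())
            for entry in items:
                try:
                    self.sink(entry)
                except Exception as e:
                    print(f"Error inserting log: {e}")
        finally:
            self.flush_lock.release()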
    def get_logs(self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None):
    def get_logs(
        self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None
    ):
        if self.database:
            # Flush current buffer first to ensure we have latest logs
            self._flush_to_db()

        with self.lock:
            if self.database:
                return self.database.debug_logs.get_logs(
                    limit=limit,
                    offset=offset,
                    search=search,
                    level=level,
                    module=module,
                    is_anomaly=is_anomaly
                    limit=limit,
                    offset=offset,
                    search=search,
                    level=level,
                    module=module,
                    is_anomaly=is_anomaly,
                )
            else:
                # Fallback to in-memory buffer if DB not yet available
                logs = list(self.logs_buffer)
                if search:
                    logs = [l for l in logs if search.lower() in l["message"].lower() or search.lower() in l["module"].lower()]
                    logs = [
                        log
                        for log in logs
                        if search.lower() in log["message"].lower()
                        or search.lower() in log["module"].lower()
                    ]
                if level:
                    logs = [l for l in logs if l["level"] == level]
                    logs = [log for log in logs if log["level"] == level]
                if is_anomaly is not None:
                    logs = [l for l in logs if l["is_anomaly"] == (1 if is_anomaly else 0)]
                    logs = [
                        log
                        for log in logs
                        if log["is_anomaly"] == (1 if is_anomaly else 0)
                    ]

                # Sort descending
                logs.sort(key=lambda x: x["timestamp"], reverse=True)
                return logs[offset : offset + limit]
@@ -155,11 +172,7 @@ class PersistentLogHandler(logging.Handler):
        with self.lock:
            if self.database:
                return self.database.debug_logs.get_total_count(
                    search=search,
                    level=level,
                    module=module,
                    is_anomaly=is_anomaly
                    search=search, level=level, module=module, is_anomaly=is_anomaly
                )
            else:
                return len(self.logs_buffer)