refactor(meshchat): clean up code formatting and enhance version retrieval for LXST; improve log handling and anomaly detection logic
@@ -86,7 +86,6 @@ from meshchatx.src.backend.sideband_commands import SidebandCommands
 from meshchatx.src.backend.telemetry_utils import Telemeter
 from meshchatx.src.version import __version__ as app_version
 
-import collections
 import logging
 
 
@@ -1166,6 +1165,12 @@ class ReticulumMeshChat:
     def get_app_version() -> str:
         return app_version
 
+    def get_lxst_version(self) -> str:
+        try:
+            return importlib.metadata.version("lxst")
+        except Exception:
+            return getattr(LXST, "__version__", "unknown")
+
     # automatically announces based on user config
     async def announce_loop(self, session_id, context=None):
         ctx = context or self.current_context
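
The new `get_lxst_version()` prefers the installed distribution's metadata and only falls back to the module attribute when the lookup fails. A minimal self-contained sketch of the same pattern: the `"lxst"` distribution name comes from the hunk above; `PackageNotFoundError` is the specific exception `importlib.metadata.version` raises for a missing package, which the broader `except Exception` in the diff also covers.

```python
import importlib.metadata


def resolve_version(dist_name, module=None):
    """Return the installed distribution's version, falling back to the
    module's own __version__ attribute (or "unknown") when package
    metadata is unavailable, e.g. for vendored or source-tree imports."""
    try:
        return importlib.metadata.version(dist_name)
    except importlib.metadata.PackageNotFoundError:
        return getattr(module, "__version__", "unknown")
```
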
@@ -3095,7 +3100,7 @@ class ReticulumMeshChat:
             "version": self.get_app_version(),
             "lxmf_version": LXMF.__version__,
             "rns_version": RNS.__version__,
-            "lxst_version": getattr(LXST, "__version__", "unknown"),
+            "lxst_version": self.get_lxst_version(),
             "python_version": platform.python_version(),
             "dependencies": {
                 "aiohttp": importlib.metadata.version("aiohttp"),
@@ -3925,8 +3930,13 @@ class ReticulumMeshChat:
             remote_identity_hash = d.get("remote_identity_hash")
             if remote_identity_hash:
                 # try to resolve name if unknown or missing
-                if not d.get("remote_identity_name") or d.get("remote_identity_name") == "Unknown":
-                    resolved_name = self.get_name_for_identity_hash(remote_identity_hash)
+                if (
+                    not d.get("remote_identity_name")
+                    or d.get("remote_identity_name") == "Unknown"
+                ):
+                    resolved_name = self.get_name_for_identity_hash(
+                        remote_identity_hash
+                    )
                     if resolved_name:
                         d["remote_identity_name"] = resolved_name
 
@@ -23,7 +23,9 @@ class DebugLogsDAO:
             ),
         )
 
-    def get_logs(self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None):
+    def get_logs(
+        self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None
+    ):
         sql = "SELECT * FROM debug_logs WHERE 1=1"
         params = []
 
@@ -80,8 +82,9 @@ class DebugLogsDAO:
         row = self.provider.fetchone(sql, (max_logs - 1,))
         if row:
             cutoff_ts = row["timestamp"]
-            self.provider.execute("DELETE FROM debug_logs WHERE timestamp < ?", (cutoff_ts,))
+            self.provider.execute(
+                "DELETE FROM debug_logs WHERE timestamp < ?", (cutoff_ts,)
+            )
 
     def get_anomalies(self, limit=50):
         return self.get_logs(limit=limit, is_anomaly=True)
 
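
For context, the pruning above keeps only the newest `max_logs` rows: a SELECT (not shown in this hunk) finds the timestamp of the oldest row to keep, and everything strictly older is deleted. A hedged sketch of that pattern against a plain `sqlite3` connection; the table and column names match the hunk, but the SELECT statement itself is an assumption.

```python
import sqlite3


def prune_logs(conn, max_logs=1000):
    # Assumed shape of the SELECT: with DESC ordering, OFFSET max_logs - 1
    # lands on the (max_logs)-th newest timestamp, which becomes the cutoff.
    row = conn.execute(
        "SELECT timestamp FROM debug_logs ORDER BY timestamp DESC LIMIT 1 OFFSET ?",
        (max_logs - 1,),
    ).fetchone()
    if row:
        # Rows older than the cutoff are removed; ties at the cutoff survive.
        conn.execute("DELETE FROM debug_logs WHERE timestamp < ?", (row[0],))
        conn.commit()
```
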
@@ -107,11 +107,13 @@ class DatabaseProvider:
 
     def fetchone(self, query, params=None):
         cursor = self.execute(query, params)
-        return cursor.fetchone()
+        row = cursor.fetchone()
+        return dict(row) if row else None
 
     def fetchall(self, query, params=None):
         cursor = self.execute(query, params)
-        return cursor.fetchall()
+        rows = cursor.fetchall()
+        return [dict(row) for row in rows]
 
     def close(self):
         if hasattr(self._local, "connection"):
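
Returning plain dicts from `fetchone`/`fetchall` only works if the cursor rows support the mapping protocol, i.e. if the provider sets `sqlite3.Row` as the connection's row factory. That line sits outside this hunk, so treat it as an assumption; a minimal demonstration of the idiom:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row  # rows become index- and key-addressable

conn.execute("CREATE TABLE debug_logs (timestamp REAL, message TEXT)")
conn.execute("INSERT INTO debug_logs VALUES (?, ?)", (1700000000.0, "hello"))

row = conn.execute("SELECT * FROM debug_logs").fetchone()
print(dict(row))  # {'timestamp': 1700000000.0, 'message': 'hello'}
```

With the default tuple rows, `dict(row)` would fail; converting to dicts at the provider boundary is also what lets DAO code like `row["timestamp"]` in the pruning hunk above keep working unchanged.
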
@@ -86,10 +86,9 @@ class DocsManager:
             dest_path = os.path.join(self.meshchatx_docs_dir, file)
 
             # Only copy if source and destination are different
-            if (
-                os.path.abspath(src_path) != os.path.abspath(dest_path)
-                and os.access(self.meshchatx_docs_dir, os.W_OK)
-            ):
+            if os.path.abspath(src_path) != os.path.abspath(
+                dest_path
+            ) and os.access(self.meshchatx_docs_dir, os.W_OK):
                 shutil.copy2(src_path, dest_path)
 
             # Also pre-render to HTML for easy sharing/viewing
@@ -69,10 +69,16 @@ class IntegrityManager:
             m_date = manifest.get("date", "Unknown")
             m_time = manifest.get("time", "Unknown")
             m_id = manifest.get("identity", "Unknown")
-            issues.insert(0, f"Last integrity snapshot: {m_date} {m_time} (Identity: {m_id})")
+            issues.insert(
+                0, f"Last integrity snapshot: {m_date} {m_time} (Identity: {m_id})"
+            )
 
             # Check if identity matches
-            if self.identity_hash and m_id != "Unknown" and self.identity_hash != m_id:
+            if (
+                self.identity_hash
+                and m_id != "Unknown"
+                and self.identity_hash != m_id
+            ):
                 issues.append(f"Identity mismatch! Manifest belongs to: {m_id}")
 
         self.issues = issues
@@ -18,7 +18,7 @@ class PersistentLogHandler(logging.Handler):
         # Anomaly detection state
         self.recent_messages = collections.deque(maxlen=100)
-        self.flooding_threshold = 20 # messages per second
-        self.repeat_threshold = 5 # identical messages in a row
+        self.flooding_threshold = 20  # messages per second
+        self.repeat_threshold = 5  # identical messages in a row
         self.message_counts = collections.defaultdict(int)
         self.last_reset_time = time.time()
 
@@ -46,13 +46,19 @@ class PersistentLogHandler(logging.Handler):
             self.logs_buffer.append(log_entry)
 
             # Periodically flush to database if available
-            if self.database and (time.time() - self.last_flush_time > self.flush_interval):
+            if self.database and (
+                time.time() - self.last_flush_time > self.flush_interval
+            ):
                 self._flush_to_db()
 
         except Exception:
             self.handleError(record)
 
     def _detect_anomaly(self, record, message, timestamp):
+        # Only detect anomalies for WARNING level and above
+        if record.levelno < logging.WARNING:
+            return False, None
+
         now = time.time()
 
         # 1. Detect Log Flooding
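
The new guard short-circuits anomaly detection for anything below WARNING. The flooding and repeat checks themselves sit outside this hunk; the following is a plausible sketch of what the state initialized in the `@@ -18,7` hunk (a deque of recent messages plus the two thresholds) supports — an illustration of the technique, not the project's exact logic.

```python
import collections
import time

recent_messages = collections.deque(maxlen=100)  # (timestamp, message) pairs
flooding_threshold = 20  # messages per second
repeat_threshold = 5  # identical messages in a row


def detect_anomaly(message, now=None):
    """Return (is_anomaly, anomaly_type) for a formatted log message."""
    now = time.time() if now is None else now
    recent_messages.append((now, message))

    # 1. Flooding: more than flooding_threshold messages in the last second
    if sum(1 for ts, _ in recent_messages if now - ts <= 1.0) > flooding_threshold:
        return True, "flooding"

    # 2. Repetition: the last repeat_threshold messages are all identical
    tail = [msg for _, msg in list(recent_messages)[-repeat_threshold:]]
    if len(tail) == repeat_threshold and len(set(tail)) == 1:
        return True, "repeat"

    return False, None
```
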
@@ -104,7 +110,7 @@ class PersistentLogHandler(logging.Handler):
                     module=entry["module"],
                     message=entry["message"],
                     is_anomaly=entry["is_anomaly"],
-                    anomaly_type=entry["anomaly_type"]
+                    anomaly_type=entry["anomaly_type"],
                 )
             except Exception as e:
                 print(f"Error inserting log: {e}")
@@ -122,7 +128,9 @@ class PersistentLogHandler(logging.Handler):
         finally:
             self.flush_lock.release()
 
-    def get_logs(self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None):
+    def get_logs(
+        self, limit=100, offset=0, search=None, level=None, module=None, is_anomaly=None
+    ):
         if self.database:
             # Flush current buffer first to ensure we have latest logs
             self._flush_to_db()
@@ -135,17 +143,26 @@ class PersistentLogHandler(logging.Handler):
                 search=search,
                 level=level,
                 module=module,
-                is_anomaly=is_anomaly
+                is_anomaly=is_anomaly,
             )
         else:
             # Fallback to in-memory buffer if DB not yet available
             logs = list(self.logs_buffer)
             if search:
-                logs = [l for l in logs if search.lower() in l["message"].lower() or search.lower() in l["module"].lower()]
+                logs = [
+                    log
+                    for log in logs
+                    if search.lower() in log["message"].lower()
+                    or search.lower() in log["module"].lower()
+                ]
             if level:
-                logs = [l for l in logs if l["level"] == level]
+                logs = [log for log in logs if log["level"] == level]
             if is_anomaly is not None:
-                logs = [l for l in logs if l["is_anomaly"] == (1 if is_anomaly else 0)]
+                logs = [
+                    log
+                    for log in logs
+                    if log["is_anomaly"] == (1 if is_anomaly else 0)
+                ]
 
             # Sort descending
             logs.sort(key=lambda x: x["timestamp"], reverse=True)
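
Beyond the signature reflow and the lint-driven rename of `l` to `log`, the fallback path filters the in-memory buffer with the same semantics as the SQL path. A runnable distillation of that filter chain with two sample entries (field names follow the hunk; the values are illustrative):

```python
# Sample entries shaped like the handler's buffered log dicts.
logs = [
    {"timestamp": 2.0, "level": "WARNING", "module": "announce",
     "message": "Flooding detected", "is_anomaly": 1},
    {"timestamp": 1.0, "level": "INFO", "module": "web",
     "message": "Request served", "is_anomaly": 0},
]

search, level, is_anomaly = "announce", None, True
if search:
    logs = [
        log
        for log in logs
        if search.lower() in log["message"].lower()
        or search.lower() in log["module"].lower()
    ]
if level:
    logs = [log for log in logs if log["level"] == level]
if is_anomaly is not None:
    logs = [log for log in logs if log["is_anomaly"] == (1 if is_anomaly else 0)]

logs.sort(key=lambda x: x["timestamp"], reverse=True)
print(logs)  # only the anomalous "announce" entry survives
```
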
@@ -155,11 +172,7 @@ class PersistentLogHandler(logging.Handler):
         with self.lock:
             if self.database:
                 return self.database.debug_logs.get_total_count(
-                    search=search,
-                    level=level,
-                    module=module,
-                    is_anomaly=is_anomaly
+                    search=search, level=level, module=module, is_anomaly=is_anomaly
                 )
             else:
                 return len(self.logs_buffer)
 