From 666c90875aa810547703b5d00d304e8dd1917a4f Mon Sep 17 00:00:00 2001
From: Sudo-Ivan
Date: Mon, 5 Jan 2026 17:38:52 -0600
Subject: [PATCH] Add interface discovery, message folders, discovery-based map
 nodes, and maintenance tools
---
meshchatx/meshchat.py | 372 ++++++++++-
meshchatx/src/backend/bot_handler.py | 4 +-
meshchatx/src/backend/config_manager.py | 1 +
meshchatx/src/backend/database/__init__.py | 54 +-
meshchatx/src/backend/database/announces.py | 18 +
meshchatx/src/backend/database/messages.py | 85 +++
meshchatx/src/backend/database/schema.py | 46 +-
meshchatx/src/backend/docs_manager.py | 9 +-
meshchatx/src/backend/message_handler.py | 18 +
.../src/frontend/components/TutorialModal.vue | 567 ++++++++++++++--
.../frontend/components/about/AboutPage.vue | 308 +++++++--
.../components/interfaces/InterfacesPage.vue | 612 ++++++++++++++----
.../src/frontend/components/map/MapPage.vue | 158 ++++-
.../components/messages/MessagesPage.vue | 141 ++++
.../components/messages/MessagesSidebar.vue | 451 ++++++++++++-
.../components/settings/SettingsPage.vue | 310 ++++++++-
.../frontend/components/tools/ToolsPage.vue | 70 +-
meshchatx/src/frontend/locales/de.json | 44 +-
meshchatx/src/frontend/locales/en.json | 44 +-
meshchatx/src/frontend/locales/ru.json | 44 +-
meshchatx/src/frontend/style.css | 27 +
tests/backend/test_identity_restore.py | 85 +++
tests/backend/test_maintenance.py | 64 ++
tests/backend/test_message_handler.py | 12 +-
tests/frontend/MessagesSidebar.test.js | 20 +-
tests/frontend/Performance.test.js | 2 +-
26 files changed, 3272 insertions(+), 294 deletions(-)
create mode 100644 tests/backend/test_identity_restore.py
create mode 100644 tests/backend/test_maintenance.py
diff --git a/meshchatx/meshchat.py b/meshchatx/meshchat.py
index d5a2c26..ed27040 100644
--- a/meshchatx/meshchat.py
+++ b/meshchatx/meshchat.py
@@ -42,6 +42,7 @@ from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
+from RNS.Discovery import InterfaceDiscovery
from serial.tools import list_ports
from meshchatx.src.backend.async_utils import AsyncUtils
@@ -2152,8 +2153,37 @@ class ReticulumMeshChat:
@routes.get("/api/v1/database/snapshots")
async def list_db_snapshots(request):
try:
+ limit = int(request.query.get("limit", 100))
+ offset = int(request.query.get("offset", 0))
snapshots = self.database.list_snapshots(self.storage_dir)
- return web.json_response(snapshots)
+ total = len(snapshots)
+ paginated_snapshots = snapshots[offset : offset + limit]
+ return web.json_response(
+ {
+ "snapshots": paginated_snapshots,
+ "total": total,
+ "limit": limit,
+ "offset": offset,
+ },
+ )
+ except Exception as e:
+ return web.json_response(
+ {"status": "error", "message": str(e)},
+ status=500,
+ )
+
+ @routes.delete("/api/v1/database/snapshots/{filename}")
+ async def delete_db_snapshot(request):
+ try:
+ filename = request.match_info.get("filename")
+ if not filename.endswith(".zip"):
+ filename += ".zip"
+ self.database.delete_snapshot_or_backup(
+ self.storage_dir,
+ filename,
+ is_backup=False,
+ )
+ return web.json_response({"status": "success"})
except Exception as e:
return web.json_response(
{"status": "error", "message": str(e)},
@@ -2199,9 +2229,13 @@ class ReticulumMeshChat:
@routes.get("/api/v1/database/backups")
async def list_db_backups(request):
try:
+ limit = int(request.query.get("limit", 100))
+ offset = int(request.query.get("offset", 0))
backup_dir = os.path.join(self.storage_dir, "database-backups")
if not os.path.exists(backup_dir):
- return web.json_response([])
+ return web.json_response(
+ {"backups": [], "total": 0, "limit": limit, "offset": offset},
+ )
backups = []
for file in os.listdir(backup_dir):
@@ -2219,9 +2253,39 @@ class ReticulumMeshChat:
).isoformat(),
},
)
- return web.json_response(
- sorted(backups, key=lambda x: x["created_at"], reverse=True),
+ sorted_backups = sorted(
+ backups,
+ key=lambda x: x["created_at"],
+ reverse=True,
)
+ total = len(sorted_backups)
+ paginated_backups = sorted_backups[offset : offset + limit]
+ return web.json_response(
+ {
+ "backups": paginated_backups,
+ "total": total,
+ "limit": limit,
+ "offset": offset,
+ },
+ )
+ except Exception as e:
+ return web.json_response(
+ {"status": "error", "message": str(e)},
+ status=500,
+ )
+
+ @routes.delete("/api/v1/database/backups/{filename}")
+ async def delete_db_backup(request):
+ try:
+ filename = request.match_info.get("filename")
+ if not filename.endswith(".zip"):
+ filename += ".zip"
+ self.database.delete_snapshot_or_backup(
+ self.storage_dir,
+ filename,
+ is_backup=True,
+ )
+ return web.json_response({"status": "success"})
except Exception as e:
return web.json_response(
{"status": "error", "message": str(e)},
@@ -3360,6 +3424,7 @@ class ReticulumMeshChat:
),
"ply": self.get_package_version("ply"),
"bcrypt": self.get_package_version("bcrypt"),
+ "lxmfy": self.get_package_version("lxmfy"),
},
"storage_path": self.storage_path,
"database_path": self.database_path,
@@ -3939,6 +4004,62 @@ class ReticulumMeshChat:
status=500,
)
+ # maintenance - clear messages
+ @routes.delete("/api/v1/maintenance/messages")
+ async def maintenance_clear_messages(request):
+ self.database.messages.delete_all_lxmf_messages()
+ return web.json_response({"message": "All messages cleared"})
+
+ # maintenance - clear announces
+ @routes.delete("/api/v1/maintenance/announces")
+ async def maintenance_clear_announces(request):
+ aspect = request.query.get("aspect")
+ self.database.announces.delete_all_announces(aspect=aspect)
+ return web.json_response(
+ {
+ "message": f"Announces cleared{' for aspect ' + aspect if aspect else ''}",
+ },
+ )
+
+ # maintenance - clear favorites
+ @routes.delete("/api/v1/maintenance/favourites")
+ async def maintenance_clear_favourites(request):
+ aspect = request.query.get("aspect")
+ self.database.announces.delete_all_favourites(aspect=aspect)
+ return web.json_response(
+ {
+ "message": f"Favourites cleared{' for aspect ' + aspect if aspect else ''}",
+ },
+ )
+
+ # maintenance - clear archives
+ @routes.delete("/api/v1/maintenance/archives")
+ async def maintenance_clear_archives(request):
+ self.database.misc.delete_archived_pages()
+ return web.json_response({"message": "All archived pages cleared"})
+
+ # maintenance - export messages
+ @routes.get("/api/v1/maintenance/messages/export")
+ async def maintenance_export_messages(request):
+ messages = self.database.messages.get_all_lxmf_messages()
+ # Convert sqlite3.Row objects to plain dicts for JSON serialization
+ messages_list = [dict(m) for m in messages]
+ return web.json_response({"messages": messages_list})
+
+ # maintenance - import messages
+ @routes.post("/api/v1/maintenance/messages/import")
+ async def maintenance_import_messages(request):
+ try:
+ data = await request.json()
+ messages = data.get("messages", [])
+ for msg in messages:
+ self.database.messages.upsert_lxmf_message(msg)
+ return web.json_response(
+ {"message": f"Successfully imported {len(messages)} messages"},
+ )
+ except Exception as e:
+ return web.json_response({"error": str(e)}, status=400)
+
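
Together, the export and import routes above give a plain-JSON round-trip for
the message store. A sketch of copying messages between two instances; both
hosts are assumptions, and each imported row goes through upsert_lxmf_message,
so re-imports should be idempotent per message hash:

import json
import urllib.request

SRC = "http://localhost:8000"  # assumed source instance
DST = "http://localhost:8001"  # assumed destination instance

# Export every stored LXMF message from the source
with urllib.request.urlopen(f"{SRC}/api/v1/maintenance/messages/export") as resp:
    exported = json.load(resp)  # {"messages": [...]}

# Re-import the same payload shape on the destination
req = urllib.request.Request(
    f"{DST}/api/v1/maintenance/messages/import",
    data=json.dumps(exported).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["message"])  # "Successfully imported N messages"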
# get config
@routes.get("/api/v1/config")
async def config_get(request):
@@ -4043,6 +4164,91 @@ class ReticulumMeshChat:
return web.json_response({"discovery": discovery_config})
+ @routes.get("/api/v1/reticulum/discovered-interfaces")
+ async def reticulum_discovered_interfaces(request):
+ try:
+ discovery = InterfaceDiscovery(discover_interfaces=False)
+ interfaces = discovery.list_discovered_interfaces()
+ active = []
+ try:
+ if hasattr(self, "reticulum") and self.reticulum:
+ stats = self.reticulum.get_interface_stats().get(
+ "interfaces",
+ [],
+ )
+ for s in stats:
+ name = s.get("name") or ""
+ parsed_host = None
+ parsed_port = None
+ if "/" in name:
+ try:
+ host_port = name.split("/")[-1].strip("[]")
+ if ":" in host_port:
+ parsed_host, parsed_port = host_port.rsplit(
+ ":",
+ 1,
+ )
+ try:
+ parsed_port = int(parsed_port)
+ except Exception:
+ parsed_port = None
+ else:
+ parsed_host = host_port
+ except Exception:
+ parsed_host = None
+ parsed_port = None
+
+ host = (
+ s.get("target_host") or s.get("remote") or parsed_host
+ )
+ port = (
+ s.get("target_port")
+ or s.get("listen_port")
+ or parsed_port
+ )
+ transport_id = s.get("transport_id")
+ if isinstance(transport_id, (bytes, bytearray)):
+ transport_id = transport_id.hex()
+
+ active.append(
+ {
+ "name": name,
+ "short_name": s.get("short_name"),
+ "type": s.get("type"),
+ "target_host": host,
+ "target_port": port,
+ "listen_ip": s.get("listen_ip"),
+ "connected": s.get("connected"),
+ "online": s.get("online"),
+ "transport_id": transport_id,
+ "network_id": s.get("network_id"),
+ },
+ )
+ except Exception as e:
+ logger.debug(f"Failed to get interface stats: {e}")
+
+ def to_jsonable(obj):
+ if isinstance(obj, bytes):
+ return obj.hex()
+ if isinstance(obj, dict):
+ return {k: to_jsonable(v) for k, v in obj.items()}
+ if isinstance(obj, list):
+ return [to_jsonable(v) for v in obj]
+ return obj
+
+ return web.json_response(
+ {
+ "interfaces": to_jsonable(interfaces),
+ "active": to_jsonable(active),
+ },
+ )
+ except Exception as e:
+ return web.json_response(
+ {"message": f"Failed to load discovered interfaces: {e!s}"},
+ status=500,
+ )
+
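
The handler returns two lists: interfaces, the raw discovery cache (bytes
hex-encoded by to_jsonable), and active, the currently configured interfaces
with best-effort host/port extraction. A consumption sketch; it assumes the
discovered entries are dicts carrying the same target_host/target_port keys as
the active list, which depends on RNS.Discovery's cache format:

import json
import urllib.request

# assumed local instance
URL = "http://localhost:8000/api/v1/reticulum/discovered-interfaces"

with urllib.request.urlopen(URL) as resp:
    data = json.load(resp)

# Which discovered interfaces are we already connected to?
active_keys = {
    (i.get("target_host"), i.get("target_port"))
    for i in data["active"]
    if i.get("target_host")
}
for iface in data["interfaces"]:
    if not isinstance(iface, dict):
        continue  # skip entries whose shape we don't recognise
    key = (iface.get("target_host"), iface.get("target_port"))
    marker = "*" if key in active_keys else " "
    print(marker, iface.get("name", "<unnamed>"), key)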
# enable transport mode
@routes.post("/api/v1/reticulum/enable-transport")
async def reticulum_enable_transport(request):
@@ -6920,6 +7126,12 @@ class ReticulumMeshChat:
request.query.get("filter_has_attachments", "false"),
),
)
+ folder_id = request.query.get("folder_id")
+ if folder_id is not None:
+ try:
+ folder_id = int(folder_id)
+ except ValueError:
+ folder_id = None
# get pagination params
try:
@@ -6943,6 +7155,7 @@ class ReticulumMeshChat:
filter_unread=filter_unread,
filter_failed=filter_failed,
filter_has_attachments=filter_has_attachments,
+ folder_id=folder_id,
limit=limit,
offset=offset,
)
@@ -7021,6 +7234,123 @@ class ReticulumMeshChat:
},
)
+ @routes.get("/api/v1/lxmf/folders")
+ async def lxmf_folders_get(request):
+ folders = self.database.messages.get_all_folders()
+ return web.json_response([dict(f) for f in folders])
+
+ @routes.post("/api/v1/lxmf/folders")
+ async def lxmf_folders_post(request):
+ data = await request.json()
+ name = data.get("name")
+ if not name:
+ return web.json_response({"message": "Name is required"}, status=400)
+ try:
+ self.database.messages.create_folder(name)
+ return web.json_response({"message": "Folder created"})
+ except Exception as e:
+ return web.json_response({"message": str(e)}, status=500)
+
+ @routes.patch("/api/v1/lxmf/folders/{id}")
+ async def lxmf_folders_patch(request):
+ folder_id = int(request.match_info["id"])
+ data = await request.json()
+ name = data.get("name")
+ if not name:
+ return web.json_response({"message": "Name is required"}, status=400)
+ self.database.messages.rename_folder(folder_id, name)
+ return web.json_response({"message": "Folder renamed"})
+
+ @routes.delete("/api/v1/lxmf/folders/{id}")
+ async def lxmf_folders_delete(request):
+ folder_id = int(request.match_info["id"])
+ self.database.messages.delete_folder(folder_id)
+ return web.json_response({"message": "Folder deleted"})
+
+ @routes.post("/api/v1/lxmf/conversations/move-to-folder")
+ async def lxmf_conversations_move_to_folder(request):
+ data = await request.json()
+ peer_hashes = data.get("peer_hashes", [])
+ folder_id = data.get("folder_id") # Can be None to remove from folder
+ if not peer_hashes:
+ return web.json_response(
+ {"message": "peer_hashes is required"},
+ status=400,
+ )
+ self.database.messages.move_conversations_to_folder(peer_hashes, folder_id)
+ return web.json_response({"message": "Conversations moved"})
+
+ @routes.post("/api/v1/lxmf/conversations/bulk-mark-as-read")
+ async def lxmf_conversations_bulk_mark_read(request):
+ data = await request.json()
+ destination_hashes = data.get("destination_hashes", [])
+ if not destination_hashes:
+ return web.json_response(
+ {"message": "destination_hashes is required"},
+ status=400,
+ )
+ self.database.messages.mark_conversations_as_read(destination_hashes)
+ return web.json_response({"message": "Conversations marked as read"})
+
+ @routes.post("/api/v1/lxmf/conversations/bulk-delete")
+ async def lxmf_conversations_bulk_delete(request):
+ data = await request.json()
+ destination_hashes = data.get("destination_hashes", [])
+ if not destination_hashes:
+ return web.json_response(
+ {"message": "destination_hashes is required"},
+ status=400,
+ )
+ local_hash = self.local_lxmf_destination.hexhash
+ for dest_hash in destination_hashes:
+ self.message_handler.delete_conversation(local_hash, dest_hash)
+ return web.json_response({"message": "Conversations deleted"})
+
+ @routes.get("/api/v1/lxmf/folders/export")
+ async def lxmf_folders_export(request):
+ folders = [dict(f) for f in self.database.messages.get_all_folders()]
+ mappings = [
+ dict(m) for m in self.database.messages.get_all_conversation_folders()
+ ]
+ return web.json_response({"folders": folders, "mappings": mappings})
+
+ @routes.post("/api/v1/lxmf/folders/import")
+ async def lxmf_folders_import(request):
+ data = await request.json()
+ folders = data.get("folders", [])
+ mappings = data.get("mappings", [])
+
+ # We'll try to recreate folders by name to avoid ID conflicts
+ folder_name_to_new_id = {}
+ for f in folders:
+ try:
+ self.database.messages.create_folder(f["name"])
+ except Exception as e:
+ logger.debug(f"Folder '{f['name']}' likely already exists: {e}")
+
+ # Refresh folder list to get new IDs
+ all_folders = self.database.messages.get_all_folders()
+ for f in all_folders:
+ folder_name_to_new_id[f["name"]] = f["id"]
+
+ # Folder IDs may change on import, so remap old IDs to new ones
+ # by matching folder names (the export includes the full folder rows).
+ old_id_to_name = {f["id"]: f["name"] for f in folders}
+
+ for m in mappings:
+ peer_hash = m["peer_hash"]
+ old_folder_id = m["folder_id"]
+ folder_name = old_id_to_name.get(old_folder_id)
+ if folder_name and folder_name in folder_name_to_new_id:
+ new_folder_id = folder_name_to_new_id[folder_name]
+ self.database.messages.move_conversation_to_folder(
+ peer_hash,
+ new_folder_id,
+ )
+
+ return web.json_response({"message": "Folders and mappings imported"})
+
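
Because the import route rebuilds folders by name and remaps IDs via
old_id_to_name, an export payload only needs consistent names between folders
and mappings. A minimal hand-built payload; the peer hash and host below are
placeholders:

import json
import urllib.request

payload = {
    "folders": [
        {"id": 1, "name": "Work"},
        {"id": 2, "name": "Friends"},
    ],
    "mappings": [
        # old folder_id 1 is remapped to whatever id "Work" receives locally
        {"peer_hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "folder_id": 1},
    ],
}
req = urllib.request.Request(
    "http://localhost:8000/api/v1/lxmf/folders/import",  # assumed host
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
urllib.request.urlopen(req)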
# mark lxmf conversation as read
@routes.get("/api/v1/lxmf/conversations/{destination_hash}/mark-as-read")
async def lxmf_conversations_mark_read(request):
@@ -7806,7 +8136,7 @@ class ReticulumMeshChat:
f"connect-src {' '.join(connect_sources)}; "
"media-src 'self' blob:; "
"worker-src 'self' blob:; "
- "frame-src 'self'; "
+ "frame-src 'self' https://reticulum.network; "
"object-src 'none'; "
"base-uri 'self';"
)
@@ -7922,17 +8252,24 @@ class ReticulumMeshChat:
# (e.g. when running from a read-only AppImage)
if self.current_context and hasattr(self.current_context, "docs_manager"):
dm = self.current_context.docs_manager
- if (
- dm.docs_dir
- and os.path.exists(dm.docs_dir)
- and not dm.docs_dir.startswith(public_dir)
- ):
- app.router.add_static(
- "/reticulum-docs/",
- dm.docs_dir,
- name="reticulum_docs_storage",
- follow_symlinks=True,
- )
+
+ # Custom handler for reticulum docs to allow fallback to official website
+ async def reticulum_docs_handler(request):
+ path = request.match_info.get("filename", "index.html")
+ if not path:
+ path = "index.html"
+ if path.endswith("/"):
+ path += "index.html"
+
+ # Resolve the requested path and confine it to the docs directory
+ docs_root = os.path.abspath(dm.docs_dir)
+ local_path = os.path.abspath(os.path.join(docs_root, path))
+ if not local_path.startswith(docs_root + os.sep):
+ return web.HTTPNotFound()
+ if os.path.isfile(local_path):
+ return web.FileResponse(local_path)
+
+ # Fallback to official website
+ return web.HTTPFound(f"https://reticulum.network/manual/{path}")
+
+ app.router.add_get("/reticulum-docs/{filename:.*}", reticulum_docs_handler)
+
if (
dm.meshchatx_docs_dir
and os.path.exists(dm.meshchatx_docs_dir)
@@ -7978,7 +8315,8 @@ class ReticulumMeshChat:
print(
f"Performing scheduled auto-backup for {ctx.identity_hash}...",
)
- ctx.database.backup_database(self.storage_dir)
+ max_count = ctx.config.backup_max_count.get()
+ ctx.database.backup_database(self.storage_dir, max_count=max_count)
except Exception as e:
print(f"Auto-backup failed: {e}")
diff --git a/meshchatx/src/backend/bot_handler.py b/meshchatx/src/backend/bot_handler.py
index 576fef9..e5b99a1 100644
--- a/meshchatx/src/backend/bot_handler.py
+++ b/meshchatx/src/backend/bot_handler.py
@@ -240,7 +240,9 @@ class BotHandler:
shutil.rmtree(storage_dir)
except Exception as exc:
logger.warning(
- "Failed to delete storage dir for bot %s: %s", bot_id, exc
+ "Failed to delete storage dir for bot %s: %s",
+ bot_id,
+ exc,
)
self._save_state()
diff --git a/meshchatx/src/backend/config_manager.py b/meshchatx/src/backend/config_manager.py
index e8ab093..cb4593f 100644
--- a/meshchatx/src/backend/config_manager.py
+++ b/meshchatx/src/backend/config_manager.py
@@ -103,6 +103,7 @@ class ConfigManager:
"archives_max_storage_gb",
1,
)
+ self.backup_max_count = self.IntConfig(self, "backup_max_count", 5)
self.crawler_enabled = self.BoolConfig(self, "crawler_enabled", False)
self.crawler_max_retries = self.IntConfig(self, "crawler_max_retries", 3)
self.crawler_retry_delay_seconds = self.IntConfig(
diff --git a/meshchatx/src/backend/database/__init__.py b/meshchatx/src/backend/database/__init__.py
index 29d2ffc..6ec139a 100644
--- a/meshchatx/src/backend/database/__init__.py
+++ b/meshchatx/src/backend/database/__init__.py
@@ -211,14 +211,41 @@ class Database:
"size": os.path.getsize(backup_path),
}
- def backup_database(self, storage_path, backup_path: str | None = None):
+ def backup_database(
+ self,
+ storage_path,
+ backup_path: str | None = None,
+ max_count: int | None = None,
+ ):
default_dir = os.path.join(storage_path, "database-backups")
os.makedirs(default_dir, exist_ok=True)
if backup_path is None:
timestamp = datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
backup_path = os.path.join(default_dir, f"backup-{timestamp}.zip")
- return self._backup_to_zip(backup_path)
+ result = self._backup_to_zip(backup_path)
+
+ # Cleanup old backups if a limit is set
+ if max_count is not None and max_count > 0:
+ try:
+ backups = []
+ for file in os.listdir(default_dir):
+ if file.endswith(".zip"):
+ full_path = os.path.join(default_dir, file)
+ stats = os.stat(full_path)
+ backups.append((full_path, stats.st_mtime))
+
+ if len(backups) > max_count:
+ # Sort by modification time (oldest first)
+ backups.sort(key=lambda x: x[1])
+ to_delete = backups[: len(backups) - max_count]
+ for path, _ in to_delete:
+ if os.path.exists(path):
+ os.remove(path)
+ except Exception as e:
+ print(f"Failed to cleanup old backups: {e}")
+
+ return result
def create_snapshot(self, storage_path, name: str):
"""Creates a named snapshot of the database."""
@@ -258,6 +285,29 @@ class Database:
)
return sorted(snapshots, key=lambda x: x["created_at"], reverse=True)
+ def delete_snapshot_or_backup(
+ self,
+ storage_path,
+ filename: str,
+ is_backup: bool = False,
+ ):
+ """Deletes a database snapshot or auto-backup."""
+ base_dir = "database-backups" if is_backup else "snapshots"
+ file_path = os.path.join(storage_path, base_dir, filename)
+
+ # Basic security check to ensure we stay within the intended directory
+ abs_path = os.path.abspath(file_path)
+ abs_base = os.path.abspath(os.path.join(storage_path, base_dir))
+
+ if not abs_path.startswith(abs_base + os.sep):
+ msg = "Invalid path"
+ raise ValueError(msg)
+
+ if os.path.exists(abs_path):
+ os.remove(abs_path)
+ return True
+ return False
+
def restore_database(self, backup_path: str):
if not os.path.exists(backup_path):
msg = f"Backup not found at {backup_path}"
diff --git a/meshchatx/src/backend/database/announces.py b/meshchatx/src/backend/database/announces.py
index 080a1bc..a1b626e 100644
--- a/meshchatx/src/backend/database/announces.py
+++ b/meshchatx/src/backend/database/announces.py
@@ -54,6 +54,15 @@ class AnnounceDAO:
(destination_hash,),
)
+ def delete_all_announces(self, aspect=None):
+ if aspect:
+ self.provider.execute(
+ "DELETE FROM announces WHERE aspect = ?",
+ (aspect,),
+ )
+ else:
+ self.provider.execute("DELETE FROM announces")
+
def get_filtered_announces(
self,
aspect=None,
@@ -137,3 +146,12 @@ class AnnounceDAO:
"DELETE FROM favourite_destinations WHERE destination_hash = ?",
(destination_hash,),
)
+
+ def delete_all_favourites(self, aspect=None):
+ if aspect:
+ self.provider.execute(
+ "DELETE FROM favourite_destinations WHERE aspect = ?",
+ (aspect,),
+ )
+ else:
+ self.provider.execute("DELETE FROM favourite_destinations")
diff --git a/meshchatx/src/backend/database/messages.py b/meshchatx/src/backend/database/messages.py
index 442be84..618d532 100644
--- a/meshchatx/src/backend/database/messages.py
+++ b/meshchatx/src/backend/database/messages.py
@@ -63,12 +63,28 @@ class MessageDAO:
(message_hash,),
)
+ def delete_lxmf_messages_by_hashes(self, message_hashes):
+ if not message_hashes:
+ return
+ placeholders = ", ".join(["?"] * len(message_hashes))
+ self.provider.execute(
+ f"DELETE FROM lxmf_messages WHERE hash IN ({placeholders})",
+ tuple(message_hashes),
+ )
+
def delete_lxmf_message_by_hash(self, message_hash):
self.provider.execute(
"DELETE FROM lxmf_messages WHERE hash = ?",
(message_hash,),
)
+ def delete_all_lxmf_messages(self):
+ self.provider.execute("DELETE FROM lxmf_messages")
+ self.provider.execute("DELETE FROM lxmf_conversation_read_state")
+
+ def get_all_lxmf_messages(self):
+ return self.provider.fetchall("SELECT * FROM lxmf_messages")
+
def get_conversation_messages(self, destination_hash, limit=100, offset=0):
return self.provider.fetchall(
"SELECT * FROM lxmf_messages WHERE peer_hash = ? ORDER BY timestamp DESC LIMIT ? OFFSET ?",
@@ -103,6 +119,22 @@ class MessageDAO:
(destination_hash, now, now, now),
)
+ def mark_conversations_as_read(self, destination_hashes):
+ if not destination_hashes:
+ return
+ now = datetime.now(UTC).isoformat()
+ for destination_hash in destination_hashes:
+ self.provider.execute(
+ """
+ INSERT INTO lxmf_conversation_read_state (destination_hash, last_read_at, created_at, updated_at)
+ VALUES (?, ?, ?, ?)
+ ON CONFLICT(destination_hash) DO UPDATE SET
+ last_read_at = EXCLUDED.last_read_at,
+ updated_at = EXCLUDED.updated_at
+ """,
+ (destination_hash, now, now, now),
+ )
+
def is_conversation_unread(self, destination_hash):
row = self.provider.fetchone(
"""
@@ -290,3 +322,56 @@ class MessageDAO:
last_viewed_at = last_viewed_at.replace(tzinfo=UTC)
return message_timestamp <= last_viewed_at.timestamp()
+
+ # Folders
+ def get_all_folders(self):
+ return self.provider.fetchall("SELECT * FROM lxmf_folders ORDER BY name ASC")
+
+ def create_folder(self, name):
+ now = datetime.now(UTC).isoformat()
+ return self.provider.execute(
+ "INSERT INTO lxmf_folders (name, created_at, updated_at) VALUES (?, ?, ?)",
+ (name, now, now),
+ )
+
+ def rename_folder(self, folder_id, new_name):
+ now = datetime.now(UTC).isoformat()
+ self.provider.execute(
+ "UPDATE lxmf_folders SET name = ?, updated_at = ? WHERE id = ?",
+ (new_name, now, folder_id),
+ )
+
+ def delete_folder(self, folder_id):
+ self.provider.execute("DELETE FROM lxmf_folders WHERE id = ?", (folder_id,))
+
+ def get_conversation_folder(self, peer_hash):
+ return self.provider.fetchone(
+ "SELECT * FROM lxmf_conversation_folders WHERE peer_hash = ?",
+ (peer_hash,),
+ )
+
+ def move_conversation_to_folder(self, peer_hash, folder_id):
+ now = datetime.now(UTC).isoformat()
+ if folder_id is None:
+ self.provider.execute(
+ "DELETE FROM lxmf_conversation_folders WHERE peer_hash = ?",
+ (peer_hash,),
+ )
+ else:
+ self.provider.execute(
+ """
+ INSERT INTO lxmf_conversation_folders (peer_hash, folder_id, created_at, updated_at)
+ VALUES (?, ?, ?, ?)
+ ON CONFLICT(peer_hash) DO UPDATE SET
+ folder_id = EXCLUDED.folder_id,
+ updated_at = EXCLUDED.updated_at
+ """,
+ (peer_hash, folder_id, now, now),
+ )
+
+ def move_conversations_to_folder(self, peer_hashes, folder_id):
+ for peer_hash in peer_hashes:
+ self.move_conversation_to_folder(peer_hash, folder_id)
+
+ def get_all_conversation_folders(self):
+ return self.provider.fetchall("SELECT * FROM lxmf_conversation_folders")
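
move_conversation_to_folder doubles as the removal primitive: passing
folder_id=None deletes the mapping row instead of writing a NULL. A usage
sketch against a hypothetical dao (a MessageDAO wired to an open provider);
since create_folder's return value isn't relied on elsewhere, the id is looked
up by name, mirroring the import route:

dao.create_folder("Operations")
folder_id = {f["name"]: f["id"] for f in dao.get_all_folders()}["Operations"]

# File two conversations into the new folder (peer hashes are placeholders)
dao.move_conversations_to_folder(["peer_a_hash", "peer_b_hash"], folder_id)

# None removes the mapping, returning the conversation to "Uncategorized"
# (exposed as folder_id=0 in the conversations API filter)
dao.move_conversation_to_folder("peer_a_hash", None)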
diff --git a/meshchatx/src/backend/database/schema.py b/meshchatx/src/backend/database/schema.py
index 49f8b86..4a551cd 100644
--- a/meshchatx/src/backend/database/schema.py
+++ b/meshchatx/src/backend/database/schema.py
@@ -2,7 +2,7 @@ from .provider import DatabaseProvider
class DatabaseSchema:
- LATEST_VERSION = 35
+ LATEST_VERSION = 36
def __init__(self, provider: DatabaseProvider):
self.provider = provider
@@ -423,6 +423,24 @@ class DatabaseSchema:
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
)
""",
+ "lxmf_folders": """
+ CREATE TABLE IF NOT EXISTS lxmf_folders (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT UNIQUE,
+ created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+ updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
+ )
+ """,
+ "lxmf_conversation_folders": """
+ CREATE TABLE IF NOT EXISTS lxmf_conversation_folders (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ peer_hash TEXT UNIQUE,
+ folder_id INTEGER,
+ created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+ updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (folder_id) REFERENCES lxmf_folders(id) ON DELETE CASCADE
+ )
+ """,
}
for table_name, create_sql in tables.items():
@@ -933,6 +951,32 @@ class DatabaseSchema:
"ALTER TABLE contacts ADD COLUMN lxst_address TEXT DEFAULT NULL",
)
+ if current_version < 36:
+ self._safe_execute("""
+ CREATE TABLE IF NOT EXISTS lxmf_folders (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ name TEXT UNIQUE,
+ created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+ updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
+ )
+ """)
+ self._safe_execute("""
+ CREATE TABLE IF NOT EXISTS lxmf_conversation_folders (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ peer_hash TEXT UNIQUE,
+ folder_id INTEGER,
+ created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+ updated_at DATETIME DEFAULT CURRENT_TIMESTAMP,
+ FOREIGN KEY (folder_id) REFERENCES lxmf_folders(id) ON DELETE CASCADE
+ )
+ """)
+ self._safe_execute(
+ "CREATE INDEX IF NOT EXISTS idx_lxmf_conversation_folders_peer_hash ON lxmf_conversation_folders(peer_hash)",
+ )
+ self._safe_execute(
+ "CREATE INDEX IF NOT EXISTS idx_lxmf_conversation_folders_folder_id ON lxmf_conversation_folders(folder_id)",
+ )
+
# Update version in config
self._safe_execute(
"""
diff --git a/meshchatx/src/backend/docs_manager.py b/meshchatx/src/backend/docs_manager.py
index 64c4335..d78d031 100644
--- a/meshchatx/src/backend/docs_manager.py
+++ b/meshchatx/src/backend/docs_manager.py
@@ -38,7 +38,12 @@ class DocsManager:
# Ensure docs directories exist
try:
- for d in [self.docs_base_dir, self.versions_dir, self.meshchatx_docs_dir]:
+ for d in [
+ self.docs_base_dir,
+ self.versions_dir,
+ self.docs_dir,
+ self.meshchatx_docs_dir,
+ ]:
if not os.path.exists(d):
os.makedirs(d)
@@ -423,8 +428,6 @@ class DocsManager:
def has_docs(self):
# Check if index.html exists in the docs folder or if we have any versions
- if self.config.docs_downloaded.get():
- return True
return (
os.path.exists(os.path.join(self.docs_dir, "index.html"))
or len(self.get_available_versions()) > 0
diff --git a/meshchatx/src/backend/message_handler.py b/meshchatx/src/backend/message_handler.py
index 4fb1c8a..5a3deeb 100644
--- a/meshchatx/src/backend/message_handler.py
+++ b/meshchatx/src/backend/message_handler.py
@@ -35,6 +35,11 @@ class MessageHandler:
def delete_conversation(self, local_hash, destination_hash):
query = "DELETE FROM lxmf_messages WHERE peer_hash = ?"
self.db.provider.execute(query, [destination_hash])
+ # Also clean up folder mapping
+ self.db.provider.execute(
+ "DELETE FROM lxmf_conversation_folders WHERE peer_hash = ?",
+ [destination_hash],
+ )
def search_messages(self, local_hash, search_term):
like_term = f"%{search_term}%"
@@ -54,6 +59,7 @@ class MessageHandler:
filter_unread=False,
filter_failed=False,
filter_has_attachments=False,
+ folder_id=None,
limit=None,
offset=0,
):
@@ -66,6 +72,8 @@ class MessageHandler:
con.custom_image as contact_image,
i.icon_name, i.foreground_colour, i.background_colour,
r.last_read_at,
+ f.folder_id as folder_id,
+ fn.name as folder_name,
(SELECT COUNT(*) FROM lxmf_messages m_failed
WHERE m_failed.peer_hash = m1.peer_hash AND m_failed.state = 'failed') as failed_count
FROM lxmf_messages m1
@@ -84,10 +92,20 @@ class MessageHandler:
)
LEFT JOIN lxmf_user_icons i ON i.destination_hash = m1.peer_hash
LEFT JOIN lxmf_conversation_read_state r ON r.destination_hash = m1.peer_hash
+ LEFT JOIN lxmf_conversation_folders f ON f.peer_hash = m1.peer_hash
+ LEFT JOIN lxmf_folders fn ON fn.id = f.folder_id
"""
params = []
where_clauses = []
+ if folder_id is not None:
+ if folder_id == 0 or folder_id == "0":
+ # Special case: no folder (Uncategorized)
+ where_clauses.append("f.folder_id IS NULL")
+ else:
+ where_clauses.append("f.folder_id = ?")
+ params.append(folder_id)
+
if filter_unread:
where_clauses.append(
"(r.last_read_at IS NULL OR m1.timestamp > strftime('%s', r.last_read_at))",
diff --git a/meshchatx/src/frontend/components/TutorialModal.vue b/meshchatx/src/frontend/components/TutorialModal.vue
index 6023cfe..f3db212 100644
--- a/meshchatx/src/frontend/components/TutorialModal.vue
+++ b/meshchatx/src/frontend/components/TutorialModal.vue
@@ -180,7 +180,161 @@
[The remaining frontend hunks arrived with their Vue template markup stripped
during extraction; only the user-facing strings are recoverable:]

Tutorial discovery step (added):
  {{ $t("tutorial.discovery_question") || "Do you want to use community interface discovering and auto-connect?" }}
  {{ $t("tutorial.discovery_desc") || "This allows MeshChatX to automatically find and connect to public community nodes near you or on the internet." }}

Interfaces page labels (added): "Discovered", "Map All ({{ interfacesWithLocation.length }})",
"Configure Per-Interface", "Publish (Server)", "Discover Interfaces (Peer)"

Discovery section description (added):
  "Publish your interfaces for others to find, or listen for announced entrypoints and auto-connect to them."

Publish (Server) help text (added):
  "Enable discovery while adding or editing an interface to broadcast reachable details. Reticulum will sign and stamp announces automatically."
  "Requires LXMF in the Python environment. Transport is optional for publishing, but usually recommended so peers can connect back."

Discover Interfaces (Peer) help text (added):
  "Listen for discovery announces and optionally auto-connect to available interfaces."

Removed strings: {{ $t("tutorial.use") }}, "Allowed Sources"