diff --git a/meshchatx/meshchat.py b/meshchatx/meshchat.py index d5a2c26..ed27040 100644 --- a/meshchatx/meshchat.py +++ b/meshchatx/meshchat.py @@ -42,6 +42,7 @@ from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.x509.oid import NameOID +from RNS.Discovery import InterfaceDiscovery from serial.tools import list_ports from meshchatx.src.backend.async_utils import AsyncUtils @@ -2152,8 +2153,37 @@ class ReticulumMeshChat: @routes.get("/api/v1/database/snapshots") async def list_db_snapshots(request): try: + limit = int(request.query.get("limit", 100)) + offset = int(request.query.get("offset", 0)) snapshots = self.database.list_snapshots(self.storage_dir) - return web.json_response(snapshots) + total = len(snapshots) + paginated_snapshots = snapshots[offset : offset + limit] + return web.json_response( + { + "snapshots": paginated_snapshots, + "total": total, + "limit": limit, + "offset": offset, + }, + ) + except Exception as e: + return web.json_response( + {"status": "error", "message": str(e)}, + status=500, + ) + + @routes.delete("/api/v1/database/snapshots/{filename}") + async def delete_db_snapshot(request): + try: + filename = request.match_info.get("filename") + if not filename.endswith(".zip"): + filename += ".zip" + self.database.delete_snapshot_or_backup( + self.storage_dir, + filename, + is_backup=False, + ) + return web.json_response({"status": "success"}) except Exception as e: return web.json_response( {"status": "error", "message": str(e)}, @@ -2199,9 +2229,13 @@ class ReticulumMeshChat: @routes.get("/api/v1/database/backups") async def list_db_backups(request): try: + limit = int(request.query.get("limit", 100)) + offset = int(request.query.get("offset", 0)) backup_dir = os.path.join(self.storage_dir, "database-backups") if not os.path.exists(backup_dir): - return web.json_response([]) + return web.json_response( + {"backups": [], "total": 0, "limit": limit, "offset": offset}, + ) backups = [] for file in os.listdir(backup_dir): @@ -2219,9 +2253,39 @@ class ReticulumMeshChat: ).isoformat(), }, ) - return web.json_response( - sorted(backups, key=lambda x: x["created_at"], reverse=True), + sorted_backups = sorted( + backups, + key=lambda x: x["created_at"], + reverse=True, ) + total = len(sorted_backups) + paginated_backups = sorted_backups[offset : offset + limit] + return web.json_response( + { + "backups": paginated_backups, + "total": total, + "limit": limit, + "offset": offset, + }, + ) + except Exception as e: + return web.json_response( + {"status": "error", "message": str(e)}, + status=500, + ) + + @routes.delete("/api/v1/database/backups/{filename}") + async def delete_db_backup(request): + try: + filename = request.match_info.get("filename") + if not filename.endswith(".zip"): + filename += ".zip" + self.database.delete_snapshot_or_backup( + self.storage_dir, + filename, + is_backup=True, + ) + return web.json_response({"status": "success"}) except Exception as e: return web.json_response( {"status": "error", "message": str(e)}, @@ -3360,6 +3424,7 @@ class ReticulumMeshChat: ), "ply": self.get_package_version("ply"), "bcrypt": self.get_package_version("bcrypt"), + "lxmfy": self.get_package_version("lxmfy"), }, "storage_path": self.storage_path, "database_path": self.database_path, @@ -3939,6 +4004,62 @@ class ReticulumMeshChat: status=500, ) + # maintenance - clear messages + 
@routes.delete("/api/v1/maintenance/messages") + async def maintenance_clear_messages(request): + self.database.messages.delete_all_lxmf_messages() + return web.json_response({"message": "All messages cleared"}) + + # maintenance - clear announces + @routes.delete("/api/v1/maintenance/announces") + async def maintenance_clear_announces(request): + aspect = request.query.get("aspect") + self.database.announces.delete_all_announces(aspect=aspect) + return web.json_response( + { + "message": f"Announces cleared{' for aspect ' + aspect if aspect else ''}", + }, + ) + + # maintenance - clear favorites + @routes.delete("/api/v1/maintenance/favourites") + async def maintenance_clear_favourites(request): + aspect = request.query.get("aspect") + self.database.announces.delete_all_favourites(aspect=aspect) + return web.json_response( + { + "message": f"Favourites cleared{' for aspect ' + aspect if aspect else ''}", + }, + ) + + # maintenance - clear archives + @routes.delete("/api/v1/maintenance/archives") + async def maintenance_clear_archives(request): + self.database.misc.delete_archived_pages() + return web.json_response({"message": "All archived pages cleared"}) + + # maintenance - export messages + @routes.get("/api/v1/maintenance/messages/export") + async def maintenance_export_messages(request): + messages = self.database.messages.get_all_lxmf_messages() + # Convert sqlite3.Row to dict if necessary + messages_list = [dict(m) for m in messages] + return web.json_response({"messages": messages_list}) + + # maintenance - import messages + @routes.post("/api/v1/maintenance/messages/import") + async def maintenance_import_messages(request): + try: + data = await request.json() + messages = data.get("messages", []) + for msg in messages: + self.database.messages.upsert_lxmf_message(msg) + return web.json_response( + {"message": f"Successfully imported {len(messages)} messages"}, + ) + except Exception as e: + return web.json_response({"error": str(e)}, status=400) + # get config @routes.get("/api/v1/config") async def config_get(request): @@ -4043,6 +4164,91 @@ class ReticulumMeshChat: return web.json_response({"discovery": discovery_config}) + @routes.get("/api/v1/reticulum/discovered-interfaces") + async def reticulum_discovered_interfaces(request): + try: + discovery = InterfaceDiscovery(discover_interfaces=False) + interfaces = discovery.list_discovered_interfaces() + active = [] + try: + if hasattr(self, "reticulum") and self.reticulum: + stats = self.reticulum.get_interface_stats().get( + "interfaces", + [], + ) + active = [] + for s in stats: + name = s.get("name") or "" + parsed_host = None + parsed_port = None + if "/" in name: + try: + host_port = name.split("/")[-1].strip("[]") + if ":" in host_port: + parsed_host, parsed_port = host_port.rsplit( + ":", + 1, + ) + try: + parsed_port = int(parsed_port) + except Exception: + parsed_port = None + else: + parsed_host = host_port + except Exception: + parsed_host = None + parsed_port = None + + host = ( + s.get("target_host") or s.get("remote") or parsed_host + ) + port = ( + s.get("target_port") + or s.get("listen_port") + or parsed_port + ) + transport_id = s.get("transport_id") + if isinstance(transport_id, (bytes, bytearray)): + transport_id = transport_id.hex() + + active.append( + { + "name": name, + "short_name": s.get("short_name"), + "type": s.get("type"), + "target_host": host, + "target_port": port, + "listen_ip": s.get("listen_ip"), + "connected": s.get("connected"), + "online": s.get("online"), + "transport_id": transport_id, + 
"network_id": s.get("network_id"), + }, + ) + except Exception as e: + logger.debug(f"Failed to get interface stats: {e}") + + def to_jsonable(obj): + if isinstance(obj, bytes): + return obj.hex() + if isinstance(obj, dict): + return {k: to_jsonable(v) for k, v in obj.items()} + if isinstance(obj, list): + return [to_jsonable(v) for v in obj] + return obj + + return web.json_response( + { + "interfaces": to_jsonable(interfaces), + "active": to_jsonable(active), + }, + ) + except Exception as e: + return web.json_response( + {"message": f"Failed to load discovered interfaces: {e!s}"}, + status=500, + ) + # enable transport mode @routes.post("/api/v1/reticulum/enable-transport") async def reticulum_enable_transport(request): @@ -6920,6 +7126,12 @@ class ReticulumMeshChat: request.query.get("filter_has_attachments", "false"), ), ) + folder_id = request.query.get("folder_id") + if folder_id is not None: + try: + folder_id = int(folder_id) + except ValueError: + folder_id = None # get pagination params try: @@ -6943,6 +7155,7 @@ class ReticulumMeshChat: filter_unread=filter_unread, filter_failed=filter_failed, filter_has_attachments=filter_has_attachments, + folder_id=folder_id, limit=limit, offset=offset, ) @@ -7021,6 +7234,123 @@ class ReticulumMeshChat: }, ) + @routes.get("/api/v1/lxmf/folders") + async def lxmf_folders_get(request): + folders = self.database.messages.get_all_folders() + return web.json_response([dict(f) for f in folders]) + + @routes.post("/api/v1/lxmf/folders") + async def lxmf_folders_post(request): + data = await request.json() + name = data.get("name") + if not name: + return web.json_response({"message": "Name is required"}, status=400) + try: + self.database.messages.create_folder(name) + return web.json_response({"message": "Folder created"}) + except Exception as e: + return web.json_response({"message": str(e)}, status=500) + + @routes.patch("/api/v1/lxmf/folders/{id}") + async def lxmf_folders_patch(request): + folder_id = int(request.match_info["id"]) + data = await request.json() + name = data.get("name") + if not name: + return web.json_response({"message": "Name is required"}, status=400) + self.database.messages.rename_folder(folder_id, name) + return web.json_response({"message": "Folder renamed"}) + + @routes.delete("/api/v1/lxmf/folders/{id}") + async def lxmf_folders_delete(request): + folder_id = int(request.match_info["id"]) + self.database.messages.delete_folder(folder_id) + return web.json_response({"message": "Folder deleted"}) + + @routes.post("/api/v1/lxmf/conversations/move-to-folder") + async def lxmf_conversations_move_to_folder(request): + data = await request.json() + peer_hashes = data.get("peer_hashes", []) + folder_id = data.get("folder_id") # Can be None to remove from folder + if not peer_hashes: + return web.json_response( + {"message": "peer_hashes is required"}, + status=400, + ) + self.database.messages.move_conversations_to_folder(peer_hashes, folder_id) + return web.json_response({"message": "Conversations moved"}) + + @routes.post("/api/v1/lxmf/conversations/bulk-mark-as-read") + async def lxmf_conversations_bulk_mark_read(request): + data = await request.json() + destination_hashes = data.get("destination_hashes", []) + if not destination_hashes: + return web.json_response( + {"message": "destination_hashes is required"}, + status=400, + ) + self.database.messages.mark_conversations_as_read(destination_hashes) + return web.json_response({"message": "Conversations marked as read"}) + + 
@routes.post("/api/v1/lxmf/conversations/bulk-delete") + async def lxmf_conversations_bulk_delete(request): + data = await request.json() + destination_hashes = data.get("destination_hashes", []) + if not destination_hashes: + return web.json_response( + {"message": "destination_hashes is required"}, + status=400, + ) + local_hash = self.local_lxmf_destination.hexhash + for dest_hash in destination_hashes: + self.message_handler.delete_conversation(local_hash, dest_hash) + return web.json_response({"message": "Conversations deleted"}) + + @routes.get("/api/v1/lxmf/folders/export") + async def lxmf_folders_export(request): + folders = [dict(f) for f in self.database.messages.get_all_folders()] + mappings = [ + dict(m) for m in self.database.messages.get_all_conversation_folders() + ] + return web.json_response({"folders": folders, "mappings": mappings}) + + @routes.post("/api/v1/lxmf/folders/import") + async def lxmf_folders_import(request): + data = await request.json() + folders = data.get("folders", []) + mappings = data.get("mappings", []) + + # We'll try to recreate folders by name to avoid ID conflicts + folder_name_to_new_id = {} + for f in folders: + try: + self.database.messages.create_folder(f["name"]) + except Exception as e: + logger.debug(f"Folder '{f['name']}' likely already exists: {e}") + + # Refresh folder list to get new IDs + all_folders = self.database.messages.get_all_folders() + for f in all_folders: + folder_name_to_new_id[f["name"]] = f["id"] + + # Map old IDs to new IDs if possible, or just use names if we had them + # Since IDs might change, we should have exported names too + # Let's assume the export had folder names in mappings or we match by old folder info + old_id_to_name = {f["id"]: f["name"] for f in folders} + + for m in mappings: + peer_hash = m["peer_hash"] + old_folder_id = m["folder_id"] + folder_name = old_id_to_name.get(old_folder_id) + if folder_name and folder_name in folder_name_to_new_id: + new_folder_id = folder_name_to_new_id[folder_name] + self.database.messages.move_conversation_to_folder( + peer_hash, + new_folder_id, + ) + + return web.json_response({"message": "Folders and mappings imported"}) + # mark lxmf conversation as read @routes.get("/api/v1/lxmf/conversations/{destination_hash}/mark-as-read") async def lxmf_conversations_mark_read(request): @@ -7806,7 +8136,7 @@ class ReticulumMeshChat: f"connect-src {' '.join(connect_sources)}; " "media-src 'self' blob:; " "worker-src 'self' blob:; " - "frame-src 'self'; " + "frame-src 'self' https://reticulum.network; " "object-src 'none'; " "base-uri 'self';" ) @@ -7922,17 +8252,24 @@ class ReticulumMeshChat: # (e.g. 
when running from a read-only AppImage) if self.current_context and hasattr(self.current_context, "docs_manager"): dm = self.current_context.docs_manager - if ( - dm.docs_dir - and os.path.exists(dm.docs_dir) - and not dm.docs_dir.startswith(public_dir) - ): - app.router.add_static( - "/reticulum-docs/", - dm.docs_dir, - name="reticulum_docs_storage", - follow_symlinks=True, - ) + + # Custom handler for reticulum docs to allow fallback to official website + async def reticulum_docs_handler(request): + path = request.match_info.get("filename", "index.html") + if not path: + path = "index.html" + if path.endswith("/"): + path += "index.html" + + local_path = os.path.join(dm.docs_dir, path) + if os.path.exists(local_path) and os.path.isfile(local_path): + return web.FileResponse(local_path) + + # Fallback to official website + return web.HTTPFound(f"https://reticulum.network/manual/{path}") + + app.router.add_get("/reticulum-docs/{filename:.*}", reticulum_docs_handler) + if ( dm.meshchatx_docs_dir and os.path.exists(dm.meshchatx_docs_dir) @@ -7978,7 +8315,8 @@ class ReticulumMeshChat: print( f"Performing scheduled auto-backup for {ctx.identity_hash}...", ) - ctx.database.backup_database(self.storage_dir) + max_count = ctx.config.backup_max_count.get() + ctx.database.backup_database(self.storage_dir, max_count=max_count) except Exception as e: print(f"Auto-backup failed: {e}") diff --git a/meshchatx/src/backend/bot_handler.py b/meshchatx/src/backend/bot_handler.py index 576fef9..e5b99a1 100644 --- a/meshchatx/src/backend/bot_handler.py +++ b/meshchatx/src/backend/bot_handler.py @@ -240,7 +240,9 @@ class BotHandler: shutil.rmtree(storage_dir) except Exception as exc: logger.warning( - "Failed to delete storage dir for bot %s: %s", bot_id, exc + "Failed to delete storage dir for bot %s: %s", + bot_id, + exc, ) self._save_state() diff --git a/meshchatx/src/backend/config_manager.py b/meshchatx/src/backend/config_manager.py index e8ab093..cb4593f 100644 --- a/meshchatx/src/backend/config_manager.py +++ b/meshchatx/src/backend/config_manager.py @@ -103,6 +103,7 @@ class ConfigManager: "archives_max_storage_gb", 1, ) + self.backup_max_count = self.IntConfig(self, "backup_max_count", 5) self.crawler_enabled = self.BoolConfig(self, "crawler_enabled", False) self.crawler_max_retries = self.IntConfig(self, "crawler_max_retries", 3) self.crawler_retry_delay_seconds = self.IntConfig( diff --git a/meshchatx/src/backend/database/__init__.py b/meshchatx/src/backend/database/__init__.py index 29d2ffc..6ec139a 100644 --- a/meshchatx/src/backend/database/__init__.py +++ b/meshchatx/src/backend/database/__init__.py @@ -211,14 +211,41 @@ class Database: "size": os.path.getsize(backup_path), } - def backup_database(self, storage_path, backup_path: str | None = None): + def backup_database( + self, + storage_path, + backup_path: str | None = None, + max_count: int | None = None, + ): default_dir = os.path.join(storage_path, "database-backups") os.makedirs(default_dir, exist_ok=True) if backup_path is None: timestamp = datetime.now(UTC).strftime("%Y%m%d-%H%M%S") backup_path = os.path.join(default_dir, f"backup-{timestamp}.zip") - return self._backup_to_zip(backup_path) + result = self._backup_to_zip(backup_path) + + # Cleanup old backups if a limit is set + if max_count is not None and max_count > 0: + try: + backups = [] + for file in os.listdir(default_dir): + if file.endswith(".zip"): + full_path = os.path.join(default_dir, file) + stats = os.stat(full_path) + backups.append((full_path, stats.st_mtime)) + + if 
len(backups) > max_count: + # Sort by modification time (oldest first) + backups.sort(key=lambda x: x[1]) + to_delete = backups[: len(backups) - max_count] + for path, _ in to_delete: + if os.path.exists(path): + os.remove(path) + except Exception as e: + print(f"Failed to cleanup old backups: {e}") + + return result def create_snapshot(self, storage_path, name: str): """Creates a named snapshot of the database.""" @@ -258,6 +285,29 @@ class Database: ) return sorted(snapshots, key=lambda x: x["created_at"], reverse=True) + def delete_snapshot_or_backup( + self, + storage_path, + filename: str, + is_backup: bool = False, + ): + """Deletes a database snapshot or auto-backup.""" + base_dir = "database-backups" if is_backup else "snapshots" + file_path = os.path.join(storage_path, base_dir, filename) + + # Basic security check to ensure we stay within the intended directory + abs_path = os.path.abspath(file_path) + abs_base = os.path.abspath(os.path.join(storage_path, base_dir)) + + if not abs_path.startswith(abs_base): + msg = "Invalid path" + raise ValueError(msg) + + if os.path.exists(abs_path): + os.remove(abs_path) + return True + return False + def restore_database(self, backup_path: str): if not os.path.exists(backup_path): msg = f"Backup not found at {backup_path}" diff --git a/meshchatx/src/backend/database/announces.py b/meshchatx/src/backend/database/announces.py index 080a1bc..a1b626e 100644 --- a/meshchatx/src/backend/database/announces.py +++ b/meshchatx/src/backend/database/announces.py @@ -54,6 +54,15 @@ class AnnounceDAO: (destination_hash,), ) + def delete_all_announces(self, aspect=None): + if aspect: + self.provider.execute( + "DELETE FROM announces WHERE aspect = ?", + (aspect,), + ) + else: + self.provider.execute("DELETE FROM announces") + def get_filtered_announces( self, aspect=None, @@ -137,3 +146,12 @@ class AnnounceDAO: "DELETE FROM favourite_destinations WHERE destination_hash = ?", (destination_hash,), ) + + def delete_all_favourites(self, aspect=None): + if aspect: + self.provider.execute( + "DELETE FROM favourite_destinations WHERE aspect = ?", + (aspect,), + ) + else: + self.provider.execute("DELETE FROM favourite_destinations") diff --git a/meshchatx/src/backend/database/messages.py b/meshchatx/src/backend/database/messages.py index 442be84..618d532 100644 --- a/meshchatx/src/backend/database/messages.py +++ b/meshchatx/src/backend/database/messages.py @@ -63,12 +63,28 @@ class MessageDAO: (message_hash,), ) + def delete_lxmf_messages_by_hashes(self, message_hashes): + if not message_hashes: + return + placeholders = ", ".join(["?"] * len(message_hashes)) + self.provider.execute( + f"DELETE FROM lxmf_messages WHERE hash IN ({placeholders})", + tuple(message_hashes), + ) + def delete_lxmf_message_by_hash(self, message_hash): self.provider.execute( "DELETE FROM lxmf_messages WHERE hash = ?", (message_hash,), ) + def delete_all_lxmf_messages(self): + self.provider.execute("DELETE FROM lxmf_messages") + self.provider.execute("DELETE FROM lxmf_conversation_read_state") + + def get_all_lxmf_messages(self): + return self.provider.fetchall("SELECT * FROM lxmf_messages") + def get_conversation_messages(self, destination_hash, limit=100, offset=0): return self.provider.fetchall( "SELECT * FROM lxmf_messages WHERE peer_hash = ? ORDER BY timestamp DESC LIMIT ? 
OFFSET ?", @@ -103,6 +119,22 @@ class MessageDAO: (destination_hash, now, now, now), ) + def mark_conversations_as_read(self, destination_hashes): + if not destination_hashes: + return + now = datetime.now(UTC).isoformat() + for destination_hash in destination_hashes: + self.provider.execute( + """ + INSERT INTO lxmf_conversation_read_state (destination_hash, last_read_at, created_at, updated_at) + VALUES (?, ?, ?, ?) + ON CONFLICT(destination_hash) DO UPDATE SET + last_read_at = EXCLUDED.last_read_at, + updated_at = EXCLUDED.updated_at + """, + (destination_hash, now, now, now), + ) + def is_conversation_unread(self, destination_hash): row = self.provider.fetchone( """ @@ -290,3 +322,56 @@ class MessageDAO: last_viewed_at = last_viewed_at.replace(tzinfo=UTC) return message_timestamp <= last_viewed_at.timestamp() + + # Folders + def get_all_folders(self): + return self.provider.fetchall("SELECT * FROM lxmf_folders ORDER BY name ASC") + + def create_folder(self, name): + now = datetime.now(UTC).isoformat() + return self.provider.execute( + "INSERT INTO lxmf_folders (name, created_at, updated_at) VALUES (?, ?, ?)", + (name, now, now), + ) + + def rename_folder(self, folder_id, new_name): + now = datetime.now(UTC).isoformat() + self.provider.execute( + "UPDATE lxmf_folders SET name = ?, updated_at = ? WHERE id = ?", + (new_name, now, folder_id), + ) + + def delete_folder(self, folder_id): + self.provider.execute("DELETE FROM lxmf_folders WHERE id = ?", (folder_id,)) + + def get_conversation_folder(self, peer_hash): + return self.provider.fetchone( + "SELECT * FROM lxmf_conversation_folders WHERE peer_hash = ?", + (peer_hash,), + ) + + def move_conversation_to_folder(self, peer_hash, folder_id): + now = datetime.now(UTC).isoformat() + if folder_id is None: + self.provider.execute( + "DELETE FROM lxmf_conversation_folders WHERE peer_hash = ?", + (peer_hash,), + ) + else: + self.provider.execute( + """ + INSERT INTO lxmf_conversation_folders (peer_hash, folder_id, created_at, updated_at) + VALUES (?, ?, ?, ?) 
+ ON CONFLICT(peer_hash) DO UPDATE SET + folder_id = EXCLUDED.folder_id, + updated_at = EXCLUDED.updated_at + """, + (peer_hash, folder_id, now, now), + ) + + def move_conversations_to_folder(self, peer_hashes, folder_id): + for peer_hash in peer_hashes: + self.move_conversation_to_folder(peer_hash, folder_id) + + def get_all_conversation_folders(self): + return self.provider.fetchall("SELECT * FROM lxmf_conversation_folders") diff --git a/meshchatx/src/backend/database/schema.py b/meshchatx/src/backend/database/schema.py index 49f8b86..4a551cd 100644 --- a/meshchatx/src/backend/database/schema.py +++ b/meshchatx/src/backend/database/schema.py @@ -2,7 +2,7 @@ from .provider import DatabaseProvider class DatabaseSchema: - LATEST_VERSION = 35 + LATEST_VERSION = 36 def __init__(self, provider: DatabaseProvider): self.provider = provider @@ -423,6 +423,24 @@ class DatabaseSchema: created_at DATETIME DEFAULT CURRENT_TIMESTAMP ) """, + "lxmf_folders": """ + CREATE TABLE IF NOT EXISTS lxmf_folders ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT UNIQUE, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP + ) + """, + "lxmf_conversation_folders": """ + CREATE TABLE IF NOT EXISTS lxmf_conversation_folders ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + peer_hash TEXT UNIQUE, + folder_id INTEGER, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (folder_id) REFERENCES lxmf_folders(id) ON DELETE CASCADE + ) + """, } for table_name, create_sql in tables.items(): @@ -933,6 +951,32 @@ class DatabaseSchema: "ALTER TABLE contacts ADD COLUMN lxst_address TEXT DEFAULT NULL", ) + if current_version < 36: + self._safe_execute(""" + CREATE TABLE IF NOT EXISTS lxmf_folders ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT UNIQUE, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP + ) + """) + self._safe_execute(""" + CREATE TABLE IF NOT EXISTS lxmf_conversation_folders ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + peer_hash TEXT UNIQUE, + folder_id INTEGER, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (folder_id) REFERENCES lxmf_folders(id) ON DELETE CASCADE + ) + """) + self._safe_execute( + "CREATE INDEX IF NOT EXISTS idx_lxmf_conversation_folders_peer_hash ON lxmf_conversation_folders(peer_hash)", + ) + self._safe_execute( + "CREATE INDEX IF NOT EXISTS idx_lxmf_conversation_folders_folder_id ON lxmf_conversation_folders(folder_id)", + ) + # Update version in config self._safe_execute( """ diff --git a/meshchatx/src/backend/docs_manager.py b/meshchatx/src/backend/docs_manager.py index 64c4335..d78d031 100644 --- a/meshchatx/src/backend/docs_manager.py +++ b/meshchatx/src/backend/docs_manager.py @@ -38,7 +38,12 @@ class DocsManager: # Ensure docs directories exist try: - for d in [self.docs_base_dir, self.versions_dir, self.meshchatx_docs_dir]: + for d in [ + self.docs_base_dir, + self.versions_dir, + self.docs_dir, + self.meshchatx_docs_dir, + ]: if not os.path.exists(d): os.makedirs(d) @@ -423,8 +428,6 @@ class DocsManager: def has_docs(self): # Check if index.html exists in the docs folder or if we have any versions - if self.config.docs_downloaded.get(): - return True return ( os.path.exists(os.path.join(self.docs_dir, "index.html")) or len(self.get_available_versions()) > 0 diff --git a/meshchatx/src/backend/message_handler.py b/meshchatx/src/backend/message_handler.py index 
4fb1c8a..5a3deeb 100644 --- a/meshchatx/src/backend/message_handler.py +++ b/meshchatx/src/backend/message_handler.py @@ -35,6 +35,11 @@ class MessageHandler: def delete_conversation(self, local_hash, destination_hash): query = "DELETE FROM lxmf_messages WHERE peer_hash = ?" self.db.provider.execute(query, [destination_hash]) + # Also clean up folder mapping + self.db.provider.execute( + "DELETE FROM lxmf_conversation_folders WHERE peer_hash = ?", + [destination_hash], + ) def search_messages(self, local_hash, search_term): like_term = f"%{search_term}%" @@ -54,6 +59,7 @@ class MessageHandler: filter_unread=False, filter_failed=False, filter_has_attachments=False, + folder_id=None, limit=None, offset=0, ): @@ -66,6 +72,8 @@ class MessageHandler: con.custom_image as contact_image, i.icon_name, i.foreground_colour, i.background_colour, r.last_read_at, + f.id as folder_id, + fn.name as folder_name, (SELECT COUNT(*) FROM lxmf_messages m_failed WHERE m_failed.peer_hash = m1.peer_hash AND m_failed.state = 'failed') as failed_count FROM lxmf_messages m1 @@ -84,10 +92,20 @@ class MessageHandler: ) LEFT JOIN lxmf_user_icons i ON i.destination_hash = m1.peer_hash LEFT JOIN lxmf_conversation_read_state r ON r.destination_hash = m1.peer_hash + LEFT JOIN lxmf_conversation_folders f ON f.peer_hash = m1.peer_hash + LEFT JOIN lxmf_folders fn ON fn.id = f.folder_id """ params = [] where_clauses = [] + if folder_id is not None: + if folder_id == 0 or folder_id == "0": + # Special case: no folder (Uncategorized) + where_clauses.append("f.folder_id IS NULL") + else: + where_clauses.append("f.folder_id = ?") + params.append(folder_id) + if filter_unread: where_clauses.append( "(r.last_read_at IS NULL OR m1.timestamp > strftime('%s', r.last_read_at))", diff --git a/meshchatx/src/frontend/components/TutorialModal.vue b/meshchatx/src/frontend/components/TutorialModal.vue index 6023cfe..f3db212 100644 --- a/meshchatx/src/frontend/components/TutorialModal.vue +++ b/meshchatx/src/frontend/components/TutorialModal.vue @@ -180,7 +180,161 @@
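
Taken together, the backend hunks above expose paginated snapshot and auto-backup listings plus per-file deletion. A minimal client-side sketch follows; it is illustrative only and not part of the patch, and the host and port are assumptions.

import requests

BASE = "http://localhost:8000/api/v1"  # assumed local MeshChatX instance

# page through snapshots three at a time, mirroring the UI defaults added below
page = requests.get(f"{BASE}/database/snapshots", params={"limit": 3, "offset": 0}).json()
print(f"{page['total']} snapshots total, showing {len(page['snapshots'])}")

# delete the oldest auto-backup on the first page, if any
backups = requests.get(f"{BASE}/database/backups", params={"limit": 100, "offset": 0}).json()["backups"]
if backups:
    oldest = backups[-1]["name"]  # listing is sorted newest-first
    requests.delete(f"{BASE}/database/backups/{oldest}")
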

-
+
+
+ +
+ {{ $t("tutorial.discovery_question") || "Do you want to use community interface discovery and auto-connect?" }}
+

+ {{ + $t("tutorial.discovery_desc") || + "This allows MeshChatX to automatically find and connect to public community nodes near you or on the internet." + }} +

+
+ + +
+
+
+ +
+ +
+
+ + Discovered Interfaces +
+
+
+
+
+ +
+ +
+
+
+ {{ iface.name }} +
+ {{ + iface.type + }} +
+ +
+ + Stamps: {{ iface.value }} + +
+ +
+ Hops: {{ iface.hops }} + {{ iface.status }} + + {{ formatLastHeard(iface.last_heard) }} + +
+ +
+
+ + Address: {{ iface.reachable_on }}:{{ iface.port }} +
+
+ + Transport ID: {{ iface.transport_id }} +
+
+ + Network ID: {{ iface.network_id }} +
+
+
+ +
+ Heard +
+
+
+
+
+
-
+

{{ $t("tutorial.custom_interfaces_desc") }}

@@ -479,7 +636,7 @@
-
+
-
- +
-
- - {{ - $t("tutorial.suggested_relays") - }} + +
+ {{ $t("tutorial.discovery_question") || "Do you want to use community interface discovery and auto-connect?" }}
-
-
+ {{ + $t("tutorial.discovery_desc") || + "This allows MeshChatX to automatically find and connect to public community nodes near you or on the internet." + }} +

+
+ + +
+
+
+ +
+ +
+
+ +
+
+
+ + Discovered +
+
+
+
+
+ +
+ +
+
+
+ {{ iface.name }} +
+ {{ iface.type }} +
+ +
+ + Stamps: {{ iface.value }} + +
+ +
+ Hops: {{ iface.hops }} + {{ + iface.status + }} + + {{ formatLastHeard(iface.last_heard) }} + +
+ +
+
+ + Address: {{ iface.reachable_on }}:{{ + iface.port + }} +
+ +
+ + Transport ID: {{ iface.transport_id }} +
+ +
+ + Network ID: {{ iface.network_id }} +
+ +
+ + Loc: {{ iface.latitude }}, + {{ iface.longitude }} +
+
+
+ +
+ Heard +
+
+
+
-
- + + +
+
+ + {{ + $t("tutorial.suggested_relays") + }} +
+
+
+
+ + {{ iface.name }} + + {{ iface.target_host }}:{{ iface.target_port }} +
+
+ + + {{ $t("tutorial.online") }} + + +
+
+
+ +
+
+
-
-

+

+ +

{{ $t("tutorial.custom_interfaces") }}

-

+

{{ $t("tutorial.custom_interfaces_desc_page") }}

+
+
Reticulum Network Stack
-
- v{{ appInfo.rns_version }} +
+
+ v{{ appInfo.rns_version }} +
+
+ {{ + appInfo.is_connected_to_shared_instance + ? "Shared Instance" + : "Main Instance" + }} +
@@ -530,34 +566,75 @@
-
-
-
- {{ snapshot.name }} - {{ formatBytes(snapshot.size) }} • {{ snapshot.created_at }} -
- +
+ {{ snapshot.name }} + {{ formatBytes(snapshot.size) }} • + {{ Utils.formatTimeAgo(snapshot.created_at) }} +
+
+ + +
+
+
+ + +
+
+ Page {{ Math.floor(snapshotsOffset / snapshotsLimit) + 1 }} of + {{ Math.ceil(snapshotsTotal / snapshotsLimit) }} +
+
+ + +
-
+
-
-
-
- {{ backup.name }} - {{ formatBytes(backup.size) }} • {{ backup.created_at }} -
- +
+ {{ backup.name }} + {{ formatBytes(backup.size) }} • + {{ Utils.formatTimeAgo(backup.created_at) }} +
+
+ + +
+
+
+ + +
+
+ Page {{ Math.floor(autoBackupsOffset / autoBackupsLimit) + 1 }} of + {{ Math.ceil(autoBackupsTotal / autoBackupsLimit) }} +
+
+ + +
@@ -704,6 +822,7 @@ export default { components: {}, data() { return { + Utils, appInfo: null, config: null, updateInterval: null, @@ -725,10 +844,16 @@ export default { restoreFile: null, snapshotName: "", snapshots: [], + snapshotsTotal: 0, + snapshotsOffset: 0, + snapshotsLimit: 3, snapshotInProgress: false, snapshotMessage: "", snapshotError: "", autoBackups: [], + autoBackupsTotal: 0, + autoBackupsOffset: 0, + autoBackupsLimit: 3, identityBackupMessage: "", identityBackupError: "", identityBase32: "", @@ -776,18 +901,74 @@ export default { methods: { async listSnapshots() { try { - const response = await window.axios.get("/api/v1/database/snapshots"); - this.snapshots = response.data; + const response = await window.axios.get("/api/v1/database/snapshots", { + params: { + limit: this.snapshotsLimit, + offset: this.snapshotsOffset, + }, + }); + this.snapshots = response.data.snapshots; + this.snapshotsTotal = response.data.total; } catch (e) { console.log("Failed to list snapshots", e); } }, async listAutoBackups() { try { - const response = await window.axios.get("/api/v1/database/backups"); - this.autoBackups = response.data; - } catch (e) { - console.log("Failed to list auto-backups", e); + const response = await window.axios.get("/api/v1/database/backups", { + params: { + limit: this.autoBackupsLimit, + offset: this.autoBackupsOffset, + }, + }); + this.autoBackups = response.data.backups; + this.autoBackupsTotal = response.data.total; + } catch { + console.log("Failed to list auto-backups"); + } + }, + async deleteSnapshot(filename) { + if (!(await DialogUtils.confirm("Are you sure you want to delete this snapshot?"))) return; + try { + await window.axios.delete(`/api/v1/database/snapshots/${filename}`); + ToastUtils.success("Snapshot deleted"); + await this.listSnapshots(); + } catch { + ToastUtils.error("Failed to delete snapshot"); + } + }, + async deleteBackup(filename) { + if (!(await DialogUtils.confirm("Are you sure you want to delete this backup?"))) return; + try { + await window.axios.delete(`/api/v1/database/backups/${filename}`); + ToastUtils.success("Backup deleted"); + await this.listAutoBackups(); + } catch { + ToastUtils.error("Failed to delete backup"); + } + }, + async nextSnapshots() { + if (this.snapshotsOffset + this.snapshotsLimit < this.snapshotsTotal) { + this.snapshotsOffset += this.snapshotsLimit; + await this.listSnapshots(); + } + }, + async prevSnapshots() { + if (this.snapshotsOffset > 0) { + this.snapshotsOffset = Math.max(0, this.snapshotsOffset - this.snapshotsLimit); + await this.listSnapshots(); + } + }, + async nextBackups() { + if (this.autoBackupsOffset + this.autoBackupsLimit < this.autoBackupsTotal) { + this.autoBackupsOffset += this.autoBackupsLimit; + await this.listAutoBackups(); + } + }, + async prevBackups() { + if (this.autoBackupsOffset > 0) { + this.autoBackupsOffset = Math.max(0, this.autoBackupsOffset - this.autoBackupsLimit); + await this.listAutoBackups(); } }, async createSnapshot() { @@ -802,9 +983,8 @@ export default { this.snapshotMessage = "Snapshot created successfully"; this.snapshotName = ""; await this.listSnapshots(); - } catch (e) { + } catch { this.snapshotError = "Failed to create snapshot"; - console.log(e); } finally { this.snapshotInProgress = false; } @@ -825,9 +1005,8 @@ export default { setTimeout(() => ElectronUtils.relaunch(), 2000); } } - } catch (e) { + } catch { ToastUtils.error("Failed to restore snapshot"); - console.log(e); } }, async getAppInfo() { @@ -856,9 +1035,8 @@ export default { await 
window.axios.post("/api/v1/app/integrity/acknowledge"); ToastUtils.success("Integrity issues acknowledged"); await this.getAppInfo(); - } catch (e) { + } catch { ToastUtils.error("Failed to acknowledge integrity issues"); - console.log(e); } } }, @@ -1075,9 +1253,9 @@ export default { link.remove(); window.URL.revokeObjectURL(url); this.identityBackupMessage = "Identity downloaded. Keep it secret."; - } catch (e) { + ToastUtils.success("Identity key file exported"); + } catch { this.identityBackupError = "Failed to download identity"; - console.log(e); } }, async copyIdentityBase32() { @@ -1092,9 +1270,9 @@ export default { } await navigator.clipboard.writeText(this.identityBase32); this.identityBase32Message = "Identity copied. Clear your clipboard after use."; - } catch (e) { + ToastUtils.success("Identity Base32 key copied to clipboard"); + } catch { this.identityBase32Error = "Failed to copy identity"; - console.log(e); } }, onIdentityRestoreFileChange(event) { @@ -1124,9 +1302,8 @@ export default { headers: { "Content-Type": "multipart/form-data" }, }); this.identityRestoreMessage = response.data.message || "Identity imported."; - } catch (e) { + } catch { this.identityRestoreError = "Identity restore failed"; - console.log(e); } finally { this.identityRestoreInProgress = false; } @@ -1147,9 +1324,8 @@ export default { base32: this.identityRestoreBase32.trim(), }); this.identityRestoreMessage = response.data.message || "Identity imported."; - } catch (e) { + } catch { this.identityRestoreError = "Identity restore failed"; - console.log(e); } finally { this.identityRestoreInProgress = false; } diff --git a/meshchatx/src/frontend/components/interfaces/InterfacesPage.vue b/meshchatx/src/frontend/components/interfaces/InterfacesPage.vue index f2e67b3..068a4e5 100644 --- a/meshchatx/src/frontend/components/interfaces/InterfacesPage.vue +++ b/meshchatx/src/frontend/components/interfaces/InterfacesPage.vue @@ -3,7 +3,7 @@ class="flex flex-col flex-1 overflow-hidden min-w-0 bg-gradient-to-br from-slate-50 via-slate-100 to-white dark:from-zinc-950 dark:via-zinc-900 dark:to-zinc-900" >
-
+
-
+
{{ $t("interfaces.manage") }}
-
+
{{ $t("interfaces.title") }}
@@ -111,136 +111,370 @@
-
- -
{{ $t("interfaces.no_interfaces_found") }}
-
{{ $t("interfaces.no_interfaces_description") }}
-
-
-
-
-
- Discovery -
-
Interface Discovery
-
- Publish your interfaces for others to find, or listen for announced entrypoints and - auto-connect to them. -
-
- - - Configure Per-Interface - +
+
-
-
-
Publish (Server)
-
- Enable discovery while adding or editing an interface to broadcast reachable details. - Reticulum will sign and stamp announces automatically. + +
+
+
+ Configured
-
- Requires LXMF in the Python environment. Transport is optional for publishing, but - usually recommended so peers can connect back. +
Interfaces
+
+ +
+
+ +
{{ $t("interfaces.no_interfaces_found") }}
+
{{ $t("interfaces.no_interfaces_description") }}
-
-
-
-
- Discover Interfaces (Peer) + +
+
+
+
+ Discovered Interfaces +
+
+ Recently Heard Announces +
+
+ Cards appear/disappear as announces are heard. Connected entries show a green + pill; disconnected entries are dimmed with a red label. +
+
+
+ + +
+
+ +
+ No discovered interfaces yet. +
+ +
+
+
+ +
+
+ + {{ $t("app.disabled") }} +
+
+ +
+ +
+ +
+
+
+ {{ iface.name }} +
+ {{ iface.type }} +
+ +
+ + Stamps: {{ iface.value }} + + + Connected + +
+ +
+ Hops: {{ iface.hops }} + {{ + iface.status + }} + + Heard: {{ formatLastHeard(iface.last_heard) }} + +
+ +
+
+ + Address: {{ iface.reachable_on }}:{{ iface.port }} +
+ +
+ + Transport ID: {{ iface.transport_id }} +
+ +
+ + Network ID: {{ iface.network_id }} +
+ +
+ + Loc: {{ iface.latitude }}, {{ iface.longitude }} +
+ +
+ + TX {{ discoveredBytes(iface).tx }} · RX + {{ discoveredBytes(iface).rx }} +
+
+
+ +
+ +
+
+
+
+
+
+ +
+
+
+
+
+ Discovery +
+
+ Interface Discovery +
+
+ Publish your interfaces for others to find, or listen for announced entrypoints + and auto-connect to them. +
+
+ + + Configure Per-Interface + +
+
+
+
Publish (Server)
+
+ Enable discovery while adding or editing an interface to broadcast reachable + details. Reticulum will sign and stamp announces automatically.
- Listen for discovery announces and optionally auto-connect to available - interfaces. + Requires LXMF in the Python environment. Transport is optional for publishing, + but usually recommended so peers can connect back.
- -
-
-
-
- Allowed Sources +
+
+
+
+ Discover Interfaces (Peer) +
+
+ Listen for discovery announces and optionally auto-connect to available + interfaces. +
+
+
- -
-
-
- Required Stamp Value +
+
+
+ Allowed Sources +
+ +
+
+
+ Required Stamp Value +
+ +
+
+
+ Auto-connect Slots +
+ +
+ 0 disables auto-connect. +
+
+
+
+ Network Identity Path +
+ +
- -
-
-
- Auto-connect Slots +
+
- -
0 disables auto-connect.
-
-
- Network Identity Path -
- -
-
-
-
- -
- -
@@ -286,6 +520,10 @@ export default { network_identity: "", }, savingDiscovery: false, + discoveredInterfaces: [], + discoveredActive: [], + discoveryInterval: null, + activeTab: "overview", }; }, computed: { @@ -357,19 +595,57 @@ export default { this.interfacesWithStats.forEach((iface) => types.add(iface.type)); return Array.from(types).sort(); }, + sortedDiscoveredInterfaces() { + return [...this.discoveredInterfaces].sort((a, b) => (b.last_heard || 0) - (a.last_heard || 0)); + }, + interfacesWithLocation() { + return this.discoveredInterfaces.filter((iface) => iface.latitude != null && iface.longitude != null); + }, + activeInterfaceStats() { + return Object.values(this.interfaceStats || {}); + }, + tabChipClass() { + return (isActive) => (isActive ? "primary-chip text-xs" : "secondary-chip text-xs"); + }, + discoveredActiveSet() { + const set = new Set(); + this.discoveredActive.forEach((a) => { + const host = a.target_host || a.remote || a.listen_ip; + const port = a.target_port || a.listen_port; + if (host && port) { + set.add(`${host}:${port}`); + } + }); + return set; + }, + discoveredActiveTransportIds() { + const set = new Set(); + this.discoveredActive.forEach((a) => { + if (a.transport_id) { + set.add(a.transport_id); + } + }); + return set; + }, }, beforeUnmount() { clearInterval(this.reloadInterval); + clearInterval(this.discoveryInterval); }, mounted() { this.loadInterfaces(); this.updateInterfaceStats(); this.loadDiscoveryConfig(); + this.loadDiscoveredInterfaces(); // update info every few seconds this.reloadInterval = setInterval(() => { this.updateInterfaceStats(); }, 1000); + + this.discoveryInterval = setInterval(() => { + this.loadDiscoveredInterfaces(); + }, 5000); }, methods: { relaunch() { @@ -506,6 +782,77 @@ export default { this.trackInterfaceChange(); } }, + async loadDiscoveredInterfaces() { + try { + const response = await window.axios.get(`/api/v1/reticulum/discovered-interfaces`); + this.discoveredInterfaces = response.data?.interfaces ?? []; + this.discoveredActive = response.data?.active ?? 
[]; + } catch (e) { + console.log(e); + } + }, + formatLastHeard(ts) { + const seconds = Math.max(0, Math.floor(Date.now() / 1000 - ts)); + if (seconds < 60) return `${seconds}s ago`; + if (seconds < 3600) return `${Math.floor(seconds / 60)}m ago`; + if (seconds < 86400) return `${Math.floor(seconds / 3600)}h ago`; + return `${Math.floor(seconds / 86400)}d ago`; + }, + isDiscoveredConnected(iface) { + const reach = iface.reachable_on; + const port = iface.port; + if (iface.transport_id && this.discoveredActiveTransportIds.has(iface.transport_id)) { + return true; + } + if (reach && port && this.discoveredActiveSet && this.discoveredActiveSet.has(`${reach}:${port}`)) { + return true; + } + return this.activeInterfaceStats.some((s) => { + const hostMatch = + (s.target_host && reach && s.target_host === reach) || (s.remote && reach && s.remote === reach); + const portMatch = + (s.target_port && port && Number(s.target_port) === Number(port)) || + (s.listen_port && port && Number(s.listen_port) === Number(port)); + return hostMatch && portMatch && (s.connected || s.online); + }); + }, + goToMap(iface) { + if (iface.latitude == null || iface.longitude == null) return; + this.$router.push({ + name: "map", + query: { + lat: iface.latitude, + lon: iface.longitude, + label: iface.name, + }, + }); + }, + mapAllDiscovered() { + this.$router.push({ + name: "map", + query: { view: "discovered" }, + }); + }, + discoveredBytes(iface) { + const reach = iface.reachable_on; + const port = iface.port; + const stats = this.activeInterfaceStats || []; + const match = stats.find((s) => { + const host = s.target_host || s.remote || s.interface_name; + const p = s.target_port || s.listen_port; + const hostMatch = host && reach && host === reach; + const portMatch = p && port && Number(p) === Number(port); + return hostMatch && portMatch; + }); + if (!match) return null; + return { + tx: this.formatBytes(match.txb || 0), + rx: this.formatBytes(match.rxb || 0), + }; + }, + formatBytes(bytes) { + return Utils.formatBytes(bytes || 0); + }, parseBool(value) { if (typeof value === "string") { return ["true", "yes", "1", "y", "on"].includes(value.toLowerCase()); @@ -565,6 +912,39 @@ export default { this.savingDiscovery = false; } }, + getDiscoveryIcon(iface) { + switch (iface.type) { + case "AutoInterface": + return "home-automation"; + case "RNodeInterface": + return iface.port && iface.port.toString().startsWith("tcp://") ? "lan-connect" : "radio-tower"; + case "RNodeMultiInterface": + return "access-point-network"; + case "TCPClientInterface": + case "BackboneInterface": + return "lan-connect"; + case "TCPServerInterface": + return "lan"; + case "UDPInterface": + return "wan"; + case "SerialInterface": + return "usb-port"; + case "KISSInterface": + case "AX25KISSInterface": + return "antenna"; + case "I2PInterface": + return "eye"; + case "PipeInterface": + return "pipe"; + default: + return "server-network"; + } + }, + copyToClipboard(text, label) { + if (!text) return; + navigator.clipboard.writeText(text); + ToastUtils.success(`${label} copied to clipboard`); + }, setStatusFilter(value) { this.statusFilter = value; }, diff --git a/meshchatx/src/frontend/components/map/MapPage.vue b/meshchatx/src/frontend/components/map/MapPage.vue index 85c7219..b8aeb4a 100644 --- a/meshchatx/src/frontend/components/map/MapPage.vue +++ b/meshchatx/src/frontend/components/map/MapPage.vue @@ -28,6 +28,13 @@
+ +
+ +
+ + +
+
+
+
+
+
+ + All Messages +
+
+ + Uncategorized +
+
+ + {{ folder.name }} + +
+
+
+
+
+
+
+ + + {{ selectedHashes.size }} selected + +
+
+ + +
+ +
+ + +
+
+
+
+
+
+ Move to Folder +
+ +
+ +
+
+ +
+
@@ -340,6 +626,7 @@
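
The folder export/import routes added in this patch are designed to survive ID changes by matching folders on name. A round-trip sketch is shown below; it is illustrative only, and the host, port, and file name are assumptions.

import json
import requests

BASE = "http://localhost:8000/api/v1"  # assumed local MeshChatX instance

# export folders plus conversation-to-folder mappings to a file
data = requests.get(f"{BASE}/lxmf/folders/export").json()
with open("folders-export.json", "w") as f:
    json.dump(data, f)

# re-import (e.g. on another profile): folders are matched by name, so an
# existing folder with the same name is reused rather than duplicated
with open("folders-export.json") as f:
    payload = json.load(f)
print(requests.post(f"{BASE}/lxmf/folders/import", json=payload).json())
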