Merge pull request #5 from landandair/Gui_Compatibility_Fixes

Gui compatibility fixes
This commit is contained in:
landandair
2025-08-09 20:05:55 -05:00
committed by GitHub
5 changed files with 74 additions and 37 deletions

View File

@@ -100,6 +100,7 @@ class CidStore:
else: # TODO: Check if what is in the index is older or of a more reliable source
logger.warning('Received node dictionary that was already in source node')
# TODO: Mop up after node addition by removing all dereference nodes
self.save_index() # Save index after modifying it
def set_update_callback(self, callback):
    """Register *callback* to be invoked after the node index changes."""
    self.callback = callback
@@ -172,7 +173,7 @@ class CidStore:
size = len(data_store)
stored = True
else: # Not a storage node, so calculate hash based on source path
hash_digest = self.get_path_hash(parent)
hash_digest = self.get_path_hash(parent, name)
if hash_digest not in self.get_node_obj(parent).children:
self.get_node_obj(parent).children.append(hash_digest)
self.index[hash_digest] = Cid(hash_digest, name, time_stamp, size, parent, children, stored, node_type)
@@ -196,12 +197,15 @@ class CidStore:
os.mkdir(path)
path = os.path.join(path, node.hash)
return path
return None
def get_path_hash(self, parent_hash):
def get_path_hash(self, parent_hash, name=None):
    """Derive the digest for a node from the chain of hashes leading to the source node.

    The digest covers every parent hash up to and including *parent_hash*.
    When *name* is given it is folded into the digest as well, so two
    otherwise path-identical nodes (e.g. sibling directories) hash differently.
    """
    components = self.get_parent_hashes(parent_hash)
    components.append(parent_hash)  # full chain ending at the immediate parent
    if name:
        components.append(name)
    digest_alg = self.hash_alg.copy()  # never mutate the shared template algorithm
    digest_alg.update(''.join(components).encode('utf8'))
    return digest_alg.hexdigest()
@@ -218,6 +222,7 @@ class CidStore:
def get_node_obj(self, hash):
    """Return the Cid object stored in the index under *hash*, or None when unknown."""
    return self.index.get(hash)
def get_node(self, hash):
"""Get data associated with node either its information about its children, or the file chunk itself packaged
@@ -232,9 +237,11 @@ class CidStore:
info = self.get_node_information(hash)
if len(info) >= 1:
return json.dumps(info) # Return json encoded data
return None
else: # Look for data chunk to return
data = self.get_data(hash)
return data
return None
def get_data(self, hash):
"""Blindly retrieve associated chunk data"""
@@ -248,8 +255,11 @@ class CidStore:
return data
else:
logger.warning(f'Data stored in {node.hash} did not match hash')
return None
else:
node.is_stored = False
return None
return None
def get_node_information(self, hash, initial_req=True):
"""Returns a dict of all node information below hash in tree"""
@@ -280,8 +290,7 @@ class CidStore:
if node:
if node.type == Cid.TYPE_CHUNK:
return True
else:
return False
return False
def get_parent_hashes(self, node_hash):
"""Returns a list of parent hashes starting at source"""
@@ -333,6 +342,7 @@ class CidStore:
if node.type == node.TYPE_CHUNK:
path = self.get_data_path(hash_id)
os.remove(path)
self.remove_hash(hash_id)
def clean_hash_data(self, hash_id):

View File

@@ -1,5 +1,6 @@
from threading import Thread
import io
import base64
import flask
from flask_classful import FlaskView, request, route
@@ -13,15 +14,10 @@ class RNFSView(FlaskView):
''')
@route("/site-map")
def site_map(self):
links = []
# for rule in .url_map.iter_rules():
# # Filter out rules we can't navigate to in a browser
# # and rules that require parameters
# if "GET" in rule.methods and has_no_empty_params(rule):
# url = url_for(rule.endpoint, **(rule.defaults or {}))
# links.append((url, rule.endpoint))
@route("/getStatus", methods=['GET'], endpoint='getStatus')
def get_status(self):
"""Return status/queue of the file server for insight into what its doing"""
return self.info.get_status()
@route('/getNode/<id>', methods=['GET'], endpoint='getNode')
def get_node(self, id=None):
@@ -50,7 +46,6 @@ class RNFSView(FlaskView):
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flask.flash('No file part')
return flask.redirect(request.url)
file = request.files['file']
parent = request.form['parent']
@@ -60,7 +55,11 @@ class RNFSView(FlaskView):
flask.flash('No selected file')
return flask.redirect(request.url)
if file:
self.info.upload_file(file.filename, file.stream.read(), parent)
encoding = file.headers.get('Content-Transfer-Encoding')
body = file.stream.read()
if encoding == 'base64':
body = base64.b64decode(body)
self.info.upload_file(file.filename, body, parent)
return flask.redirect(flask.url_for('uploadData',
filename=file.filename))
return '''

View File

@@ -65,16 +65,12 @@ def main(args):
max_file_size=max_size)
for i in store.index:
print(store.index[i])
try:
logger.info(f'Starting server using identity of: {server_destination.hexhash}')
while True:
pass # Put a waiting loop here with basic announce functionality
inp = input()
if inp:
print("Announcing")
rns_interface.send_announce()
finally:
store.save_index() # ensure index gets saved
logger.info(f'Starting server using identity of: {server_destination.hexhash}')
while True:
pass # Put a waiting loop here with basic announce functionality
inp = input()
if inp:
rns_interface.send_announce()
if __name__ == '__main__':

View File

@@ -17,8 +17,9 @@ Periodic Checksum: Used for maintaining consistency across server instances ensu
1. Announce(CS): Check sum of destination source index(sort supplied hashes combine them and calculate hash)
2. Announce(RH): Requestor(s) requests hash of the destination and updates index accordingly
"""
import os.path
import os
import time
import json
from threading import Thread
import random
from logging import getLogger
@@ -36,14 +37,14 @@ class RNSInterface:
CHECKSUM_ID = "CS" # Checksum of whole destination index check against local copy
def __init__(self, cid_store: CidStore, server_destination: RNS.Destination, allowed_dest_path: str, allow_all=False):
self.hash_requests = [] # List of hashes requested from network
self.desired_hash_translation_map = {} # List of hashes requested from network and their sources
self.hash_progress = {} # List of hashes and their associated progress
self.cid_store = cid_store # Store of data
self.currently_linked = False # Maintain whether we are currently connected to a peer Used to limit incoming
# and outgoing requests
self.allow_all = allow_all
self.allowed_peers = self.load_allowed_peers(allowed_dest_path) # allowed peers who we will host files from
# hash translation map list of requested hashes and a list of identities who can provide it
self.desired_hash_translation_map = {}
self.request_id_to_hash = {}
self.max_allowed_attempts = 5
@@ -72,6 +73,22 @@ class RNSInterface:
RNS.Transport.register_announce_handler(announce_handler)
self.start_service_loop()
def get_status(self):
    """Summarize every in-flight network hash request as a JSON string.

    For each hash still in the desired-hash map, report the node's name,
    its transfer progress (0 until a progress callback fires), the known
    sources, and how many attempts have been made.
    """
    status = {}
    for node_hash, (sources, attempts, next_allowed_time) in self.desired_hash_translation_map.items():
        node_obj = self.cid_store.get_node_obj(node_hash)
        status[node_hash] = {
            # get_node_obj returns None for hashes missing from the index;
            # guard so one unknown hash cannot crash the whole status call.
            'name': node_obj.name if node_obj else None,
            'progress': self.hash_progress.get(node_hash, 0),
            'sources': sources,
            'attempts': attempts,
        }
    return json.dumps(status)
def load_allowed_peers(self, path):
allowed_dest = []
if path or not self.allow_all:
@@ -97,6 +114,7 @@ class RNSInterface:
hash_str = data.decode('utf8')
RNS.log(f"Processing request from client for {hash_str}")
# TODO: Check if user is identified/allowed to make request in index
time.sleep(10)
return self.cid_store.get_node(hash_str)
def client_disconnected(self, link: RNS.Link):
@@ -196,10 +214,12 @@ class RNSInterface:
while not link.rtt or not link:
time.sleep(.1)
try:
self.hash_progress[hash_str] = 0
receipt = link.request('RH',
data=hash_str.encode('utf8'),
response_callback=self.got_response_data,
failed_callback=self.failed_response
failed_callback=self.failed_response,
progress_callback=self.got_progress
)
if receipt:
self.request_id_to_hash[receipt.get_request_id()] = hash_str
@@ -214,10 +234,20 @@ class RNSInterface:
self.cid_store.add_data(hash_str, response)
self.request_id_to_hash.pop(request_id)
self.desired_hash_translation_map.pop(hash_str)
self.hash_progress.pop(hash_str, None) # Remove progress when complete
response_rec.link.teardown()
def failed_response(self, response: RNS.RequestReceipt):
RNS.log("The request " + RNS.prettyhexrep(response.request_id) + " failed.")
request_id = response.get_request_id()
hash_str = self.request_id_to_hash[request_id]
self.desired_hash_translation_map.pop(hash_str)
RNS.log("The request for: " + hash_str + " failed.")
def got_progress(self, response_rec: RNS.RequestReceipt):
    """Progress callback for an in-flight 'RH' request.

    Records the receipt's progress under the requested hash so that
    get_status() can report it to the GUI.
    """
    request_id = response_rec.get_request_id()
    hash_str = self.request_id_to_hash[request_id]
    # Bug fix: the progress value was computed but never stored, so
    # get_status() only ever saw the initial 0 set when the request was made.
    self.hash_progress[hash_str] = response_rec.get_progress()
def service_desired_hash_list(self):
"""Thread to service the desired hash dictionary"""

View File

@@ -13,8 +13,6 @@ class ServerCommandState:
self.rns_interface = rns_interface
self.cid_store = cid_store
self.cid_store.set_update_callback(callback=self.updated_hash_callback)
self.primary_req_queue = deque()
self.auto_req_queue = deque()
self.max_file_size = max_file_size
self.api_ip = host
self.api_port = port
@@ -23,10 +21,9 @@ class ServerCommandState:
def get_address(self):
    """Return the (host, port) pair the HTTP API is bound to."""
    return (self.api_ip, self.api_port)
def should_auto_req(self, new_hash):
"""TODO: Add a filter to only request hash on network if the metadata meets certain criteria(only use on files and
data chunks)"""
self.auto_req_queue.append(new_hash)
def get_status(self):
    """Return the RNS interface's queue summary (a JSON string of pending requests)."""
    summary = self.rns_interface.get_status()
    return summary
def get_node_info(self, node_hash):
"""Get node data associated to info"""
@@ -60,18 +57,23 @@ class ServerCommandState:
node = self.cid_store.get_node_obj(node_hash)
if node:
return node.name
return ""
def get_src_dest(self):
    """Return the store's source hash — presumably the root/identity node; confirm with CidStore."""
    source = self.cid_store.source_hash
    return source
def upload_file(self, file_name, file_data, parent=None):
    """Add *file_data* to the store as *file_name* under *parent*, then persist the index."""
    store = self.cid_store
    store.add_file(file_name, file_data, parent)
    store.save_index()  # persist immediately so the upload survives a restart
def make_dir(self, name, parent=None):
    """Create a directory node called *name* under *parent*, then persist the index."""
    store = self.cid_store
    store.add_dir(name, parent)
    store.save_index()  # persist immediately so the new directory survives a restart
def delete_node(self, id):
return self.cid_store.remove_hash(id)
ret = self.cid_store.remove_hash(id)
self.cid_store.save_index()
return ret
def updated_hash_callback(self, node_hash):
"""Called when the cid storage has added any nodes from a dictionary(json file)"""