Mirror of https://github.com/landandair/Reticulum-Decentralized-File-Server.git (synced 2025-12-22 09:27:08 +00:00)
- Nodes now send correctly
- Fixed bug where the child node list was not checked for presence before appending a node
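The presence check referenced above is the classic guarded-append pattern: test membership before appending, so that handling the same node twice cannot create duplicate child entries. A minimal sketch of the pattern (the Node class here is a hypothetical stand-in for the repo's Cid type):

    class Node:
        def __init__(self, node_hash):
            self.hash = node_hash
            self.children = []  # child hashes, kept duplicate-free

        def add_child(self, child_hash):
            # Guarded append: repeating an add is a no-op instead of a duplicate.
            if child_hash not in self.children:
                self.children.append(child_hash)

    parent = Node('abc')
    parent.add_child('def')
    parent.add_child('def')  # second call changes nothing
    assert parent.children == ['def']

A set would make the membership test O(1), but a plain list keeps insertion order and serializes to JSON without conversion, which matters because node indexes are shipped between peers as JSON.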
@@ -1,5 +1,4 @@
 import os
 import shutil
 from logging import getLogger
-import pickle
 import json
@@ -69,7 +68,6 @@ class CidStore:
 
     def add_data(self, hash, data: bytes):
         """Adds node data gained in response to a request"""
-        print(data)
         node = self.get_node_obj(hash)
         if node and self.get_parent_hashes(hash):
             source = self.get_parent_hashes(hash)
@@ -77,16 +75,13 @@ class CidStore:
             if source[0] != self.source_hash:  # Add data only if it doesn't conflict with our data
                 if self.is_storage_hash(hash):
                     if hash == self.get_data_hash(node.parent, data, include_source=False):
-                        print(data, 'adding node')
                         self.add_node(node.name, node.parent, node.type, node.time_stamp, data, stored=True)
                     else:
                         logger.warning(
                             f"Expected data hash of {hash} but got {self.get_data_hash(node.parent, data, include_source=False)} instead.")
                 else:  # Try to decode as a json store and load the dictionary
                     data_dict = json.loads(data)
-                    print(data_dict)
                     self.add_node_dict(data_dict)
-                    print(self.index)
         else:
             data_dict = json.loads(data)
             self.add_node_dict(data_dict)
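The add_data path above only stores a chunk when the hash recomputed from the received bytes matches the hash that was requested. A generic sketch of this verify-before-store pattern using hashlib (the store's actual get_data_hash also mixes in the parent hash and optionally the source, so this is an illustration rather than the repo's exact scheme):

    import hashlib

    def verify_and_store(expected_hash: str, data: bytes, storage: dict) -> None:
        # Recompute the content address from the bytes actually received.
        actual = hashlib.sha256(data).hexdigest()
        if actual != expected_hash:
            # Reject corrupted or forged payloads instead of storing them.
            raise ValueError(f'expected {expected_hash}, got {actual}')
        storage[expected_hash] = data

This check is what makes content addressing self-validating: a peer cannot return the wrong bytes for a hash without the mismatch being detected locally.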
@@ -141,7 +136,8 @@ class CidStore:
                 stored = True
         else:  # Not a storage node, so calculate hash based on source path
             hash_digest = self.get_path_hash(parent)
-            self.get_node_obj(parent).children.append(hash_digest)
+            if hash_digest not in self.get_node_obj(parent).children:
+                self.get_node_obj(parent).children.append(hash_digest)
         self.index[hash_digest] = Cid(hash_digest, name, time_stamp, size, parent, children, stored, node_type)
         if size and node_type == Cid.TYPE_FILE:  # Is of type file so break into chunks
             for i, pos in enumerate(range(0, size, self.chunk_size)):
@@ -150,7 +146,6 @@ class CidStore:
         elif size:
             with open(self.get_data_path(hash_digest), 'wb') as f:
                 f.write(data_store)
-        print('hash_gen', hash_digest)
         return hash_digest
 
     def get_data_path(self, node_hash):  # get data path
@@ -192,7 +187,7 @@ class CidStore:
        in binary. Return nothing if no data was found"""
         self.check_is_stored(hash)  # Update all storage status for nodes
         node = self.get_node_obj(hash)
-        print('Generating data for', node)
+        logger.info(f'Generating data for: {node}')
         if node:
             if node.type != Cid.TYPE_CHUNK:  # look for node information
                 info = self.get_node_information(hash)
@@ -335,3 +330,7 @@ if __name__ == '__main__':
     for hash in file_data[f_hash]['children']:
         print(store.get_node(hash))
     store.save_index()
+    store2 = CidStore('store_2', '123456', 'hermes2')
+    store2.add_node_dict(json.loads(store.get_node('12345')))
+    print(store2.get_node('12345'))
+    print(store.get_node('12345'))

@@ -1,15 +1,22 @@
 import argparse
 import os
 import time
+import logging
+from logging.handlers import RotatingFileHandler
 
 import cid_store
 from rns_interface import RNSInterface
 import server_command_state
 import RNS
 
+logger = logging.getLogger(__name__)
+
 
 def main(args):
     # Cid_store_args
     store_path = args.path
+    max_size = args.max_file_size
+    # Cid_store_args
     if not os.path.exists(store_path):
         os.mkdir(store_path)
     server_name = 'test'
 
     # Reticulum interface args
@@ -24,14 +31,26 @@ def main(args):
         server_identity = RNS.Identity()
         server_identity.to_file(identity_path)
 
+    # Set up logger
+    log_path = os.path.join(store_path, 'rnfs.log')
+    my_handler = RotatingFileHandler(log_path, mode='w', maxBytes=5 * 1024 * 1024,
+                                     backupCount=2, delay=False)
+    logging.basicConfig(level=logging.DEBUG,
+                        format="%(asctime)s %(name)-10s %(levelname)-8s %(message)s",
+                        datefmt="%y-%m-%d %H:%M:%S",
+                        handlers=[
+                            my_handler,
+                            logging.StreamHandler()
+                        ])
+
     # Make cid_store
     store = cid_store.CidStore(store_path, server_identity.hexhash, server_name)
     # Make rns interface
     rns_interface = RNSInterface(store, server_identity, allow_all=True)
     # Make main command
-    server_command = server_command_state.ServerCommandState(rns_interface, store)
+    server_command = server_command_state.ServerCommandState(rns_interface, store, max_size)
     try:
-        print(f'Starting server using identity of: {server_identity.hexhash}')
+        logger.info(f'Starting server using identity of: {server_identity.hexhash}')
         # store.add_file_node('../README.md', 'readme.md')
         # store.add_file_node('../test/hello.txt', 'test.txt')
         while True:
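With maxBytes=5 * 1024 * 1024 and backupCount=2, the rotating handler above caps on-disk logging at roughly 15 MB (rnfs.log plus two rollover backups), while the additional StreamHandler keeps output visible on the console. This log setup is what the print() calls removed throughout this commit are being replaced with.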
@@ -47,5 +66,9 @@ def main(args):
 if __name__ == '__main__':
     parser = argparse.ArgumentParser('Reticulum file server')
+    parser.add_argument('-p', '--path', default='store', type=str, help='Path to storage directory')
+    parser.add_argument('-m', '--max_file_size', default=5000, type=int, help='max size of files automatically '
+                                                                              'accepted in bytes')
+    parser.add_argument('-c', '--config_path', default=None, type=str, help='Path to RNS config')
     parser.add_argument('name', type=str, help='Nickname of server')
     args = parser.parse_args()
-    main(args)
+    main(args)

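Given the parser above, a typical invocation looks like the following (the entry-point filename is not visible in this diff, so main.py is a placeholder):

    python main.py --path store --max_file_size 5000 hermes

Here hermes is the required server nickname, --path names the storage directory (created on startup if missing), and --max_file_size caps, in bytes, the files the server will automatically accept.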
@@ -26,6 +26,7 @@ from cid_store import CidStore
 
+logger = getLogger(__name__)
 
 
 class RNSInterface:
     app_name = "Reticulum-File-Server"
     REQUEST_HASH_ID = "RH"  # Request hash: Request hash
@@ -96,17 +97,16 @@ class RNSInterface:
 
     def client_disconnected(self, link: RNS.Link):
         """TODO: Determine the cause of the cut and adjust accordingly"""
-        print(f'{link.status}: link cut')
         if link.teardown_reason == RNS.Link.TIMEOUT:
             RNS.log("The link timed out, exiting now")
         elif link.teardown_reason == RNS.Link.DESTINATION_CLOSED:
-            RNS.log("The link was closed by the server, exiting now")
+            logger.debug("The link was closed by the server")
         else:
             RNS.log("Link closed, exiting now")
         self.currently_linked = False
 
     def handle_announce(self, destination_hash, announced_identity: RNS.Identity, app_data):
-        RNS.log(
+        logger.debug(
             "Received an announce from " +
             RNS.prettyhexrep(destination_hash)
         )
@@ -149,7 +149,7 @@ class RNSInterface:
 
     def handle_node_present(self, source, hash):
         """See if we wanted the node and don't have it"""
-        print('Checking if we wanted present node(make note of who owns it)')
+        logger.info(f'{hash}: Checking if we wanted present node(make note of who owns it)')
         if hash in self.desired_hash_translation_map:  # See if we wanted it
             sources, _, _ = self.desired_hash_translation_map[hash]
             source_ident = RNS.Identity.recall(bytes.fromhex(source))
@@ -169,7 +169,7 @@ class RNSInterface:
                                  create_receipt=False)
         packet.send()
         if hash_str not in self.desired_hash_translation_map:
-            self.desired_hash_translation_map[hash_str] = ([], 0, time.time())
+            self.desired_hash_translation_map[hash_str] = ([], 0, time.time()+60)
             RNS.log(f'RNSFS: Requesting presence of hash in network')
         else:
             RNS.log('RNSFS: Already requested this hash on network')
@@ -200,7 +200,7 @@ class RNSInterface:
             if receipt:
                 self.request_id_to_hash[receipt.get_request_id()] = hash_str
         except:
-            print('error')
+            logger.warning('Error: unknown error while making request')
 
     def got_response_data(self, response_rec: RNS.RequestReceipt):
         request_id = response_rec.get_request_id()

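The time.time()+60 change above seeds each pending hash request with a timestamp one minute in the future, which reads as an expiry deadline rather than a creation time. A sketch of that bookkeeping, under the assumption that the tuple's third field is treated as an expiry (the consuming code is not shown in this diff):

    import time

    desired = {}  # hash -> (sources, attempt_count, expiry)

    def note_request(hash_str):
        if hash_str not in desired:
            # Entry becomes stale 60 seconds from now.
            desired[hash_str] = ([], 0, time.time() + 60)

    def prune_expired():
        now = time.time()
        for h in [h for h, (_, _, expiry) in desired.items() if expiry < now]:
            del desired[h]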
@@ -1,8 +1,11 @@
 from collections import deque
+from logging import getLogger
 
 from rns_interface import RNSInterface
 from cid_store import CidStore
 
+logger = getLogger(__name__)
+
 class ServerCommandState:
     def __init__(self, rns_interface:RNSInterface, cid_store:CidStore, max_file_size=-1):
         self.rns_interface = rns_interface
@@ -25,8 +28,8 @@ class ServerCommandState:
         """Called when the cid storage has added any nodes from a dictionary (json file)"""
         node = self.cid_store.get_node_obj(node_hash)
         if node.type == node.TYPE_CHUNK or node.type == node.TYPE_FILE:  # Check if it is a file
-            print(node.size, self.max_file_size)
+            logger.debug(f'learned about new file node of Size-{node.size}')
             if node.size < self.max_file_size or self.max_file_size == -1:  # Check if it is within size limits
                 if not self.cid_store.check_is_stored(node.hash):  # Check if it is already stored
-                    print(f"RNFS Manager: Automatically requesting {node_hash}")
+                    logger.info(f"RNFS Manager: Automatically requesting {node_hash}")
                     self.rns_interface.make_hash_desire_request(node_hash)
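The auto-request gate above treats a max_file_size of -1 as "no limit". The condition distills to a small predicate, sketched here for clarity (the helper name is illustrative, not from the repo):

    def within_limit(size, max_file_size=-1):
        # -1 disables the cap; otherwise only sizes under the cap pass.
        return max_file_size == -1 or size < max_file_size

    assert within_limit(10_000, -1)       # unlimited
    assert within_limit(4_999, 5_000)     # under the default 5000-byte cap
    assert not within_limit(5_000, 5_000)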