Sudo-Ivan
2024-12-30 03:50:52 -06:00
parent 0f5f5cbb13
commit decbd8f29a
16 changed files with 1046 additions and 970 deletions


@@ -2,13 +2,12 @@ package resource
import (
    "crypto/sha256"
    "encoding/binary"
    "errors"
    "io"
    "path/filepath"
    "strings"
    "sync"
    "time"
)
const (
@@ -19,88 +18,89 @@ const (
    STATUS_CANCELLED = 0x04

    DEFAULT_SEGMENT_SIZE = 384 // Based on ENCRYPTED_MDU
    MAX_SEGMENTS         = 65535
    CLEANUP_INTERVAL     = 300 // 5 minutes

    // Window size constants
    WINDOW               = 4
    WINDOW_MIN           = 2
    WINDOW_MAX_SLOW      = 10
    WINDOW_MAX_VERY_SLOW = 4
    WINDOW_MAX_FAST      = 75
    WINDOW_MAX           = WINDOW_MAX_FAST

    // Rate thresholds
    FAST_RATE_THRESHOLD      = WINDOW_MAX_SLOW - WINDOW - 2
    VERY_SLOW_RATE_THRESHOLD = 2

    // Transfer rates (bytes per second)
    RATE_FAST      = (50 * 1000) / 8 // 50 Kbps
    RATE_VERY_SLOW = (2 * 1000) / 8  // 2 Kbps

    // Window flexibility
    WINDOW_FLEXIBILITY = 4

    // Hash and segment constants
    MAPHASH_LEN      = 4
    RANDOM_HASH_SIZE = 4

    // Size limits
    MAX_EFFICIENT_SIZE     = 16*1024*1024 - 1 // ~16MB
    AUTO_COMPRESS_MAX_SIZE = MAX_EFFICIENT_SIZE

    // Timeouts and retries
    PART_TIMEOUT_FACTOR           = 4
    PART_TIMEOUT_FACTOR_AFTER_RTT = 2
    PROOF_TIMEOUT_FACTOR          = 3
    MAX_RETRIES                   = 16
    MAX_ADV_RETRIES               = 4
    SENDER_GRACE_TIME             = 10.0
    PROCESSING_GRACE              = 1.0
    RETRY_GRACE_TIME              = 0.25
    PER_RETRY_DELAY               = 0.5
)
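For orientation, the derived values work out to FAST_RATE_THRESHOLD = 10 - 4 - 2 = 4 rounds, RATE_FAST = 6250 bytes/s, and RATE_VERY_SLOW = 250 bytes/s. A minimal sketch of how a sender might apply them; the actual adjustment policy lives outside this diff, so treat adjustWindow below as hypothetical:

    // adjustWindow is a hypothetical illustration: widen the window cap after
    // enough consecutive rounds at or above RATE_FAST, clamp it on very slow links.
    func (r *Resource) adjustWindow(rate float64) {
        if rate >= RATE_FAST {
            r.fastRateRounds++
            if r.fastRateRounds >= FAST_RATE_THRESHOLD {
                r.windowMax = WINDOW_MAX_FAST // allow up to 75 outstanding parts
            }
        } else if rate <= RATE_VERY_SLOW {
            r.verySlowRateRounds++
            if r.verySlowRateRounds >= VERY_SLOW_RATE_THRESHOLD {
                r.windowMax = WINDOW_MAX_VERY_SLOW // clamp to 4 outstanding parts
            }
        }
    }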
type Resource struct {
    mutex              sync.RWMutex
    data               []byte
    fileHandle         io.ReadWriteSeeker
    fileName           string
    hash               []byte
    randomHash         []byte
    originalHash       []byte
    status             byte
    compressed         bool
    autoCompress       bool
    encrypted          bool
    split              bool
    segments           uint16
    segmentIndex       uint16
    totalSegments      uint16
    completedParts     map[uint16]bool
    transferSize       int64
    dataSize           int64
    progress           float64
    window             int
    windowMax          int
    windowMin          int
    windowFlexibility  int
    rtt                float64
    fastRateRounds     int
    verySlowRateRounds int
    createdAt          time.Time
    completedAt        time.Time
    callback           func(*Resource)
    progressCallback   func(*Resource)
}
func New(data interface{}, autoCompress bool) (*Resource, error) {
    r := &Resource{
        status:         STATUS_PENDING,
        compressed:     false,
        autoCompress:   autoCompress,
        completedParts: make(map[uint16]bool),
        createdAt:      time.Now(),
        progress:       0.0,
    }

    switch v := data.(type) {
@@ -118,6 +118,10 @@ func New(data interface{}, autoCompress bool) (*Resource, error) {
        if err != nil {
            return nil, err
        }
        if namer, ok := v.(interface{ Name() string }); ok {
            r.fileName = namer.Name()
        }
    default:
        return nil, errors.New("unsupported data type")
    }
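The added interface assertion means any io.ReadWriteSeeker that also exposes Name() string, notably *os.File, gets its name recorded without a hard dependency on os. A quick usage sketch (the file path is hypothetical):

    f, err := os.OpenFile("report.txt", os.O_RDWR, 0)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // *os.File satisfies both io.ReadWriteSeeker and Name() string,
    // so New records "report.txt" for the extension lookup further down.
    r, err := New(f, true)
    if err != nil {
        log.Fatal(err)
    }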
@@ -138,10 +142,10 @@ func New(data interface{}, autoCompress bool) (*Resource, error) {
        r.transferSize = int64(float64(r.dataSize) * compressibility)
    } else if r.fileHandle != nil {
        // For file handles, use extension-based estimation
        ext := strings.ToLower(filepath.Ext(r.fileName))
        r.transferSize = estimateFileCompression(r.dataSize, ext)
    }

    // Ensure minimum size and add compression overhead
    if r.transferSize < r.dataSize/10 {
        r.transferSize = r.dataSize / 10
@@ -223,7 +227,7 @@ func (r *Resource) IsCompressed() bool {
func (r *Resource) Cancel() {
    r.mutex.Lock()
    defer r.mutex.Unlock()

    if r.status == STATUS_PENDING || r.status == STATUS_ACTIVE {
        r.status = STATUS_CANCELLED
        r.completedAt = time.Now()
@@ -342,13 +346,13 @@ func estimateCompressibility(data []byte) float64 {
    if len(data) < sampleSize {
        sampleSize = len(data)
    }

    // Count unique bytes in sample
    uniqueBytes := make(map[byte]struct{})
    for i := 0; i < sampleSize; i++ {
        uniqueBytes[data[i]] = struct{}{}
    }

    // Calculate entropy-based compression estimate
    uniqueRatio := float64(len(uniqueBytes)) / float64(sampleSize)
    return 0.3 + (0.7 * uniqueRatio) // Base compression ratio between 0.3 and 1.0
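A quick sanity check of the heuristic (a sketch; it assumes sampleSize = 1024, whose definition falls outside this hunk). Note that since at most 256 distinct byte values exist, any sample larger than 256 bytes tops out near 0.475 rather than 1.0:

    lowEntropy := bytes.Repeat([]byte{'a'}, 2048)
    _ = estimateCompressibility(lowEntropy) // 1 unique byte: 0.3 + 0.7*(1/1024) ≈ 0.301

    highEntropy := make([]byte, 2048)
    _, _ = rand.Read(highEntropy) // crypto/rand: nearly all 256 byte values appear
    _ = estimateCompressibility(highEntropy) // 0.3 + 0.7*(256/1024) ≈ 0.475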
@@ -357,13 +361,13 @@ func estimateCompressibility(data []byte) float64 {
func estimateFileCompression(size int64, extension string) int64 {
    // Compression ratio estimates based on common file types
    compressionRatios := map[string]float64{
        ".txt":  0.4, // Text compresses well
        ".log":  0.4,
        ".json": 0.4,
        ".xml":  0.4,
        ".html": 0.4,
        ".csv":  0.5,
        ".doc":  0.8, // Already compressed
        ".docx": 0.95,
        ".pdf":  0.95,
        ".jpg":  0.99, // Already compressed
@@ -376,11 +380,11 @@ func estimateFileCompression(size int64, extension string) int64 {
".gz": 0.99,
".rar": 0.99,
}
ratio, exists := compressionRatios[extension]
if !exists {
ratio = 0.7 // Default compression ratio for unknown types
}
return int64(float64(size) * ratio)
}
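For illustration, two lookups through this table (a sketch; int64 truncates the product):

    fmt.Println(estimateFileCompression(1<<20, ".txt")) // 1 MiB of text at ratio 0.4 -> 419430 bytes
    fmt.Println(estimateFileCompression(1<<20, ".bin")) // unknown extension, 0.7 default -> 734003 bytes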