Update asset caching and documentation features
- Updated the API server to support asset caching with a new flag for enabling/disabling caching.
- Implemented asset caching logic in the DownloadProxyHandler to store and retrieve assets efficiently.
- Added tests for asset caching functionality, ensuring proper behavior for cache hits and misses.
- Introduced new documentation files for software, including multi-language support.
- Enhanced the SoftwareCard component to display documentation links for software with available docs.
- Updated the Software model to include a flag indicating the presence of documentation.
- Improved the user interface for documentation navigation and search functionality.
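For context, a minimal sketch of how the new caching switch might be wired in at startup. Only the `NewServer(token, initialSoftware, statsService, cacheEnabled)` signature and the handler names come from the diff below; the `internal/api` import path, the `-enable-cache` flag name, and the `stats.Service` / software-list construction are placeholders for illustration.

```go
package main

import (
	"flag"
	"log"
	"net/http"

	"software-station/internal/api"    // assumed import path for the Server type
	"software-station/internal/models" // module path taken from the test imports
	"software-station/internal/stats"
)

func main() {
	// Hypothetical CLI flag; the real flag is defined outside this diff.
	enableCache := flag.Bool("enable-cache", true, "enable on-disk asset caching")
	flag.Parse()

	var initial []models.Software    // normally loaded from Gitea elsewhere
	statsService := &stats.Service{} // assumed zero-value construction

	// cacheEnabled is the new fourth parameter introduced by this commit.
	srv := api.NewServer("gitea-token", initial, statsService, *enableCache)

	http.HandleFunc("/api/software", srv.APISoftwareHandler)
	http.HandleFunc("/api/download", srv.DownloadProxyHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```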
@@ -13,4 +13,9 @@ const (
 	// Avatar Cache
 	AvatarCacheLimit = 100 * 1024 * 1024 // 100MB
 	AvatarCacheInterval = 1 * time.Hour
+
+	// Asset Cache
+	AssetCacheDir = ".cache/assets"
+	AssetCacheLimit = 2 * 1024 * 1024 * 1024 // 2GB
+	AssetCacheInterval = 6 * time.Hour
 )
@@ -62,16 +62,20 @@ type Server struct {
 	rssCache atomic.Value
 	rssLastMod atomic.Value
 	avatarCache string
+	assetCache string
+	cacheEnabled bool
 	salt []byte
 }
 
-func NewServer(token string, initialSoftware []models.Software, statsService *stats.Service) *Server {
+func NewServer(token string, initialSoftware []models.Software, statsService *stats.Service, cacheEnabled bool) *Server {
 	s := &Server{
 		GiteaToken: token,
 		SoftwareList: &SoftwareCache{data: initialSoftware},
 		Stats: statsService,
 		urlMap: make(map[string]string),
 		avatarCache: ".cache/avatars",
+		assetCache: AssetCacheDir,
+		cacheEnabled: cacheEnabled,
 	}
 
 	s.loadSalt()
@@ -82,6 +86,13 @@ func NewServer(token string, initialSoftware []models.Software, statsService *st
 		log.Printf("Warning: failed to create avatar cache directory: %v", err)
 	}
 
+	if s.cacheEnabled {
+		if err := os.MkdirAll(s.assetCache, 0750); err != nil {
+			log.Printf("Warning: failed to create asset cache directory: %v", err)
+		}
+		go s.startAssetCleanup()
+	}
+
 	s.RefreshProxiedList()
 
 	go s.startAvatarCleanup()
@@ -176,8 +187,23 @@ func (s *Server) startAvatarCleanup() {
 	}
 }
 
+func (s *Server) startAssetCleanup() {
+	ticker := time.NewTicker(AssetCacheInterval)
+	for range ticker.C {
+		s.cleanupAssetCache()
+	}
+}
+
+func (s *Server) cleanupAssetCache() {
+	s.cleanupCacheDir(s.assetCache, AssetCacheLimit)
+}
+
 func (s *Server) cleanupAvatarCache() {
-	files, err := os.ReadDir(s.avatarCache)
+	s.cleanupCacheDir(s.avatarCache, AvatarCacheLimit)
+}
+
+func (s *Server) cleanupCacheDir(dir string, limit int64) {
+	files, err := os.ReadDir(dir)
 	if err != nil {
 		return
 	}
@@ -194,7 +220,7 @@ func (s *Server) cleanupAvatarCache() {
 		if f.IsDir() {
 			continue
 		}
-		path := filepath.Join(s.avatarCache, f.Name())
+		path := filepath.Join(dir, f.Name())
 		info, err := f.Info()
 		if err != nil {
 			continue
@@ -207,7 +233,7 @@ func (s *Server) cleanupAvatarCache() {
 		})
 	}
 
-	if totalSize <= AvatarCacheLimit {
+	if totalSize <= limit {
 		return
 	}
 
@@ -217,14 +243,14 @@ func (s *Server) cleanupAvatarCache() {
 	})
 
 	for _, info := range infos {
-		if totalSize <= AvatarCacheLimit {
+		if totalSize <= limit {
 			break
 		}
 		if err := os.Remove(info.path); err == nil {
 			totalSize -= info.size
 		}
 	}
-	log.Printf("Avatar cache cleaned up. Current size: %v bytes", totalSize)
+	log.Printf("Cache directory %s cleaned up. Current size: %v bytes", dir, totalSize)
 }
 
 func (s *Server) APISoftwareHandler(w http.ResponseWriter, r *http.Request) {
@@ -328,6 +354,23 @@ func (s *Server) DownloadProxyHandler(w http.ResponseWriter, r *http.Request) {
 	}
 	s.Stats.DownloadStats.Unlock()
 
+	cachePath := filepath.Join(s.assetCache, id)
+	if s.cacheEnabled {
+		if _, err := os.Stat(cachePath); err == nil {
+			now := time.Now()
+			_ = os.Chtimes(cachePath, now, now)
+
+			// Update stats for cached download
+			s.Stats.GlobalStats.Lock()
+			s.Stats.GlobalStats.SuccessDownloads[fingerprint] = true
+			s.Stats.GlobalStats.Unlock()
+			s.Stats.SaveHashes()
+
+			http.ServeFile(w, r, cachePath)
+			return
+		}
+	}
+
 	req, err := http.NewRequest("GET", targetURL, nil)
 	if err != nil {
 		http.Error(w, "Failed to create request", http.StatusInternalServerError)
@@ -374,7 +417,31 @@ func (s *Server) DownloadProxyHandler(w http.ResponseWriter, r *http.Request) {
 		Stats: s.Stats,
 	}
 
-	n, err := io.Copy(w, tr)
+	var body io.Reader = tr
+	var tempFile *os.File
+	if s.cacheEnabled && resp.StatusCode == http.StatusOK {
+		var err error
+		tempFile, err = os.CreateTemp(s.assetCache, "asset-*")
+		if err == nil {
+			body = io.TeeReader(tr, tempFile)
+		} else {
+			log.Printf("Warning: failed to create temp file for caching: %v", err)
+		}
+	}
+
+	n, err := io.Copy(w, body)
+	if tempFile != nil {
+		tempFile.Close()
+		if err == nil {
+			if renameErr := os.Rename(tempFile.Name(), cachePath); renameErr != nil {
+				log.Printf("Warning: failed to rename temp file to cache: %v", renameErr)
+				_ = os.Remove(tempFile.Name())
+			}
+		} else {
+			_ = os.Remove(tempFile.Name())
+		}
+	}
+
 	if err != nil {
 		log.Printf("Error copying proxy response: %v", err)
 	}
@@ -389,6 +456,7 @@ func (s *Server) DownloadProxyHandler(w http.ResponseWriter, r *http.Request) {
 
 func (s *Server) LegalHandler(w http.ResponseWriter, r *http.Request) {
 	doc := r.URL.Query().Get("doc")
+	lang := r.URL.Query().Get("lang")
 	var filename string
 
 	switch doc {
@@ -404,6 +472,15 @@ func (s *Server) LegalHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	path := filepath.Join(LegalDir, filename)
+	if lang != "" && lang != "en" {
+		ext := filepath.Ext(filename)
+		base := strings.TrimSuffix(filename, ext)
+		langPath := filepath.Join(LegalDir, fmt.Sprintf("%s.%s%s", base, lang, ext))
+		if _, err := os.Stat(langPath); err == nil {
+			path = langPath
+		}
+	}
+
 	data, err := os.ReadFile(path) // #nosec G304
 	if err != nil {
 		http.Error(w, "Document not found", http.StatusNotFound)
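The language fallback above builds a localized file name by inserting the language code before the extension and only uses it if the file exists. A small sketch with invented values (the real `LegalDir` value and the `doc`-to-filename mapping are not shown in this excerpt):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Assumptions for illustration: ?doc=privacy resolves to "privacy.md"
	// and LegalDir is "legal"; neither value appears in this excerpt.
	filename, legalDir, lang := "privacy.md", "legal", "de"

	ext := filepath.Ext(filename)             // ".md"
	base := strings.TrimSuffix(filename, ext) // "privacy"
	langPath := filepath.Join(legalDir, fmt.Sprintf("%s.%s%s", base, lang, ext))

	fmt.Println(langPath) // legal/privacy.de.md - served if present, else the English privacy.md
}
```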
@@ -5,10 +5,12 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"os"
 	"path/filepath"
 	"software-station/internal/models"
 	"software-station/internal/stats"
 	"strings"
 	"testing"
 	"time"
 )
 
 func TestHandlers(t *testing.T) {
@@ -34,7 +36,7 @@ func TestHandlers(t *testing.T) {
 			AvatarURL: "http://example.com/logo.png",
 		},
 	}
-	server := NewServer("token", initialSoftware, statsService)
+	server := NewServer("token", initialSoftware, statsService, true)
 
 	t.Run("APISoftwareHandler", func(t *testing.T) {
 		req := httptest.NewRequest("GET", "/api/software", nil)
@@ -94,6 +96,78 @@ func TestHandlers(t *testing.T) {
 		}
 	})
 
+	t.Run("AssetCaching", func(t *testing.T) {
+		content := []byte("cache-me-if-you-can")
+		callCount := 0
+		upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			callCount++
+			w.Write(content)
+		}))
+		defer upstream.Close()
+
+		hash := server.RegisterURL(upstream.URL)
+
+		// First call - should go to upstream
+		req := httptest.NewRequest("GET", "/api/download?id="+hash, nil)
+		rr := httptest.NewRecorder()
+		server.DownloadProxyHandler(rr, req)
+		if rr.Code != http.StatusOK {
+			t.Fatalf("first call: expected 200, got %d", rr.Code)
+		}
+		if callCount != 1 {
+			t.Errorf("first call: expected call count 1, got %d", callCount)
+		}
+
+		// Verify file exists in cache
+		cachePath := filepath.Join(AssetCacheDir, hash)
+		if _, err := os.Stat(cachePath); os.IsNotExist(err) {
+			t.Error("asset was not cached to disk")
+		}
+
+		// Second call - should be served from cache
+		req = httptest.NewRequest("GET", "/api/download?id="+hash, nil)
+		rr = httptest.NewRecorder()
+		server.DownloadProxyHandler(rr, req)
+		if rr.Code != http.StatusOK {
+			t.Fatalf("second call: expected 200, got %d", rr.Code)
+		}
+		if callCount != 1 {
+			t.Errorf("second call: expected call count 1 (from cache), got %d", callCount)
+		}
+		if rr.Body.String() != string(content) {
+			t.Errorf("second call: expected content %q, got %q", string(content), rr.Body.String())
+		}
+	})
+
+	t.Run("CacheCleanup", func(t *testing.T) {
+		cacheDir := "test_cleanup_cache"
+		os.MkdirAll(cacheDir, 0750)
+		defer os.RemoveAll(cacheDir)
+
+		// Create some files
+		f1 := filepath.Join(cacheDir, "old")
+		f2 := filepath.Join(cacheDir, "new")
+
+		os.WriteFile(f1, make([]byte, 100), 0600)
+		time.Sleep(10 * time.Millisecond) // Ensure different mod times
+		os.WriteFile(f2, make([]byte, 100), 0600)
+
+		// Set mod times explicitly
+		now := time.Now()
+		os.Chtimes(f1, now.Add(-1*time.Hour), now.Add(-1*time.Hour))
+		os.Chtimes(f2, now, now)
+
+		// Clean up with limit that only allows one file
+		server.cleanupCacheDir(cacheDir, 150)
+
+		if _, err := os.Stat(f1); err == nil {
+			t.Error("expected old file to be removed")
+		}
+		if _, err := os.Stat(f2); err != nil {
+			t.Error("expected new file to be kept")
+		}
+	})
+
 	t.Run("RSSHandler", func(t *testing.T) {
 		req := httptest.NewRequest("GET", "/api/rss", nil)
 		rr := httptest.NewRecorder()
@@ -13,6 +13,14 @@ import (
 	"software-station/internal/models"
 )
 
+func checkDocsExist(repo string) bool {
+	// Check for .svx or .md files in the frontend docs directory
+	docsDir := "frontend/src/lib/docs/software"
+	_, errSvx := os.Stat(docsDir + "/" + repo + ".svx")
+	_, errMd := os.Stat(docsDir + "/" + repo + ".md")
+	return errSvx == nil || errMd == nil
+}
+
 func LoadSoftware(path, server, token string) []models.Software {
 	return LoadSoftwareExtended(path, server, token, true)
 }
@@ -74,6 +82,7 @@ func LoadSoftwareExtended(path, server, token string, useCache bool) []models.So
 			License: license,
 			IsPrivate: isPrivate,
 			AvatarURL: avatarURL,
+			HasDocs: checkDocsExist(repo),
 		}
 		softwareList = append(softwareList, sw)
 		if err := cache.SaveToCache(owner, repo, sw); err != nil {
@@ -131,6 +131,38 @@ func FetchContributors(server, token, owner, repo string) ([]models.Contributor,
 func DetectOS(filename string) string {
 	lower := strings.ToLower(filename)
 
+	if strings.HasSuffix(lower, ".whl") {
+		if strings.Contains(lower, "win") || strings.Contains(lower, "windows") {
+			return models.OSWindows
+		}
+		if strings.Contains(lower, "macosx") || strings.Contains(lower, "darwin") {
+			return models.OSMacOS
+		}
+		if strings.Contains(lower, "linux") {
+			return models.OSLinux
+		}
+		return models.OSUnknown
+	}
+
+	if strings.HasSuffix(lower, ".tar.gz") || strings.HasSuffix(lower, ".tgz") || strings.HasSuffix(lower, ".zip") {
+		if strings.Contains(lower, "win") || strings.Contains(lower, "windows") {
+			return models.OSWindows
+		}
+		if strings.Contains(lower, "mac") || strings.Contains(lower, "darwin") || strings.Contains(lower, "osx") {
+			return models.OSMacOS
+		}
+		if strings.Contains(lower, "linux") {
+			return models.OSLinux
+		}
+		if strings.Contains(lower, "freebsd") {
+			return models.OSFreeBSD
+		}
+		if strings.Contains(lower, "openbsd") {
+			return models.OSOpenBSD
+		}
+		return models.OSUnknown
+	}
+
 	osMap := []struct {
 		patterns []string
 		suffixes []string
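To make the new detection rules concrete, a test-style sketch that could sit in this package's test file (the asset names are invented; `models` and `testing` are assumed to be imported there already):

```go
func TestDetectOSExamples(t *testing.T) {
	// Hypothetical asset names; expected values follow from the new
	// suffix/substring checks added to DetectOS above.
	cases := map[string]string{
		"tool-1.2.0-py3-none-win_amd64.whl":     models.OSWindows, // "win" in a wheel name
		"tool-1.2.0-cp311-manylinux_x86_64.whl": models.OSLinux,
		"tool_1.2.0_macos_arm64.tar.gz":         models.OSMacOS,
		"tool_1.2.0_openbsd_amd64.zip":          models.OSOpenBSD,
		"tool-1.2.0-src.tar.gz":                 models.OSUnknown, // no platform marker
	}
	for name, want := range cases {
		if got := DetectOS(name); got != want {
			t.Errorf("DetectOS(%q) = %q, want %q", name, got, want)
		}
	}
}
```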
@@ -41,6 +41,7 @@ type Software struct {
 	License string `json:"license,omitempty"`
 	IsPrivate bool `json:"is_private"`
 	AvatarURL string `json:"avatar_url,omitempty"`
+	HasDocs bool `json:"has_docs"`
 }
 
 type FingerprintData struct {