feat: update dependencies, enhance upload rate limiting, and improve UI elements

Commit: e90c4576a5
Parent: 7a01525ca5
Date: 2026-01-16 08:54:14 +01:00
9 changed files with 156 additions and 83 deletions

View File

@@ -6,7 +6,7 @@ use serde_json::json;
use crate::{
LOG_FILE_NAME,
data_mgt::{Asset, AssetTracker},
data_mgt::{AppState, Asset},
logs::{LogEvent, LogEventType, log_event},
};
@@ -21,8 +21,24 @@ pub struct UploadRequest {
async fn api_upload(
req: HttpRequest,
body: web::Json<UploadRequest>,
assets: web::Data<AssetTracker>,
app_state: web::Data<AppState>,
) -> Result<HttpResponse, actix_web::Error> {
// Check for rate limiting
let connection_info = req.connection_info();
let uploader_ip = connection_info
.realip_remote_addr()
.map(|s| s.to_string())
.or_else(|| connection_info.peer_addr().map(|value| value.to_string()))
.ok_or_else(|| actix_web::error::ErrorBadRequest("Cannot determine client ip"))?;
match app_state.connection_tracker.is_allowed(&uploader_ip).await {
true => {}
false => {
return Ok(HttpResponse::TooManyRequests().body("Upload limit exceeded"));
}
}
// Convert to bytes
let content_bytes = if body.content_type == "text/plain" {
body.content.as_bytes().to_vec()
@@ -32,12 +48,6 @@ async fn api_upload(
Err(_) => return Ok(HttpResponse::BadRequest().body("Invalid base64 payload")),
}
};
let connection_info = req.connection_info();
let uploader_ip = connection_info
.realip_remote_addr()
.or_else(|| connection_info.peer_addr())
.unwrap_or("-")
.to_string();
let asset = crate::data_mgt::Asset::new(
body.duration,
@@ -48,7 +58,7 @@ async fn api_upload(
let id = asset.id();
log_event(LogEventType::AssetUploaded(asset.to_value()));
assets.add_asset(asset).await;
app_state.assets.add_asset(asset).await;
let response_body = json!({ "link": format!("/bhs/{}", id) });
Ok(HttpResponse::Ok().json(response_body))
}
@@ -57,11 +67,11 @@ async fn api_upload(
async fn api_get_asset(
req: HttpRequest,
path: web::Path<String>,
assets: web::Data<AssetTracker>,
app_state: web::Data<AppState>,
) -> Result<HttpResponse, actix_web::Error> {
log_event(LogEventType::HttpRequest(req.into()));
match assets.get_asset(&path.into_inner()).await {
match app_state.assets.get_asset(&path.into_inner()).await {
None => Ok(HttpResponse::NotFound().body("Asset not found")),
Some(asset) => Ok(HttpResponse::Ok()
.content_type(asset.mime())
@@ -90,11 +100,11 @@ struct ActivityItem {
}
#[get("/api/stats")]
async fn api_stats(assets: web::Data<AssetTracker>) -> Result<HttpResponse, actix_web::Error> {
async fn api_stats(app_state: web::Data<AppState>) -> Result<HttpResponse, actix_web::Error> {
use crate::LOG_DIR;
use std::fs;
let (active_assets, storage_bytes, image_count, text_count) = assets.stats_summary().await;
let (active_assets, storage_bytes, image_count, text_count) = app_state.assets.stats_summary().await;
let mut total_uploads = 0;
let mut total_deleted = 0;

View File

@@ -1,5 +1,5 @@
use std::fmt::Debug;
use std::sync::Arc;
use std::{collections::HashMap, fmt::Debug};
use anyhow::Result;
use chrono::{Duration, Utc};
@@ -7,7 +7,11 @@ use futures::lock::Mutex;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use crate::logs::{LogEventType, log_event};
use crate::MAX_ASSETS;
use crate::{
MAX_UPLOADS_PER_HOUR_PER_USER,
logs::{LogEventType, log_event},
};
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct Asset {
@@ -85,31 +89,30 @@ impl Asset {
pub fn to_value(&self) -> Value {
serde_json::to_value(self).unwrap_or(Value::Null)
}
// pub fn save(&self) -> Result<String> {
// let id = self.id.clone();
// let path = format!("{}{}", DATA_STORAGE, self.id);
// std::fs::create_dir_all(DATA_STORAGE)?;
// std::fs::write(&path, self.to_bytes()?)?;
// Ok(id)
// }
}
#[derive(Clone)]
pub struct AssetTracker {
/// Top-level shared application state, cloned into each actix-web worker
/// and handed to handlers via `web::Data<AppState>`.
#[derive(Clone, Debug, Default)]
pub struct AppState {
/// In-memory store of uploaded assets.
pub assets: AssetStorage,
/// Per-client-IP upload rate limiter.
pub connection_tracker: RateLimiter,
}
/// Async-locked collection of uploaded assets; `Arc` so clones share the
/// same underlying vector across worker threads.
#[derive(Clone, Debug, Default)]
pub struct AssetStorage {
/// Assets guarded by a futures-aware mutex (safe to hold across `.await`).
assets: Arc<Mutex<Vec<Asset>>>,
}
#[allow(dead_code)]
impl AssetTracker {
impl AssetStorage {
pub fn new() -> Self {
AssetTracker {
assets: Arc::new(Mutex::new(Vec::new())),
Self {
assets: Arc::new(Mutex::new(Vec::with_capacity(MAX_ASSETS))),
}
}
/// Append a newly uploaded asset to the in-memory store.
///
/// Logs the asset id with a local timestamp, then dumps the current asset
/// list via `show_assets` for visibility.
pub async fn add_asset(&self, asset: Asset) {
    // println! (not print!) so the log line is newline-terminated before
    // show_assets produces its own output; print! would also leave the
    // text sitting unflushed in stdout's buffer.
    println!("[{}] Adding asset: {}", chrono::Local::now().to_rfc3339(), asset.id());
    self.assets.lock().await.push(asset);
    self.show_assets().await;
}
@@ -173,7 +176,43 @@ impl AssetTracker {
}
}
pub async fn clear_assets(assets: AssetTracker) -> Result<()> {
assets.remove_expired().await;
/// Sliding-window upload rate limiter keyed by client IP.
#[derive(Clone, Debug, Default)]
pub struct RateLimiter {
/// client IP -> millisecond timestamps of that client's uploads within
/// the current one-hour window (pruned by `is_allowed`/`clear_expired`).
pub clients: Arc<Mutex<HashMap<String, Vec<i64>>>>,
}
impl RateLimiter {
    /// Millisecond timestamp one hour before now; recorded upload
    /// timestamps at or below this value are considered expired.
    fn cutoff_millis() -> i64 {
        Utc::now().timestamp_millis() - Duration::hours(1).num_milliseconds()
    }

    /// Record an upload attempt for `client_ip` and report whether it is
    /// within the per-hour quota.
    ///
    /// Expired timestamps for the client are pruned first; if fewer than
    /// `MAX_UPLOADS_PER_HOUR_PER_USER` remain, the current time is
    /// recorded and `true` is returned, otherwise `false`.
    pub async fn is_allowed(&self, client_ip: &str) -> bool {
        let mut clients = self.clients.lock().await;
        let one_hour_ago = Self::cutoff_millis();
        let entry = clients.entry(client_ip.to_string()).or_default();
        entry.retain(|&timestamp| timestamp > one_hour_ago);
        // NOTE: the previous debug `println!("{:?}", clients)` dumped every
        // tracked IP and its timestamps on each request — removed.
        if entry.len() < MAX_UPLOADS_PER_HOUR_PER_USER {
            entry.push(Utc::now().timestamp_millis());
            true
        } else {
            false
        }
    }

    /// Drop expired timestamps for every tracked client, and remove
    /// clients with no remaining activity so the map's key set cannot
    /// grow without bound as new IPs appear over time.
    pub async fn clear_expired(&self) {
        let mut clients = self.clients.lock().await;
        let one_hour_ago = Self::cutoff_millis();
        for timestamps in clients.values_mut() {
            timestamps.retain(|&timestamp| timestamp > one_hour_ago);
        }
        clients.retain(|_, timestamps| !timestamps.is_empty());
    }
}
pub async fn clear_app_data(app_state: &AppState) -> Result<()> {
app_state.assets.remove_expired().await;
app_state.connection_tracker.clear_expired().await;
Ok(())
}

View File

@@ -14,6 +14,9 @@ use std::{env, fs, path::PathBuf, sync::LazyLock};
/// Directory containing the static HTML pages served by the app.
pub static HTML_DIR: &str = "data/html/";
/// Directory where log files are written (and rotated).
pub static LOG_DIR: &str = "data/logs/";
/// Base name of the active log file.
pub static LOG_FILE_NAME: &str = "log.txt";
/// Initial capacity reserved for the in-memory asset store.
pub static MAX_ASSETS: usize = 1000;
// NOTE(review): not referenced in the visible code — presumably enforced
// somewhere in the upload path; confirm against the handlers.
pub static MAX_ASSET_SIZE_BYTES: usize = 3 * 1024 * 1024; // 3 MB
/// Per-IP hourly upload quota enforced by `RateLimiter::is_allowed`.
pub static MAX_UPLOADS_PER_HOUR_PER_USER: usize = 10;
pub static BIND_ADDR: LazyLock<String> = LazyLock::new(|| match env::var("BIND_ADDR") {
Ok(addr) => {
@@ -71,6 +74,7 @@ async fn view_asset(req: HttpRequest) -> actix_web::Result<NamedFile> {
#[route("/{tail:.*}", method = "GET", method = "POST")]
async fn catch_all(req: HttpRequest, _payload: Option<web::Json<Value>>) -> actix_web::Result<NamedFile> {
println!("Catch-all route triggered for path: {}", req.uri().path());
let response = match req.uri().path() {
path if STATIC_PAGES.contains(&path[1..].into()) => {
let file_path = HTML_DIR.to_string() + path;
@@ -108,24 +112,24 @@ async fn main() -> std::io::Result<()> {
});
println!("Rotated log file to: {}_{}", time_tag, &LOG_FILE_NAME);
}
let assets = data_mgt::AssetTracker::new();
let app_state = data_mgt::AppState::default();
println!("Starting server at http://{}:{}/", *BIND_ADDR, *BIND_PORT);
let assets_clone = assets.clone();
let inner_appt_state = app_state.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(1));
loop {
interval.tick().await;
if let Err(e) = data_mgt::clear_assets(assets_clone.clone()).await {
if let Err(e) = data_mgt::clear_app_data(&inner_appt_state).await {
eprintln!("Error clearing assets: {}", e);
}
}
});
HttpServer::new(move || {
App::new()
.app_data(web::JsonConfig::default().limit(1024 * 1024 * 3))
.app_data(web::Data::new(assets.clone()))
.app_data(web::JsonConfig::default().limit(1024 * 1024 * 3)) // 3MB limit
.app_data(web::Data::new(app_state.clone()))
.service(index)
.service(stats)
.service(view_asset)