5 Commits
v1.0.0 ... main

Author SHA1 Message Date
ffa1194ed6 Release v1.1.0
All checks were successful
Build & Publish / build_publish (push) Successful in 3m14s
2026-01-25 11:12:54 +01:00
bb8bbf8a24 feat: enhance logging and error handling, update HTML templates, and add footer 2026-01-25 11:07:44 +01:00
f403b6549d Release v1.0.1 - Bug fixes for logging and upload UI
All checks were successful
Build & Publish / build_publish (push) Successful in 1m29s
2026-01-16 16:38:12 +01:00
cfbd9ff4d3 feat: release v0.3.1 with bug fixes for upload workflow and logging improvements 2026-01-16 16:33:41 +01:00
c7c5c5f135 feat: enhance upload handling and logging improvements 2026-01-16 15:17:57 +01:00
14 changed files with 348 additions and 174 deletions

View File

@@ -5,6 +5,35 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.1.0] - 2026-01-25
### Added
- Shared HTML footer template injected into all pages.
- Structured log file handler with rotation, append-only writes, and stats parsing support.
- Upload size guard using `MAX_ASSET_SIZE_BYTES`.
- Unit tests for core storage and rate limiting behavior.
### Changed
- Asset storage cleanup and capacity checks are now race-safe.
- Stats aggregation now reads from the structured log file helper.
## [1.0.1] - 2026-01-16
### Fixed
- Asset addition logging now displays immediately instead of being buffered (changed `print!` to `println!`).
- Upload flow now locks after the first successful upload — users can only upload once per page session (previously additional uploads were possible and caused inconsistent state).
- Paste, drag & drop, and file selection disabled after successful upload to prevent confusion.
- JavaScript syntax errors in event listener registration that prevented copy/paste functionality.
- Removed nested and duplicated event listeners that caused unexpected behavior.
### Changed
- Added `uploadCompleted` flag to track upload state and prevent multiple uploads per session.
- Reset button now properly clears the `uploadCompleted` flag to allow new uploads.
## [1.0.0] - 2026-01-14
### Added

3
Cargo.lock generated
View File

@@ -273,7 +273,7 @@ checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"
[[package]]
name = "black_hole_share"
version = "0.3.0"
version = "1.1.0"
dependencies = [
"actix-files",
"actix-web",
@@ -281,6 +281,7 @@ dependencies = [
"base64",
"chrono",
"futures",
"mime_guess",
"serde",
"serde_json",
"tokio",

View File

@@ -1,6 +1,6 @@
[package]
name = "black_hole_share"
version = "0.3.0"
version = "1.1.0"
edition = "2024"
[dependencies]
@@ -11,11 +11,7 @@ chrono = "0.4"
futures = "0.3.31"
serde = { version = "1.0.228", features = ["derive"] }
serde_json = "1.0.148"
tokio = { version = "1.48.0", features = [
"macros",
"rt-multi-thread",
"signal",
"time",
] }
tokio = { version = "1.48.0", features = ["fs", "macros", "rt-multi-thread", "signal", "time"] }
uuid = { version = "1.19.0", features = ["v4"] }
base64 = "0.22.1"
mime_guess = "2.0.5"

View File

@@ -38,3 +38,8 @@ Exposes port `8080` mapped to container port `80`. Volume mounts `./data:/data`.
- `POST /api/upload` with JSON `{ duration, content_type, content }`
- `GET /api/content/{id}`
- `GET /api/stats`
### Logging
- Logs are written to `data/logs/log.txt`.
- On startup, the previous log file is rotated with a timestamped name.

View File

@@ -24,17 +24,7 @@
</div>
</div>
<footer class="powered-by" style="display: flex; align-items: center">
<span style="flex: 1"></span>
<span>Powered by: <img src="/logo.png" alt="ICSBox" class="footer-logo" /></span>
<span style="flex: 1; text-align: right">
<a href="/stats" style="
color: var(--text-secondary);
font-size: 0.8em;
text-decoration: none;
">📊 Stats</a>
</span>
</footer>
{{FOOTER}}
</body>
</html>

16
data/html/footer.html Normal file
View File

@@ -0,0 +1,16 @@
<!-- Shared site footer, injected into every page by the server-side template
     substitution in get_static_file (the FOOTER placeholder in each page).
     The VERSION placeholder below is replaced with the crate version
     (CARGO_PKG_VERSION) at render time. -->
<footer class="powered-by" style="display: flex; align-items: center">
<span style="flex: 1; text-align: left">
<span style="
color: var(--text-secondary);
font-size: 0.8em;
">{{VERSION}}</span>
</span>
<span>Powered by: <img src="/logo.png" alt="ICSBox" class="footer-logo" /></span>
<span style="flex: 1; text-align: right">
<a href="/stats" style="
color: var(--text-secondary);
font-size: 0.8em;
text-decoration: none;
">📊 Stats</a>
</span>
</footer>

View File

@@ -45,23 +45,14 @@
</div>
</div>
<footer class="powered-by" style="display: flex; align-items: center">
<span style="flex: 1"></span>
<span>Powered by: <img src="logo.png" alt="ICSBox" class="footer-logo" /></span>
<span style="flex: 1; text-align: right">
<a href="/stats" style="
color: var(--text-secondary);
font-size: 0.8em;
text-decoration: none;
">📊 Stats</a>
</span>
</footer>
{{FOOTER}}
<!-- Zoom overlay -->
<div id="zoomOverlay" class="zoom-overlay" style="display: none"></div>
<script>
let currentContentData = null;
let uploadCompleted = false;
const fileInput = document.getElementById("fileInput");
const uploadZone = document.getElementById("uploadZone");
const uploadContainer = document.querySelector(".upload-container");
@@ -165,6 +156,9 @@
return;
}
// Mark upload as completed to prevent further pastes
uploadCompleted = true;
// Hide duration controls and buttons
document.querySelector('label[for="durationSlider"]').style.display =
"none";
@@ -220,6 +214,7 @@
// Reset to initial state
resetBtn.addEventListener("click", function () {
currentContentData = null;
uploadCompleted = false;
uploadZone.innerHTML =
"<p>Click to select file, paste image data, or drag & drop</p>";
uploadContainer.style.height = "180px";
@@ -302,6 +297,7 @@
// Open file picker on container click (ONLY IF EMPTY)
uploadContainer.addEventListener("click", function (e) {
if (
!uploadCompleted &&
uploadContainer.style.pointerEvents !== "none" &&
!uploadZone.querySelector(".text-content") &&
!uploadZone.querySelector("img")
@@ -311,6 +307,7 @@
});
fileInput.addEventListener("change", function (e) {
if (uploadCompleted) return;
const file = e.target.files[0];
if (file) {
const reader = new FileReader();
@@ -323,6 +320,10 @@
// Handle paste from clipboard
uploadZone.addEventListener("paste", function (e) {
if (uploadCompleted) {
e.preventDefault();
return;
}
e.preventDefault();
const items = e.clipboardData.items;
@@ -350,6 +351,7 @@
function handleDrop(e) {
e.preventDefault();
if (uploadCompleted) return;
const file = e.dataTransfer.files[0];
if (file && file.type.startsWith("image/")) {
const reader = new FileReader();

View File

@@ -172,17 +172,7 @@
<p>Loading statistics...</p>
</div>
<footer class="powered-by" style="display: flex; align-items: center">
<span style="flex: 1"></span>
<span>Powered by: <img src="/logo.png" alt="ICSBox" class="footer-logo" /></span>
<span style="flex: 1; text-align: right">
<a href="/stats" style="
color: var(--text-secondary);
font-size: 0.8em;
text-decoration: none;
">📊 Stats</a>
</span>
</footer>
{{FOOTER}}
<script>
async function loadStats() {

View File

@@ -19,17 +19,7 @@
</div>
</div>
<footer class="powered-by" style="display: flex; align-items: center">
<span style="flex: 1"></span>
<span>Powered by: <img src="/logo.png" alt="ICSBox" class="footer-logo" /></span>
<span style="flex: 1; text-align: right">
<a href="/stats" style="
color: var(--text-secondary);
font-size: 0.8em;
text-decoration: none;
">📊 Stats</a>
</span>
</footer>
{{FOOTER}}
<!-- Zoom overlay -->
<div id="zoomOverlay" class="zoom-overlay" style="display: none;"></div>

View File

@@ -1,4 +1,3 @@
use actix_web::http::header;
use actix_web::{HttpRequest, HttpResponse, get, post, web};
use base64::{Engine, engine::general_purpose};
@@ -6,12 +5,12 @@ use chrono::Utc;
use serde::Deserialize;
use serde_json::json;
use crate::{
LOG_FILE_NAME,
data_mgt::{AppState, Asset},
logs::{LogEvent, LogEventType, log_event},
};
use crate::{MAX_ASSET_DURATION, MIN_ASSET_DURATION};
use crate::{
MAX_ASSET_SIZE_BYTES,
data_mgt::{AppState, Asset},
logs::LogEventType,
};
#[derive(Deserialize, Debug)]
pub struct UploadRequest {
@@ -47,6 +46,19 @@ async fn api_upload(
}
};
if content_bytes.len() > MAX_ASSET_SIZE_BYTES {
let error = json!({"error": "Asset too large"});
app_state
.log_file
.write_event(LogEventType::Error(error.clone()))
.await
.unwrap_or_else(|e| println!("Failed to log HTTP request: {}", e));
return Ok(HttpResponse::PayloadTooLarge().json(json!({
"error": "Asset too large"
})));
}
let clamped_duration = body.duration.clamp(MIN_ASSET_DURATION, MAX_ASSET_DURATION);
let asset_expiration_time = now + (clamped_duration as i64 * 60 * 1000);
let (allowed, retry_after_ms) = app_state
@@ -71,10 +83,27 @@ async fn api_upload(
);
let id = asset.id();
log_event(LogEventType::AssetUploaded(asset.to_value()));
app_state.assets.add_asset(asset).await;
let response_body = json!({ "link": format!("/bhs/{}", id) });
Ok(HttpResponse::Ok().json(response_body))
match app_state.assets.add_asset(asset.clone(), &app_state.log_file).await {
Ok(_) => {
app_state
.log_file
.write_event(LogEventType::AssetUploaded(asset.to_value()))
.await
.unwrap_or_else(|e| println!("Failed to log HTTP request: {}", e));
let response_body = json!({ "link": format!("/bhs/{}", id) });
Ok(HttpResponse::Ok().json(response_body))
}
Err(e) => {
let error = json!({"error": format!("Failed to store asset: {}", e)});
app_state
.log_file
.write_event(LogEventType::Error(error.clone()))
.await
.unwrap_or_else(|e| println!("Failed to log HTTP request: {}", e));
Ok(HttpResponse::InternalServerError().json(error))
}
}
}
#[get("/api/content/{id}")]
@@ -83,13 +112,14 @@ async fn api_get_asset(
path: web::Path<String>,
app_state: web::Data<AppState>,
) -> Result<HttpResponse, actix_web::Error> {
log_event(LogEventType::HttpRequest(req.into()));
app_state
.log_file
.write_event(LogEventType::HttpRequest(req.into()))
.await
.unwrap_or_else(|e| println!("Failed to log HTTP request: {}", e));
match app_state.assets.get_asset(&path.into_inner()).await {
None => Ok(HttpResponse::NotFound().body("Asset not found")),
Some(asset) => Ok(HttpResponse::Ok()
.content_type(asset.mime())
.body(asset.content().clone())),
Some(asset) => Ok(HttpResponse::Ok().content_type(asset.mime()).body(asset.content())),
}
}
@@ -115,46 +145,39 @@ struct ActivityItem {
#[get("/api/stats")]
async fn api_stats(app_state: web::Data<AppState>) -> Result<HttpResponse, actix_web::Error> {
use crate::LOG_DIR;
use std::fs;
let (active_assets, storage_bytes, image_count, text_count) = app_state.assets.stats_summary().await;
let mut total_uploads = 0;
let mut total_deleted = 0;
let mut recent_activity: Vec<ActivityItem> = Vec::new();
let mut request_count: usize = 0;
let log_path = format!("{}{}", LOG_DIR, LOG_FILE_NAME);
if let Ok(content) = fs::read_to_string(&log_path) {
for line in content.lines() {
if let Ok(entry) = serde_json::from_str::<LogEvent>(line) {
match entry.event {
LogEventType::HttpRequest(_req) => {
request_count += 1;
}
LogEventType::AssetUploaded(asset) => {
let asset = serde_json::from_value::<Asset>(asset).unwrap_or_default();
total_uploads += 1;
recent_activity.push(ActivityItem {
action: "upload".to_string(),
mime: asset.mime(),
share_duration: asset.share_duration(),
timestamp: entry.time,
});
}
LogEventType::AssetDeleted(asset) => {
let asset = serde_json::from_value::<Asset>(asset).unwrap_or_default();
total_deleted += 1;
recent_activity.push(ActivityItem {
action: "delete".to_string(),
mime: asset.mime(),
share_duration: asset.share_duration(),
timestamp: entry.time,
});
}
}
let log_events = app_state.log_file.read_events().await.unwrap_or_default();
for line in log_events {
match line.event {
LogEventType::HttpRequest(_req) => {
request_count += 1;
}
LogEventType::AssetUploaded(asset) => {
let asset = serde_json::from_value::<Asset>(asset).unwrap_or_default();
total_uploads += 1;
recent_activity.push(ActivityItem {
action: "upload".to_string(),
mime: asset.mime(),
share_duration: asset.share_duration(),
timestamp: line.time,
});
}
LogEventType::AssetDeleted(asset) => {
let asset = serde_json::from_value::<Asset>(asset).unwrap_or_default();
total_deleted += 1;
recent_activity.push(ActivityItem {
action: "delete".to_string(),
mime: asset.mime(),
share_duration: asset.share_duration(),
timestamp: line.time,
});
}
LogEventType::Error(_event) => {}
}
}

View File

@@ -8,10 +8,8 @@ use serde::{Deserialize, Serialize};
use serde_json::Value;
use crate::MAX_ASSETS;
use crate::{
MAX_UPLOADS_PER_USER,
logs::{LogEventType, log_event},
};
use crate::logs::LogFile;
use crate::{MAX_UPLOADS_PER_USER, logs::LogEventType};
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct Asset {
@@ -91,10 +89,11 @@ impl Asset {
}
}
#[derive(Clone, Debug, Default)]
#[derive(Clone, Debug)]
pub struct AppState {
pub assets: AssetStorage,
pub connection_tracker: RateLimiter,
pub log_file: LogFile,
}
#[derive(Clone, Debug, Default)]
@@ -110,19 +109,44 @@ impl AssetStorage {
}
}
pub async fn add_asset(&self, asset: Asset) {
print!("[{}] Adding asset: {}", chrono::Local::now().to_rfc3339(), asset.id());
/// Add `asset` to storage, first evicting any expired assets.
///
/// The expiry sweep, capacity check, and push all happen under a single
/// lock acquisition so concurrent uploads cannot race past `MAX_ASSETS`.
/// Returns an error when storage is full; in that case the new asset is
/// NOT stored, but expired assets are still evicted and logged.
pub async fn add_asset(&self, asset: Asset, log: &LogFile) -> Result<()> {
    let now = chrono::Utc::now();
    let mut removed: Vec<Asset> = Vec::new();
    // Decide whether the asset fits while the lock is held, but defer the
    // error return: the expired assets extracted here must still get their
    // AssetDeleted log events below (the previous early `return Err` dropped
    // them silently).
    let stored = {
        let mut assets = self.assets.lock().await;
        removed.extend(assets.extract_if(.., |a| a.is_expired()));
        if assets.len() >= MAX_ASSETS {
            false
        } else {
            println!("[{}] Adding asset: {}", now.to_rfc3339(), asset.id());
            assets.push(asset);
            true
        }
    };
    // Log evictions outside the lock so log I/O never blocks other tasks.
    for asset in removed {
        println!("[{}] Removing asset: {}", now.to_rfc3339(), asset.id());
        log.write_event(LogEventType::AssetDeleted(asset.to_value())).await?;
    }
    if stored {
        Ok(())
    } else {
        Err(anyhow::anyhow!("Asset storage full"))
    }
}
pub async fn remove_expired(&self) {
// Append an asset directly, with no expiry sweep and no capacity check.
// NOTE(review): no callers are visible in this diff — possibly dead code
// left over from the add_asset refactor; confirm before removing.
async fn push_asset(&self, asset: Asset) {
    let mut assets = self.assets.lock().await;
    assets.push(asset);
}
/// Evict every expired asset and write an `AssetDeleted` event for each.
///
/// Called from the periodic cleanup task, so a logging failure must not
/// panic: the previous `.unwrap()` would have killed the spawned task on
/// the first log-file I/O error. Failures are now reported and skipped.
pub async fn remove_expired(&self, log: &LogFile) {
    let mut assets = self.assets.lock().await;
    let removed_assets = assets.extract_if(.., |asset| asset.is_expired());
    for asset in removed_assets {
        println!("[{}] Removing asset: {}", chrono::Local::now().to_rfc3339(), asset.id());
        if let Err(e) = log.write_event(LogEventType::AssetDeleted(asset.to_value())).await {
            eprintln!("Failed to log asset deletion: {}", e);
        }
    }
}
@@ -156,7 +180,8 @@ impl AssetStorage {
pub async fn show_assets(&self) {
for asset in self.assets.lock().await.iter() {
println!(
"Asset ID: {}, Expires At: {}, MIME: {}, Size: {} bytes",
"[{}] Asset ID: {}, Expires At: {}, Mime: {}, Size: {} bytes",
chrono::Local::now().to_rfc3339(),
asset.id(),
asset.expires_at(),
asset.mime(),
@@ -169,11 +194,15 @@ impl AssetStorage {
let assets = self.assets.lock().await;
for asset in assets.iter().cloned() {
if asset.id() == id {
return Some(asset.clone());
return Some(asset);
}
}
None
}
/// Number of assets currently held in storage (takes the storage lock).
pub async fn assets_len(&self) -> usize {
    self.assets.lock().await.len()
}
}
#[derive(Clone, Debug, Default)]
@@ -192,11 +221,15 @@ impl RateLimiter {
entry.push(asset_exp_time);
(true, None)
} else {
println!(
"[{}] Rate limit exceeded for IP: {}",
chrono::Local::now().to_rfc3339(),
client_ip
);
let first_to_expire = entry.iter().min().copied().unwrap();
let retry_after_ms = (first_to_expire - now).max(1);
(false, Some(retry_after_ms))
};
println!("{:?}", clients);
ret_val
}
@@ -211,7 +244,7 @@ impl RateLimiter {
}
pub async fn clear_app_data(app_state: &AppState) -> Result<()> {
app_state.assets.remove_expired().await;
app_state.assets.remove_expired(&app_state.log_file).await;
app_state.connection_tracker.clear_expired().await;
Ok(())
}

View File

@@ -1,10 +1,73 @@
use std::{fs::OpenOptions, io::Write};
use anyhow::Result;
use std::{path::PathBuf, sync::Arc};
use actix_web::HttpRequest;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::{
fs::{File, OpenOptions, rename},
io::{AsyncReadExt, AsyncWriteExt},
sync::Mutex,
};
use crate::{LOG_DIR, LOG_FILE_NAME};
/// Append-only structured (JSON-lines) log file shared between tasks.
///
/// Cloning is cheap: the open file handle is behind `Arc<Mutex<_>>`, so all
/// clones append through the same handle and writes are serialized.
#[derive(Debug, Clone)]
pub struct LogFile {
    _path: PathBuf,
    handle: Arc<Mutex<File>>,
}

impl LogFile {
    /// Open (creating if necessary) the log file at `path`.
    ///
    /// An existing file is first rotated out of the way under a
    /// timestamped name, so each process start begins a fresh log.
    pub async fn new(path: impl Into<PathBuf>) -> Result<Self> {
        let path = path.into();
        if LogFile::log_file_exist(&path).await? {
            LogFile::log_file_rotate(&path).await?;
        }
        // Append-only: concurrent writers never truncate each other.
        let handle = OpenOptions::new().create(true).append(true).open(&path).await?;
        println!("Log file created at: {}", path.display());
        Ok(Self {
            _path: path,
            handle: Arc::new(Mutex::new(handle)),
        })
    }

    /// Read every parseable event from the log file (one JSON object per
    /// line). Malformed lines are reported and skipped rather than
    /// aborting the whole read.
    pub async fn read_events(&self) -> Result<Vec<LogEvent>> {
        let mut file = File::open(&self._path).await?;
        let mut contents = String::new();
        file.read_to_string(&mut contents).await?;
        let mut events: Vec<LogEvent> = Vec::new();
        for line in contents.lines() {
            match serde_json::from_str::<LogEvent>(line) {
                Ok(event) => events.push(event),
                Err(e) => println!("Failed to parse log line: {}: {}", e, line),
            }
        }
        Ok(events)
    }

    /// Serialize `event` (timestamped via `LogEvent::from`) as one JSON
    /// line and append it to the log file.
    pub async fn write_event(&self, event: LogEventType) -> Result<()> {
        let log_event: LogEvent = event.into();
        let line = serde_json::to_string(&log_event)? + "\n";
        self.handle.lock().await.write_all(line.as_bytes()).await?;
        Ok(())
    }

    /// True when something already exists at `path`.
    async fn log_file_exist(path: impl Into<PathBuf>) -> Result<bool> {
        Ok(tokio::fs::metadata(path.into()).await.is_ok())
    }

    /// Rename the current log file to `<timestamp>_<name>` in the same
    /// directory.
    async fn log_file_rotate(path: impl Into<PathBuf>) -> Result<()> {
        let path: PathBuf = path.into();
        // Was "%Y_%m_%_d-%H%M%S": chrono's `%_d` is the *space-padded*
        // day-of-month, which embedded a space in rotated filenames for
        // days 1-9. `%d` (zero-padded) is what the pattern intends.
        let now = chrono::Utc::now().format("%Y_%m_%d-%H%M%S").to_string();
        let Some(dir) = path.parent() else {
            return Err(anyhow::anyhow!("Failed to get parent directory for log rotation"));
        };
        let filename = path.file_name().unwrap_or_else(|| std::ffi::OsStr::new("log.txt"));
        let rotated = dir.join(format!("{}_{}", now, filename.to_string_lossy()));
        rename(path, rotated).await?;
        Ok(())
    }
}
#[derive(Debug, Serialize, Deserialize)]
pub struct LogHttpRequest {
@@ -52,6 +115,7 @@ pub enum LogEventType {
AssetUploaded(Value),
AssetDeleted(Value),
HttpRequest(LogHttpRequest),
Error(Value),
}
#[derive(Debug, Serialize, Deserialize)]
@@ -66,17 +130,3 @@ impl From<LogEventType> for LogEvent {
LogEvent { time, event }
}
}
pub fn log_event(event: LogEventType) {
let log_path = LOG_DIR.to_string() + LOG_FILE_NAME;
let Ok(mut file) = OpenOptions::new().create(true).append(true).open(log_path) else {
eprintln!("failed to open log file for asset event");
return;
};
let log_event: LogEvent = event.into();
let line = serde_json::to_string(&log_event).unwrap_or_else(|e| e.to_string());
let _ = writeln!(file, "{}", line);
}

View File

@@ -1,15 +1,22 @@
mod api;
mod data_mgt;
mod logs;
use actix_files::NamedFile;
#[cfg(test)]
mod tests;
use actix_web::{
App, HttpRequest, HttpServer, get, route,
App, HttpRequest, HttpResponse, HttpServer, get, mime, route,
web::{self},
};
use anyhow::Result;
use mime_guess::from_path;
use serde_json::Value;
use std::{env, fs, path::PathBuf, sync::LazyLock};
use std::{
env, fs,
path::{Path, PathBuf},
sync::LazyLock,
};
pub static HTML_DIR: &str = "data/html/";
pub static LOG_DIR: &str = "data/logs/";
@@ -19,6 +26,14 @@ pub static MAX_ASSET_DURATION: u32 = 60; // in minutes
pub static MAX_ASSETS: usize = 1000;
pub static MAX_ASSET_SIZE_BYTES: usize = 3 * 1024 * 1024; // 3 MB
pub static MAX_UPLOADS_PER_USER: usize = 10;
pub static FOOTER_HTML: LazyLock<String> =
LazyLock::new(|| fs::read_to_string(Path::new(HTML_DIR).join("footer.html")).unwrap_or_default());
pub static HTML_VARS: LazyLock<Vec<(&str, &str)>> = LazyLock::new(|| {
vec![
("{{FOOTER}}", (*FOOTER_HTML).as_str()),
("{{VERSION}}", env!("CARGO_PKG_VERSION")),
]
});
pub static BIND_ADDR: LazyLock<String> = LazyLock::new(|| match env::var("BIND_ADDR") {
Ok(addr) => {
@@ -50,80 +65,88 @@ pub static STATIC_PAGES: LazyLock<Vec<String>> = LazyLock::new(|| {
use crate::{
api::{api_get_asset, api_stats, api_upload},
logs::{LogEventType, log_event},
data_mgt::AppState,
logs::{LogEventType, LogFile},
};
#[get("/")]
async fn index(req: HttpRequest) -> actix_web::Result<NamedFile> {
async fn index(req: HttpRequest, app_state: web::Data<AppState>) -> actix_web::Result<HttpResponse> {
let path: PathBuf = PathBuf::from(HTML_DIR.to_string() + "index.html");
log_event(LogEventType::HttpRequest(req.into()));
Ok(NamedFile::open(path)?)
app_state
.log_file
.write_event(LogEventType::HttpRequest(req.into()))
.await
.unwrap_or_else(|e| println!("Failed to log HTTP request: {}", e));
get_static_file(path).await
}
#[get("/stats")]
async fn stats(req: HttpRequest) -> actix_web::Result<NamedFile> {
async fn stats(req: HttpRequest, app_state: web::Data<AppState>) -> actix_web::Result<HttpResponse> {
let path: PathBuf = PathBuf::from(HTML_DIR.to_string() + "stats.html");
log_event(LogEventType::HttpRequest(req.into()));
Ok(NamedFile::open(path)?)
app_state
.log_file
.write_event(LogEventType::HttpRequest(req.into()))
.await
.unwrap_or_else(|e| println!("Failed to log HTTP request: {}", e));
get_static_file(path).await
}
#[get("/bhs/{id}")]
async fn view_asset(req: HttpRequest) -> actix_web::Result<NamedFile> {
async fn view_asset(req: HttpRequest, app_state: web::Data<AppState>) -> actix_web::Result<HttpResponse> {
let path: PathBuf = PathBuf::from(HTML_DIR.to_string() + "view.html");
log_event(LogEventType::HttpRequest(req.into()));
Ok(NamedFile::open(path)?)
app_state
.log_file
.write_event(LogEventType::HttpRequest(req.into()))
.await
.unwrap_or_else(|e| println!("Failed to log HTTP request: {}", e));
get_static_file(path).await
}
#[route("/{tail:.*}", method = "GET", method = "POST")]
async fn catch_all(req: HttpRequest, _payload: Option<web::Json<Value>>) -> actix_web::Result<NamedFile> {
println!("Catch-all route triggered for path: {}", req.uri().path());
async fn catch_all(
req: HttpRequest,
_payload: Option<web::Json<Value>>,
app_state: web::Data<AppState>,
) -> actix_web::Result<HttpResponse> {
let response = match req.uri().path() {
path if STATIC_PAGES.contains(&path[1..].into()) => {
let file_path = HTML_DIR.to_string() + path;
Ok(NamedFile::open(file_path)?)
get_static_file(file_path).await
}
_ => {
let file_path = PathBuf::from(HTML_DIR.to_string() + "error.html");
Ok(NamedFile::open(file_path)?)
get_static_file(file_path).await
}
};
log_event(LogEventType::HttpRequest(req.into()));
app_state
.log_file
.write_event(LogEventType::HttpRequest(req.into()))
.await
.unwrap_or_else(|e| println!("Failed to log HTTP request: {}", e));
response
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
let _ = fs::create_dir_all(LOG_DIR);
let log_filename = format!("{}{}", LOG_DIR, LOG_FILE_NAME);
let log_filename_path = std::path::Path::new(&log_filename);
#[tokio::main]
async fn main() -> Result<()> {
let log_file = LogFile::new(format!("{}{}", LOG_DIR, LOG_FILE_NAME))
.await
.expect("Failed to create or open log file");
let time_tag = chrono::Local::now().format("%Y_%m_%d_%H_%M_%S");
if log_filename_path.exists() {
println!("File: {}, exists, rotating.", &log_filename_path.display());
fs::rename(
&log_filename_path,
format!("{}{}_{}", LOG_DIR, time_tag, &LOG_FILE_NAME),
)
.unwrap_or_else(|e| {
println!(
"No existing log file {} to rotate. Error: {}",
log_filename_path.to_string_lossy(),
e
)
});
println!("Rotated log file to: {}_{}", time_tag, &LOG_FILE_NAME);
}
let app_state = data_mgt::AppState::default();
let app_state = data_mgt::AppState {
assets: data_mgt::AssetStorage::new(),
connection_tracker: data_mgt::RateLimiter::default(),
log_file,
};
println!("Starting server at http://{}:{}/", *BIND_ADDR, *BIND_PORT);
let inner_appt_state = app_state.clone();
let inner_app_state = app_state.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(1));
loop {
interval.tick().await;
if let Err(e) = data_mgt::clear_app_data(&inner_appt_state).await {
if let Err(e) = data_mgt::clear_app_data(&inner_app_state).await {
eprintln!("Error clearing assets: {}", e);
}
}
@@ -142,5 +165,30 @@ async fn main() -> std::io::Result<()> {
})
.bind((BIND_ADDR.clone(), *BIND_PORT))?
.run()
.await
.await?;
Ok(())
}
/// Serve a static file from disk.
///
/// HTML files are treated as templates: every placeholder listed in
/// `HTML_VARS` is substituted before the response is built. All other
/// files are returned verbatim, with a MIME type guessed from the path
/// (falling back to `application/octet-stream`).
pub async fn get_static_file<P: AsRef<Path>>(path: P) -> actix_web::Result<HttpResponse> {
    let file_path = path.as_ref();
    let guessed = from_path(file_path).first_or_octet_stream();
    let is_html = guessed.type_() == mime::TEXT && guessed.subtype() == mime::HTML;
    if is_html {
        // Template pass: load as UTF-8 text, then expand each known placeholder.
        let raw = tokio::fs::read_to_string(file_path)
            .await
            .map_err(actix_web::error::ErrorInternalServerError)?;
        let rendered = HTML_VARS
            .iter()
            .fold(raw, |acc, (key, value)| acc.replace(key, value));
        Ok(HttpResponse::Ok().content_type("text/html; charset=utf-8").body(rendered))
    } else {
        // Non-HTML content is passed through untouched.
        let contents = tokio::fs::read(file_path)
            .await
            .map_err(actix_web::error::ErrorInternalServerError)?;
        Ok(HttpResponse::Ok().content_type(guessed.as_ref()).body(contents))
    }
}

1
src/tests.rs Normal file
View File

@@ -0,0 +1 @@