From d6c465466a8fc00e218a3305c0ecf94ddbb5ffe3 Mon Sep 17 00:00:00 2001 From: icsboyx Date: Fri, 9 Jan 2026 20:59:24 +0100 Subject: [PATCH] feat: add statistics API and dashboard for asset metrics - Implemented `/api/stats` endpoint to return JSON metrics including active assets, total uploads, storage usage, and recent activity. - Created `stats.html` page to display real-time statistics with auto-refresh functionality. - Enhanced asset logging to include uploader IP and detailed event information for uploads and deletions. - Updated asset model to store uploader IP for audit purposes. - Improved logging functionality to ensure log directory exists before writing. - Refactored asset creation and management to support new features and logging. --- .github/copilot-instructions.md | 16 + CHANGELOG.md | 40 ++ LICENSE | 21 ++ README.md | 127 +++++++ data/html/index.html | 635 +++++++++++++++++--------------- data/html/stats.html | 244 ++++++++++++ src/api.rs | 175 ++++++++- src/data_mgt.rs | 36 +- src/logs.rs | 38 ++ src/main.rs | 9 +- 10 files changed, 1036 insertions(+), 305 deletions(-) create mode 100644 .github/copilot-instructions.md create mode 100644 CHANGELOG.md create mode 100644 LICENSE create mode 100644 data/html/stats.html diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000..3c9126a --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,16 @@ +# Black Hole Share – AI Guide + +- Purpose: lightweight Actix-Web service for ephemeral image/text sharing; uploads saved as JSON files on disk and purged after their TTL. +- **Base directory is `data/`**: the server uses relative paths `data/html/`, `data/logs/`, `data/storage/`. Run from repo root locally; Docker mounts `./data:/data`. 
+- HTTP entrypoint and routing live in [src/main.rs](../src/main.rs): `/` serves `index.html`, `/bhs/{id}` serves `view.html`, `/api/upload` and `/api/content/{id}` registered from the API module, catch-all serves other static files under `html/` (list cached at startup via `STATIC_PAGES`).
+- Request JSON bodies capped at ~3 MiB via `web::JsonConfig`. Background cleanup task runs every 60s to delete expired assets in `storage/`.
+- Upload API in [src/api.rs](../src/api.rs): accepts JSON `{ duration: minutes, content_type, content }`; `text/plain` content is stored raw bytes, other types are base64-decoded. On success returns `{ "link": "/bhs/{id}" }`.
+- Fetch API in [src/api.rs](../src/api.rs): loads `{id}` from `storage/`, rejects missing or expired assets, responds with original MIME and bytes.
+- Asset model and persistence in [src/data_mgt.rs](../src/data_mgt.rs): assets serialized as JSON files named by UUID, with `expires_at` computed from `share_duration` (minutes). Cleanup logs removals to stdout.
+- Logging helper in [src/logs.rs](../src/logs.rs): appends access lines with timing, IPs, scheme, UA to `logs/access.log`; runs for every handled request.
+- Frontend upload page [data/html/index.html](../data/html/index.html): JS handles drag/drop, paste, or file picker; converts images to base64 or keeps text, POSTs to `/api/upload`, shows returned link and copies to clipboard. Styling/theme in [data/html/style.css](../data/html/style.css).
+- Viewer page [data/html/view.html](../data/html/view.html): fetches `/api/content/{id}`, renders images with zoom overlay or text with zoomable modal; shows error when content missing/expired.
+- Environment: `BIND_ADDR` and `BIND_PORT` (defaults 0.0.0.0:8080) are read via `LazyLock` on startup; `tokio` multi-thread runtime used.
+- Build/dev: `cargo run --release` from repo root (ensure `data/` exists with `html/`, `logs/`, `storage/`), or use Dockerfile (Arch base + rustup build) and docker-compose (Traefik labels, port 8080→80, volume `./data:/data`). +- No test suite present; verify changes by running the server and exercising `/api/upload` and `/api/content/{id}` via the provided UI or curl. +- When adding features, keep payload sizes small or adjust the JSON limit in [src/main.rs](../src/main.rs); ensure new routes log via `log_to_file` for observability; clean up expired artifacts consistently with `clear_assets()` patterns. diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..453ef09 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,40 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+
+## [0.1.0] - 2026-01-09
+
+### Added
+
+- **Statistics Dashboard** (`/stats.html`) with real-time metrics:
+  - Active assets count
+  - Total uploads and deletions
+  - Storage usage
+  - Image vs text breakdown
+  - Average server response time
+  - Total request count
+  - Recent activity feed (last 20 events)
+  - Auto-refresh every 30 seconds
+- **Statistics API** (`GET /api/stats`) returning JSON metrics
+- **Enhanced logging** for asset events:
+  - Upload events with uploader IP, MIME type, size, duration, timestamps
+  - Delete events with full asset metadata
+  - Request timing (`dur_ms`) in access logs
+- **Uploader IP tracking** stored with each asset for audit purposes
+- Stats link in index page footer
+- Ephemeral image and text sharing with configurable TTL (1-60 minutes)
+- Drag/drop, paste, and file picker upload support
+- Base64 encoding for images, raw text for plain text
+- UUID-based asset storage as JSON files
+- Background cleanup task (every 60 seconds)
+- Dark theme UI with zoom overlay
+- View page for shared content
+- Access logging with timing, IPs, and user agent
+- Docker and docker-compose support with Traefik labels
+- Environment variables for bind address and port
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..31b33a1
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2026 Black Hole Share Contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index e69de29..9a3913e 100644 --- a/README.md +++ b/README.md @@ -0,0 +1,127 @@ +# Black Hole Share + +A lightweight, ephemeral file sharing service built with Rust and Actix-Web. Upload images or text with a configurable TTL (1-60 minutes) and share via a unique link. Content is automatically purged after expiration. + +## Features + +- **Ephemeral sharing** – uploads auto-delete after the specified duration +- **Image & text support** – drag/drop, paste, or file picker for images; paste text directly +- **Zero database** – assets stored as JSON files on disk +- **Dark theme UI** – clean, responsive interface with zoom overlay +- **Statistics dashboard** – real-time stats at `/stats.html` (active assets, uploads, response times) +- **Access logging** – request and asset events logged to `data/logs/access.log` with IP, timing, and metadata + +## Quick Start + +### Local Development + +```bash +# Run from repo root (paths resolve to data/html/, data/logs/, data/storage/) +cargo run --release +``` + +Server starts at `http://0.0.0.0:8080` by default. + +> **Note:** All paths are relative to the repo root: `data/html/`, `data/logs/`, `data/storage/`. + +### Docker + +```bash +docker-compose up --build +``` + +Exposes port `8080` mapped to container port `80`. Volume mounts `./data:/data`. 
+ +## Configuration + +| Environment Variable | Default | Description | +| -------------------- | --------- | --------------- | +| `BIND_ADDR` | `0.0.0.0` | Address to bind | +| `BIND_PORT` | `8080` | Port to bind | + +## API + +### Upload + +```http +POST /api/upload +Content-Type: application/json + +{ + "duration": 5, + "content_type": "text/plain", + "content": "Hello, world!" +} +``` + +- `duration` – TTL in minutes (1-60) +- `content_type` – MIME type (`text/plain` or `image/*`) +- `content` – raw text or base64-encoded image data + +**Response:** + +```json +{ "link": "/bhs/550e8400-e29b-41d4-a716-446655440000" } +``` + +### Fetch + +```http +GET /api/content/{id} +``` + +Returns the original content with appropriate MIME type, or `404` if expired/missing. + +### Statistics + +```http +GET /api/stats +``` + +**Response:** + +```json +{ + "active_assets": 5, + "total_uploads": 42, + "total_deleted": 37, + "storage_bytes": 1048576, + "image_count": 3, + "text_count": 2, + "avg_response_ms": 0.85, + "total_requests": 150, + "recent_activity": [...] 
+} +``` + +## Project Structure + +``` +├── src/ +│ ├── main.rs # HTTP server, routing, background cleanup +│ ├── api.rs # Upload/fetch endpoints +│ ├── data_mgt.rs # Asset model, persistence, expiration +│ └── logs.rs # Request and asset event logging +├── data/ +│ ├── html/ # Frontend (index.html, view.html, stats.html, style.css) +│ ├── logs/ # Access logs +│ └── storage/ # Stored assets (auto-created) +├── Dockerfile +├── docker-compose.yaml +└── Cargo.toml +``` + +## Runtime Layout + +The server uses paths relative to the repo root under `data/`: + +- `data/html/` – frontend assets (index.html, view.html, style.css) +- `data/logs/` – access logs +- `data/storage/` – uploaded assets (auto-created) + +- **Local dev:** Run from repo root with `cargo run --release` +- **Docker:** Volume mounts `./data:/data`, container WORKDIR is `/` + +## License + +MIT diff --git a/data/html/index.html b/data/html/index.html index cab2526..6b13caa 100644 --- a/data/html/index.html +++ b/data/html/index.html @@ -1,341 +1,388 @@ + + + + Image Upload + + - - - - Image Upload - - + +

Black Hole Share

- -

Black Hole Share

- -
-
- -
-

Click to select file, paste image, text data, or drag & drop

+
+
+ +
+

Click to select file, paste image, text data, or drag & drop

+
-
-
- - -
- - +
+ + +
+ + +
+
- -
-
- Powered by: -
+ - - + + - - + zoomOverlay.addEventListener("click", hideZoom); - \ No newline at end of file + // ESC TO EXIT ZOOM + document.addEventListener("keydown", function (e) { + if (e.key === "Escape" || e.key === "Esc") { + hideZoom(); + } + }); + + window.addEventListener("resize", function () { + if (currentContentData) { + displayContent(currentContentData); + } + }); + + + diff --git a/data/html/stats.html b/data/html/stats.html new file mode 100644 index 0000000..6ea924e --- /dev/null +++ b/data/html/stats.html @@ -0,0 +1,244 @@ + + + + + + Black Hole Share - Statistics + + + + + +

Black Hole Share - Statistics

+ +
+

Loading statistics...

+
+ +
+ Powered by: +
+ + + + diff --git a/src/api.rs b/src/api.rs index b44e2d2..3215ff5 100644 --- a/src/api.rs +++ b/src/api.rs @@ -3,7 +3,10 @@ use base64::{Engine, engine::general_purpose}; use serde::Deserialize; use serde_json::json; -use crate::{DATA_STORAGE, logs::log_to_file}; +use crate::{ + DATA_STORAGE, + logs::{log_asset_event, log_to_file}, +}; #[derive(Deserialize, Debug)] pub struct UploadRequest { @@ -13,20 +16,43 @@ pub struct UploadRequest { } #[post("/api/upload")] -async fn api_upload(req: web::Json) -> Result { +async fn api_upload(req: HttpRequest, body: web::Json) -> Result { // Convert to bytes - let content_bytes = if req.content_type == "text/plain" { - req.content.as_bytes().to_vec() // UTF-8 bytes + let content_bytes = if body.content_type == "text/plain" { + body.content.as_bytes().to_vec() // UTF-8 bytes } else { // Decode base64 → bytes - general_purpose::STANDARD.decode(&req.content).unwrap() + general_purpose::STANDARD.decode(&body.content).unwrap() }; - let asset = crate::data_mgt::Asset::new(req.duration, req.content_type.clone(), content_bytes); + let connection_info = req.connection_info(); + let uploader_ip = connection_info + .realip_remote_addr() + .or_else(|| connection_info.peer_addr()) + .unwrap_or("-") + .to_string(); + + let asset = crate::data_mgt::Asset::new( + body.duration, + body.content_type.clone(), + content_bytes, + Some(uploader_ip.clone()), + ); let id = asset .save() .map_err(|e| actix_web::error::ErrorInternalServerError(format!("Failed to save asset: {}", e)))?; + log_asset_event( + "upload", + asset.id(), + asset.mime(), + asset.size_bytes(), + asset.share_duration(), + asset.created_at(), + asset.expires_at(), + asset.uploader_ip().unwrap_or("-"), + ); + let response_body = json!({ "link": format!("/bhs/{}", id) }); Ok(HttpResponse::Ok().json(response_body)) } @@ -48,3 +74,140 @@ async fn api_get_asset(req: HttpRequest, path: web::Path) -> Result, +} + +#[derive(serde::Serialize)] +struct ActivityItem { + action: String, + 
mime: String, + size_bytes: usize, + timestamp: String, +} + +#[get("/api/stats")] +async fn api_stats() -> Result { + use crate::LOG_DIR; + use std::fs; + + let mut active_assets = 0; + let mut storage_bytes: u64 = 0; + let mut image_count = 0; + let mut text_count = 0; + + // Count active assets and calculate storage + if let Ok(entries) = fs::read_dir(DATA_STORAGE) { + for entry in entries.flatten() { + if let Ok(data) = fs::read(entry.path()) { + if let Ok(asset) = serde_json::from_slice::(&data) { + if !asset.is_expired() { + active_assets += 1; + storage_bytes += asset.size_bytes() as u64; + if asset.mime().starts_with("image/") { + image_count += 1; + } else if asset.mime().starts_with("text/") { + text_count += 1; + } + } + } + } + } + } + + // Parse log for upload/delete counts, response times, and recent activity + let mut total_uploads = 0; + let mut total_deleted = 0; + let mut recent_activity: Vec = Vec::new(); + let mut total_response_ms: f64 = 0.0; + let mut request_count: usize = 0; + + let log_path = format!("{}access.log", LOG_DIR); + if let Ok(content) = fs::read_to_string(&log_path) { + for line in content.lines() { + // Parse response time from request logs + if line.contains("dur_ms=") { + if let Some(dur_str) = line.split("dur_ms=").nth(1) { + if let Some(dur_val) = dur_str.split_whitespace().next() { + if let Ok(ms) = dur_val.parse::() { + total_response_ms += ms; + request_count += 1; + } + } + } + } + + if line.contains("event=asset") { + if line.contains("action=upload") { + total_uploads += 1; + } else if line.contains("action=delete_expired") { + total_deleted += 1; + } + + // Parse for recent activity (last 20) + if let Some(activity) = parse_activity_line(line) { + recent_activity.push(activity); + } + } + } + } + + let avg_response_ms = if request_count > 0 { total_response_ms / request_count as f64 } else { 0.0 }; + + // Keep only last 20, most recent first + recent_activity.reverse(); + recent_activity.truncate(20); + + let 
response = StatsResponse { + active_assets, + total_uploads, + total_deleted, + storage_bytes, + image_count, + text_count, + avg_response_ms, + total_requests: request_count, + recent_activity, + }; + + Ok(HttpResponse::Ok().json(response)) +} + +fn parse_activity_line(line: &str) -> Option { + let timestamp = line.split_whitespace().next()?.to_string(); + + let action = if line.contains("action=upload") { + "upload".to_string() + } else if line.contains("action=delete_expired") { + "delete".to_string() + } else { + return None; + }; + + let mime = line.split("mime=").nth(1)?.split_whitespace().next()?.to_string(); + + let size_bytes: usize = line + .split("size_bytes=") + .nth(1)? + .split_whitespace() + .next()? + .parse() + .ok()?; + + Some(ActivityItem { + action, + mime, + size_bytes, + timestamp, + }) +} diff --git a/src/data_mgt.rs b/src/data_mgt.rs index c393c28..69fa7cc 100644 --- a/src/data_mgt.rs +++ b/src/data_mgt.rs @@ -3,6 +3,7 @@ use chrono::{Duration, Utc}; use serde::{Deserialize, Serialize}; use crate::DATA_STORAGE; +use crate::logs::log_asset_event; #[derive(Debug, Serialize, Deserialize, Default)] pub struct Asset { @@ -12,10 +13,12 @@ pub struct Asset { expires_at: i64, mime: String, content: Vec, + #[serde(default)] + uploader_ip: Option, } impl Asset { - pub fn new(share_duration: u32, mime: String, content: Vec) -> Self { + pub fn new(share_duration: u32, mime: String, content: Vec, uploader_ip: Option) -> Self { let id = uuid::Uuid::new_v4().to_string(); let created_at = Utc::now().timestamp_millis(); let expires_at = created_at + Duration::minutes(share_duration as i64).num_milliseconds(); @@ -26,6 +29,7 @@ impl Asset { expires_at, mime, content, + uploader_ip, } } pub fn is_expired(&self) -> bool { @@ -44,6 +48,26 @@ impl Asset { self.content.clone() } + pub fn share_duration(&self) -> u32 { + self.share_duration + } + + pub fn created_at(&self) -> i64 { + self.created_at + } + + pub fn expires_at(&self) -> i64 { + self.expires_at + } + 
+ pub fn size_bytes(&self) -> usize { + self.content.len() + } + + pub fn uploader_ip(&self) -> Option<&str> { + self.uploader_ip.as_deref() + } + pub fn to_bytes(&self) -> Result> { let bytes = serde_json::to_vec(self)?; Ok(bytes) @@ -68,6 +92,16 @@ pub async fn clear_assets() -> Result<()> { let asset = serde_json::from_slice::(&data)?; if asset.is_expired() { println!("Removing expired asset: {}", asset.id()); + log_asset_event( + "delete_expired", + asset.id(), + asset.mime(), + asset.size_bytes(), + asset.share_duration(), + asset.created_at(), + asset.expires_at(), + asset.uploader_ip().unwrap_or("-"), + ); std::fs::remove_file(&path)?; } } diff --git a/src/logs.rs b/src/logs.rs index 0595516..410f467 100644 --- a/src/logs.rs +++ b/src/logs.rs @@ -11,6 +11,12 @@ pub fn log_to_file(req: &HttpRequest, start: Instant) { let log_path = LOG_DIR.to_string() + "access.log"; + // Ensure log directory exists + if let Err(e) = std::fs::create_dir_all(LOG_DIR) { + eprintln!("failed to create log dir: {}", e); + return; + } + let Ok(mut file) = OpenOptions::new().create(true).append(true).open(log_path) else { eprintln!("failed to open log file"); return; @@ -40,3 +46,35 @@ pub fn log_to_file(req: &HttpRequest, start: Instant) { let _ = file.write_all(line.as_bytes()); } + +pub fn log_asset_event( + action: &str, + id: &str, + mime: &str, + size_bytes: usize, + duration_min: u32, + created_at_ms: i64, + expires_at_ms: i64, + uploader_ip: &str, +) { + // Ensure logging directory exists before writing + if let Err(e) = std::fs::create_dir_all(LOG_DIR) { + eprintln!("failed to create log dir for asset event: {}", e); + return; + } + + let log_path = LOG_DIR.to_string() + "access.log"; + + let Ok(mut file) = OpenOptions::new().create(true).append(true).open(log_path) else { + eprintln!("failed to open log file for asset event"); + return; + }; + + let ts = chrono::Local::now().to_rfc3339(); + + let line = format!( + "{ts} event=asset action={action} id={id} mime={mime} 
size_bytes={size_bytes} duration_min={duration_min} created_at_ms={created_at_ms} expires_at_ms={expires_at_ms} uploader_ip={uploader_ip}\n" + ); + + let _ = file.write_all(line.as_bytes()); +} diff --git a/src/main.rs b/src/main.rs index 00f1cb8..0129b9c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -10,9 +10,9 @@ use actix_web::{ use serde_json::Value; use std::{env, fs, path::PathBuf, sync::LazyLock}; -pub static HTML_DIR: &str = "html/"; -pub static LOG_DIR: &str = "logs/"; -pub static DATA_STORAGE: &str = "storage/"; +pub static HTML_DIR: &str = "data/html/"; +pub static LOG_DIR: &str = "data/logs/"; +pub static DATA_STORAGE: &str = "data/storage/"; pub static BIND_ADDR: LazyLock = LazyLock::new(|| match env::var("BIND_ADDR") { Ok(addr) => { @@ -43,7 +43,7 @@ pub static STATIC_PAGES: LazyLock> = LazyLock::new(|| { }); use crate::{ - api::{api_get_asset, api_upload}, + api::{api_get_asset, api_stats, api_upload}, logs::log_to_file, }; @@ -102,6 +102,7 @@ async fn main() -> std::io::Result<()> { .service(view_asset) .service(api_get_asset) .service(api_upload) + .service(api_stats) .service(catch_all) }) .bind((BIND_ADDR.clone(), *BIND_PORT))?