diff --git a/.github/workflows/test_build_release.yml b/.github/workflows/test_build_release.yml index 4acf14277..3924eea4b 100644 --- a/.github/workflows/test_build_release.yml +++ b/.github/workflows/test_build_release.yml @@ -45,7 +45,7 @@ jobs: uses: actions-rs/cargo@v1 with: command: clippy - args: --all-targets + args: --all-targets -- -D clippy::pedantic - uses: taiki-e/install-action@cargo-llvm-cov - uses: taiki-e/install-action@nextest - name: Run Tests diff --git a/.vscode/settings.json b/.vscode/settings.json index f1027e9bd..94f199bd6 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -3,4 +3,6 @@ "editor.formatOnSave": true }, "rust-analyzer.checkOnSave.command": "clippy", + "rust-analyzer.checkOnSave.allTargets": true, + "rust-analyzer.checkOnSave.extraArgs": ["--","-W","clippy::pedantic"], } \ No newline at end of file diff --git a/cSpell.json b/cSpell.json index a2c4235c4..cc3359d58 100644 --- a/cSpell.json +++ b/cSpell.json @@ -7,6 +7,7 @@ "bencode", "binascii", "Bitflu", + "bools", "bufs", "byteorder", "canonicalize", diff --git a/src/api/mod.rs b/src/api/mod.rs index 46ad24218..16abb8e27 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,2 +1,2 @@ -pub mod resources; +pub mod resource; pub mod server; diff --git a/src/api/resources/auth_key_resource.rs b/src/api/resource/auth_key.rs similarity index 66% rename from src/api/resources/auth_key_resource.rs rename to src/api/resource/auth_key.rs index c38b7cc18..d5c08f496 100644 --- a/src/api/resources/auth_key_resource.rs +++ b/src/api/resource/auth_key.rs @@ -2,18 +2,18 @@ use std::convert::From; use serde::{Deserialize, Serialize}; -use crate::key::AuthKey; use crate::protocol::clock::DurationSinceUnixEpoch; +use crate::tracker::auth; -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct AuthKeyResource { +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct AuthKey { pub key: String, pub valid_until: Option, } -impl From for AuthKey { - fn from(auth_key_resource: AuthKeyResource) -> Self { - AuthKey { +impl From for auth::Key { + fn from(auth_key_resource: AuthKey) -> Self { + auth::Key { key: auth_key_resource.key, valid_until: auth_key_resource .valid_until @@ -22,9 +22,9 @@ impl From for AuthKey { } } -impl From for AuthKeyResource { - fn from(auth_key: AuthKey) -> Self { - AuthKeyResource { +impl From for AuthKey { + fn from(auth_key: auth::Key) -> Self { + AuthKey { key: auth_key.key, valid_until: auth_key.valid_until.map(|valid_until| valid_until.as_secs()), } @@ -35,50 +35,50 @@ impl From for AuthKeyResource { mod tests { use std::time::Duration; - use super::AuthKeyResource; - use crate::key::AuthKey; - use crate::protocol::clock::{DefaultClock, TimeNow}; + use super::AuthKey; + use crate::protocol::clock::{Current, TimeNow}; + use crate::tracker::auth; #[test] fn it_should_be_convertible_into_an_auth_key() { let duration_in_secs = 60; - let auth_key_resource = AuthKeyResource { + let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(duration_in_secs), }; assert_eq!( - AuthKey::from(auth_key_resource), - AuthKey { + auth::Key::from(auth_key_resource), + auth::Key { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()) + valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()) } - ) + ); } #[test] fn it_should_be_convertible_from_an_auth_key() { let duration_in_secs = 60; 
- let auth_key = AuthKey { + let auth_key = auth::Key { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: Some(DefaultClock::add(&Duration::new(duration_in_secs, 0)).unwrap()), + valid_until: Some(Current::add(&Duration::new(duration_in_secs, 0)).unwrap()), }; assert_eq!( - AuthKeyResource::from(auth_key), - AuthKeyResource { + AuthKey::from(auth_key), + AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(duration_in_secs) } - ) + ); } #[test] fn it_should_be_convertible_into_json() { assert_eq!( - serde_json::to_string(&AuthKeyResource { + serde_json::to_string(&AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line valid_until: Some(60) }) diff --git a/src/api/resource/mod.rs b/src/api/resource/mod.rs new file mode 100644 index 000000000..e86c550ca --- /dev/null +++ b/src/api/resource/mod.rs @@ -0,0 +1,13 @@ +//! These are the Rest API resources. +//! +//! WIP. Not all endpoints have their resource structs. +//! +//! - [x] `AuthKeys` +//! - [ ] `Torrent`, `ListItem`, `Peer`, `PeerId` +//! - [ ] `StatsResource` +//! - [ ] ... + +pub mod auth_key; +pub mod peer; +pub mod stats; +pub mod torrent; diff --git a/src/api/resource/peer.rs b/src/api/resource/peer.rs new file mode 100644 index 000000000..ff84be197 --- /dev/null +++ b/src/api/resource/peer.rs @@ -0,0 +1,47 @@ +use serde::{Deserialize, Serialize}; + +use crate::tracker; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Peer { + pub peer_id: Id, + pub peer_addr: String, + #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] + pub updated: u128, + pub updated_milliseconds_ago: u128, + pub uploaded: i64, + pub downloaded: i64, + pub left: i64, + pub event: String, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Id { + pub id: Option, + pub client: Option, +} + +impl From for Id { + fn from(peer_id: tracker::peer::Id) -> Self { + Id { + id: peer_id.get_id(), + client: peer_id.get_client_name().map(std::string::ToString::to_string), + } + } +} + +impl From for Peer { + #[allow(deprecated)] + fn from(peer: tracker::peer::Peer) -> Self { + Peer { + peer_id: Id::from(peer.peer_id), + peer_addr: peer.peer_addr.to_string(), + updated: peer.updated.as_millis(), + updated_milliseconds_ago: peer.updated.as_millis(), + uploaded: peer.uploaded.0, + downloaded: peer.downloaded.0, + left: peer.left.0, + event: format!("{:?}", peer.event), + } + } +} diff --git a/src/api/resources/stats_resource.rs b/src/api/resource/stats.rs similarity index 87% rename from src/api/resources/stats_resource.rs rename to src/api/resource/stats.rs index 2fbaf42c1..e87f08f63 100644 --- a/src/api/resources/stats_resource.rs +++ b/src/api/resource/stats.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct StatsResource { +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Stats { pub torrents: u32, pub seeders: u32, pub completed: u32, diff --git a/src/api/resource/torrent.rs b/src/api/resource/torrent.rs new file mode 100644 index 000000000..924b61b8c --- /dev/null +++ b/src/api/resource/torrent.rs @@ -0,0 +1,21 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Torrent { + pub info_hash: String, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + #[serde(skip_serializing_if = "Option::is_none")] + pub peers: 
Option>, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct ListItem { + pub info_hash: String, + pub seeders: u32, + pub completed: u32, + pub leechers: u32, + // todo: this is always None. Remove field from endpoint? + pub peers: Option>, +} diff --git a/src/api/resources/mod.rs b/src/api/resources/mod.rs deleted file mode 100644 index d214d8a59..000000000 --- a/src/api/resources/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! These are the Rest API resources. -//! -//! WIP. Not all endpoints have their resource structs. -//! -//! - [x] AuthKeys -//! - [ ] TorrentResource, TorrentListItemResource, TorrentPeerResource, PeerIdResource -//! - [ ] StatsResource -//! - [ ] ... -pub mod auth_key_resource; -pub mod stats_resource; -pub mod torrent_resource; diff --git a/src/api/resources/torrent_resource.rs b/src/api/resources/torrent_resource.rs deleted file mode 100644 index 11e9d7196..000000000 --- a/src/api/resources/torrent_resource.rs +++ /dev/null @@ -1,67 +0,0 @@ -use serde::{Deserialize, Serialize}; - -use crate::peer::TorrentPeer; -use crate::PeerId; - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct TorrentResource { - pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, - #[serde(skip_serializing_if = "Option::is_none")] - pub peers: Option>, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct TorrentListItemResource { - pub info_hash: String, - pub seeders: u32, - pub completed: u32, - pub leechers: u32, - // todo: this is always None. Remove field from endpoint? - pub peers: Option>, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct TorrentPeerResource { - pub peer_id: PeerIdResource, - pub peer_addr: String, - #[deprecated(since = "2.0.0", note = "please use `updated_milliseconds_ago` instead")] - pub updated: u128, - pub updated_milliseconds_ago: u128, - pub uploaded: i64, - pub downloaded: i64, - pub left: i64, - pub event: String, -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct PeerIdResource { - pub id: Option, - pub client: Option, -} - -impl From for PeerIdResource { - fn from(peer_id: PeerId) -> Self { - PeerIdResource { - id: peer_id.get_id(), - client: peer_id.get_client_name().map(|client_name| client_name.to_string()), - } - } -} - -impl From for TorrentPeerResource { - fn from(peer: TorrentPeer) -> Self { - TorrentPeerResource { - peer_id: PeerIdResource::from(peer.peer_id), - peer_addr: peer.peer_addr.to_string(), - updated: peer.updated.as_millis(), - updated_milliseconds_ago: peer.updated.as_millis(), - uploaded: peer.uploaded.0, - downloaded: peer.downloaded.0, - left: peer.left.0, - event: format!("{:?}", peer.event), - } - } -} diff --git a/src/api/server.rs b/src/api/server.rs index 41e6f7074..5967a8be4 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -7,11 +7,12 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; use warp::{filters, reply, serve, Filter}; -use super::resources::auth_key_resource::AuthKeyResource; -use super::resources::stats_resource::StatsResource; -use super::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; -use crate::protocol::common::*; -use crate::tracker::TorrentTracker; +use super::resource::auth_key::AuthKey; +use super::resource::peer; +use super::resource::stats::Stats; +use super::resource::torrent::{ListItem, Torrent}; +use crate::protocol::info_hash::InfoHash; +use crate::tracker; #[derive(Deserialize, Debug)] struct TorrentInfoQuery { @@ 
-59,7 +60,8 @@ fn authenticate(tokens: HashMap) -> impl Filter) -> impl warp::Future { +#[allow(clippy::too_many_lines)] +pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { // GET /api/torrents?offset=:u32&limit=:u32 // View torrent list let api_torrents = tracker.clone(); @@ -71,7 +73,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = api_torrents.clone(); (limits, tracker) }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { + .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { let offset = limits.offset.unwrap_or(0); let limit = min(limits.limit.unwrap_or(1000), 4000); @@ -80,7 +82,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .iter() .map(|(info_hash, torrent_entry)| { let (seeders, completed, leechers) = torrent_entry.get_stats(); - TorrentListItemResource { + ListItem { info_hash: info_hash.to_string(), seeders, completed, @@ -102,8 +104,8 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .and(filters::path::path("stats")) .and(filters::path::end()) .map(move || api_stats.clone()) - .and_then(|tracker: Arc| async move { - let mut results = StatsResource { + .and_then(|tracker: Arc| async move { + let mut results = Stats { torrents: 0, seeders: 0, completed: 0, @@ -124,31 +126,31 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let db = tracker.get_torrents().await; - let _: Vec<_> = db - .iter() - .map(|(_info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }) - .collect(); + db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }); let stats = tracker.get_stats().await; - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + #[allow(clippy::cast_possible_truncation)] + { + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as 
u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + } Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) }); @@ -164,22 +166,23 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t2.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { let db = tracker.get_torrents().await; let torrent_entry_option = db.get(&info_hash); - if torrent_entry_option.is_none() { - return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); - } - - let torrent_entry = torrent_entry_option.unwrap(); + let torrent_entry = match torrent_entry_option { + Some(torrent_entry) => torrent_entry, + None => { + return Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); + } + }; let (seeders, completed, leechers) = torrent_entry.get_stats(); let peers = torrent_entry.get_peers(None); - let peer_resources = peers.iter().map(|peer| TorrentPeerResource::from(**peer)).collect(); + let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); - Ok(reply::json(&TorrentResource { + Ok(reply::json(&Torrent { info_hash: info_hash.to_string(), seeders, completed, @@ -199,7 +202,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t3.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { match tracker.remove_torrent_from_whitelist(&info_hash).await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { @@ -219,7 +222,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t4.clone(); (info_hash, tracker) }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { match tracker.add_torrent_to_whitelist(&info_hash).await { Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(..) => Err(warp::reject::custom(ActionStatus::Err { @@ -239,9 +242,9 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t5.clone(); (seconds_valid, tracker) }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { + .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKeyResource::from(auth_key))), + Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { reason: "failed to generate key".into(), })), @@ -259,7 +262,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp let tracker = t6.clone(); (key, tracker) }) - .and_then(|(key, tracker): (String, Arc)| async move { + .and_then(|(key, tracker): (String, Arc)| async move { match tracker.remove_auth_key(&key).await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { @@ -276,7 +279,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .and(filters::path::path("reload")) .and(filters::path::end()) .map(move || t7.clone()) - .and_then(|tracker: Arc| async move { + .and_then(|tracker: Arc| async move { match tracker.load_whitelist().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { @@ -293,7 +296,7 @@ pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl warp .and(filters::path::path("reload")) .and(filters::path::end()) .map(move || t8.clone()) - .and_then(|tracker: Arc| async move { + .and_then(|tracker: Arc| async move { match tracker.load_keys().await { Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), Err(_) => Err(warp::reject::custom(ActionStatus::Err { diff --git a/src/config.rs b/src/config.rs index 8c17070d2..a7e7e9df6 100644 --- a/src/config.rs +++ b/src/config.rs @@ -9,18 +9,18 @@ use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; -use crate::databases::database::DatabaseDrivers; -use crate::mode::TrackerMode; +use crate::databases::driver::Driver; +use crate::tracker::mode; #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct UdpTrackerConfig { +pub struct UdpTracker { pub enabled: bool, pub bind_address: String, } #[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct HttpTrackerConfig { +pub struct HttpTracker { pub enabled: bool, pub bind_address: String, pub ssl_enabled: bool, @@ -31,17 +31,18 @@ pub struct HttpTrackerConfig { } #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct HttpApiConfig { +pub struct HttpApi { pub enabled: bool, pub bind_address: String, pub access_tokens: HashMap, } +#[allow(clippy::struct_excessive_bools)] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct Configuration { pub log_level: Option, - pub mode: TrackerMode, - pub db_driver: DatabaseDrivers, + pub mode: mode::Mode, + pub db_driver: Driver, pub db_path: String, pub announce_interval: u32, pub min_announce_interval: u32, @@ -52,31 +53,36 @@ pub struct Configuration { pub persistent_torrent_completed_stat: bool, pub inactive_peer_cleanup_interval: u64, pub remove_peerless_torrents: bool, - pub udp_trackers: Vec, - pub http_trackers: Vec, - pub http_api: HttpApiConfig, + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub http_api: HttpApi, } #[derive(Debug)] -pub enum ConfigurationError { +pub enum Error { + Message(String), + ConfigError(ConfigError), IOError(std::io::Error), ParseError(toml::de::Error), TrackerModeIncompatible, } -impl std::fmt::Display for ConfigurationError { +impl std::fmt::Display for Error { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self { - ConfigurationError::IOError(e) => e.fmt(f), - ConfigurationError::ParseError(e) => e.fmt(f), - _ => write!(f, "{:?}", self), + Error::Message(e) => e.fmt(f), + Error::ConfigError(e) => e.fmt(f), + Error::IOError(e) => e.fmt(f), + Error::ParseError(e) => e.fmt(f), + 
Error::TrackerModeIncompatible => write!(f, "{:?}", self), } } } -impl std::error::Error for ConfigurationError {} +impl std::error::Error for Error {} impl Configuration { + #[must_use] pub fn get_ext_ip(&self) -> Option { match &self.external_ip { None => None, @@ -87,11 +93,12 @@ impl Configuration { } } + #[must_use] pub fn default() -> Configuration { let mut configuration = Configuration { log_level: Option::from(String::from("info")), - mode: TrackerMode::Public, - db_driver: DatabaseDrivers::Sqlite3, + mode: mode::Mode::Public, + db_driver: Driver::Sqlite3, db_path: String::from("data.db"), announce_interval: 120, min_announce_interval: 120, @@ -104,7 +111,7 @@ impl Configuration { remove_peerless_torrents: true, udp_trackers: Vec::new(), http_trackers: Vec::new(), - http_api: HttpApiConfig { + http_api: HttpApi { enabled: true, bind_address: String::from("127.0.0.1:1212"), access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] @@ -113,11 +120,11 @@ impl Configuration { .collect(), }, }; - configuration.udp_trackers.push(UdpTrackerConfig { + configuration.udp_trackers.push(UdpTracker { enabled: false, bind_address: String::from("0.0.0.0:6969"), }); - configuration.http_trackers.push(HttpTrackerConfig { + configuration.http_trackers.push(HttpTracker { enabled: false, bind_address: String::from("0.0.0.0:6969"), ssl_enabled: false, @@ -127,32 +134,40 @@ impl Configuration { configuration } - pub fn load_from_file(path: &str) -> Result { + /// # Errors + /// + /// Will return `Err` if `path` does not exist or has a bad configuration. + pub fn load_from_file(path: &str) -> Result { let config_builder = Config::builder(); #[allow(unused_assignments)] let mut config = Config::default(); if Path::new(path).exists() { - config = config_builder.add_source(File::with_name(path)).build()?; + config = config_builder + .add_source(File::with_name(path)) + .build() + .map_err(Error::ConfigError)?; } else { eprintln!("No config file found."); eprintln!("Creating config file.."); let config = Configuration::default(); - let _ = config.save_to_file(path); - return Err(ConfigError::Message( + config.save_to_file(path)?; + return Err(Error::Message( "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), )); } - let torrust_config: Configuration = config - .try_deserialize() - .map_err(|e| ConfigError::Message(format!("Errors while processing config: {}.", e)))?; + let torrust_config: Configuration = config.try_deserialize().map_err(Error::ConfigError)?; Ok(torrust_config) } - pub fn save_to_file(&self, path: &str) -> Result<(), ConfigurationError> { + /// # Errors + /// + /// Will return `Err` if `filename` does not exist or the user does not have + /// permission to read it. 
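A sketch of what the `ConfigurationError` → `config::Error` rename above means at a call site. This is illustrative only: the `bootstrap` helper is hypothetical and the `torrust_tracker` crate path is assumed, not taken from this diff.

```rust
use torrust_tracker::config::{Configuration, Error};

// Hypothetical caller: load the configuration, treating the
// "default file was just created" case specially. `load_from_file`
// writes a default config and returns `Error::Message` asking the
// operator to edit it before restarting the tracker.
fn bootstrap(path: &str) -> Configuration {
    match Configuration::load_from_file(path) {
        Ok(config) => config,
        Err(Error::Message(reason)) => {
            eprintln!("{}", reason);
            std::process::exit(0);
        }
        Err(e) => panic!("invalid configuration: {}", e),
    }
}
```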
+ pub fn save_to_file(&self, path: &str) -> Result<(), Error> { let toml_string = toml::to_string(self).expect("Could not encode TOML value"); fs::write(path, toml_string).expect("Could not write to file!"); Ok(()) @@ -161,6 +176,7 @@ impl Configuration { #[cfg(test)] mod tests { + use crate::config::{Configuration, Error}; #[cfg(test)] fn default_config_toml() -> String { @@ -197,7 +213,7 @@ mod tests { admin = "MyAccessToken" "# .lines() - .map(|line| line.trim_start()) + .map(str::trim_start) .collect::>() .join("\n"); config @@ -205,8 +221,6 @@ mod tests { #[test] fn configuration_should_have_default_values() { - use crate::Configuration; - let configuration = Configuration::default(); let toml = toml::to_string(&configuration).expect("Could not encode TOML value"); @@ -216,8 +230,6 @@ mod tests { #[test] fn configuration_should_contain_the_external_ip() { - use crate::Configuration; - let configuration = Configuration::default(); assert_eq!(configuration.external_ip, Option::Some(String::from("0.0.0.0"))); @@ -229,8 +241,6 @@ mod tests { use uuid::Uuid; - use crate::Configuration; - // Build temp config file path let temp_directory = env::temp_dir(); let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); @@ -275,8 +285,6 @@ mod tests { #[test] fn configuration_should_be_loaded_from_a_toml_config_file() { - use crate::Configuration; - let config_file_path = create_temp_config_file_with_default_config(); let configuration = Configuration::load_from_file(&config_file_path).expect("Could not load configuration from file"); @@ -286,9 +294,7 @@ mod tests { #[test] fn configuration_error_could_be_displayed() { - use crate::ConfigurationError; - - let error = ConfigurationError::TrackerModeIncompatible; + let error = Error::TrackerModeIncompatible; assert_eq!(format!("{}", error), "TrackerModeIncompatible"); } diff --git a/src/databases/database.rs b/src/databases/database.rs deleted file mode 100644 index 795be0d45..000000000 --- a/src/databases/database.rs +++ /dev/null @@ -1,87 +0,0 @@ -use async_trait::async_trait; -use derive_more::{Display, Error}; -use serde::{Deserialize, Serialize}; - -use crate::databases::mysql::MysqlDatabase; -use crate::databases::sqlite::SqliteDatabase; -use crate::tracker::key::AuthKey; -use crate::InfoHash; - -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub enum DatabaseDrivers { - Sqlite3, - MySQL, -} - -pub fn connect_database(db_driver: &DatabaseDrivers, db_path: &str) -> Result, r2d2::Error> { - let database: Box = match db_driver { - DatabaseDrivers::Sqlite3 => { - let db = SqliteDatabase::new(db_path)?; - Box::new(db) - } - DatabaseDrivers::MySQL => { - let db = MysqlDatabase::new(db_path)?; - Box::new(db) - } - }; - - database.create_database_tables().expect("Could not create database tables."); - - Ok(database) -} - -#[async_trait] -pub trait Database: Sync + Send { - fn create_database_tables(&self) -> Result<(), Error>; - - async fn load_persistent_torrents(&self) -> Result, Error>; - - async fn load_keys(&self) -> Result, Error>; - - async fn load_whitelist(&self) -> Result, Error>; - - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; - - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; - - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; - - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; - - async fn get_key_from_keys(&self, key: &str) -> Result; - - async fn 
add_key_to_keys(&self, auth_key: &AuthKey) -> Result; - - async fn remove_key_from_keys(&self, key: &str) -> Result; - - async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - if let Err(e) = self.get_info_hash_from_whitelist(&info_hash.to_owned().to_string()).await { - if let Error::QueryReturnedNoRows = e { - return Ok(false); - } else { - return Err(e); - } - } - Ok(true) - } -} - -#[derive(Debug, Display, PartialEq, Eq, Error)] -#[allow(dead_code)] -pub enum Error { - #[display(fmt = "Query returned no rows.")] - QueryReturnedNoRows, - #[display(fmt = "Invalid query.")] - InvalidQuery, - #[display(fmt = "Database error.")] - DatabaseError, -} - -impl From for Error { - fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - match e { - r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, - _ => Error::InvalidQuery, - } - } -} diff --git a/src/databases/driver.rs b/src/databases/driver.rs new file mode 100644 index 000000000..7eaa9064e --- /dev/null +++ b/src/databases/driver.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] +pub enum Driver { + Sqlite3, + MySQL, +} diff --git a/src/databases/error.rs b/src/databases/error.rs new file mode 100644 index 000000000..467db407f --- /dev/null +++ b/src/databases/error.rs @@ -0,0 +1,21 @@ +use derive_more::{Display, Error}; + +#[derive(Debug, Display, PartialEq, Eq, Error)] +#[allow(dead_code)] +pub enum Error { + #[display(fmt = "Query returned no rows.")] + QueryReturnedNoRows, + #[display(fmt = "Invalid query.")] + InvalidQuery, + #[display(fmt = "Database error.")] + DatabaseError, +} + +impl From for Error { + fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { + match e { + r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => Error::QueryReturnedNoRows, + _ => Error::InvalidQuery, + } + } +} diff --git a/src/databases/mod.rs b/src/databases/mod.rs index 169d99f4d..c1d265b56 100644 --- a/src/databases/mod.rs +++ b/src/databases/mod.rs @@ -1,3 +1,73 @@ -pub mod database; +pub mod driver; +pub mod error; pub mod mysql; pub mod sqlite; + +use async_trait::async_trait; + +use self::driver::Driver; +use self::error::Error; +use crate::databases::mysql::Mysql; +use crate::databases::sqlite::Sqlite; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::auth; + +/// # Errors +/// +/// Will return `r2d2::Error` if `db_path` is not able to create a database. +pub fn connect(db_driver: &Driver, db_path: &str) -> Result, r2d2::Error> { + let database: Box = match db_driver { + Driver::Sqlite3 => { + let db = Sqlite::new(db_path)?; + Box::new(db) + } + Driver::MySQL => { + let db = Mysql::new(db_path)?; + Box::new(db) + } + }; + + database.create_database_tables().expect("Could not create database tables."); + + Ok(database) +} + +#[async_trait] +pub trait Database: Sync + Send { + /// # Errors + /// + /// Will return `Error` if unable to create own tables. 
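For reference, a sketch of the new call site for `connect`, which replaces `database::connect_database`. Generic parameters are elided in this rendering of the diff, so the `Box<dyn Database>` return type below is reconstructed from the old `connect_database` signature and is an assumption.

```rust
use torrust_tracker::databases::{self, driver::Driver, Database};

// Sketch: open the SQLite backend. `connect` creates the tables as a
// side effect and panics (via `expect`) if table creation fails, so a
// successful return means the schema is in place.
fn open_database() -> Result<Box<dyn Database>, r2d2::Error> {
    databases::connect(&Driver::Sqlite3, "data.db")
}
```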
+ fn create_database_tables(&self) -> Result<(), Error>; + + async fn load_persistent_torrents(&self) -> Result, Error>; + + async fn load_keys(&self) -> Result, Error>; + + async fn load_whitelist(&self) -> Result, Error>; + + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; + + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result; + + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; + + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + + async fn get_key_from_keys(&self, key: &str) -> Result; + + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result; + + async fn remove_key_from_keys(&self, key: &str) -> Result; + + async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { + self.get_info_hash_from_whitelist(&info_hash.clone().to_string()) + .await + .map_or_else( + |e| match e { + Error::QueryReturnedNoRows => Ok(false), + e => Err(e), + }, + |_| Ok(true), + ) + } +} diff --git a/src/databases/mysql.rs b/src/databases/mysql.rs index fc6ff5098..8322b2273 100644 --- a/src/databases/mysql.rs +++ b/src/databases/mysql.rs @@ -8,16 +8,19 @@ use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MysqlConnectionManager; -use crate::databases::database; -use crate::databases::database::{Database, Error}; -use crate::tracker::key::AuthKey; -use crate::{InfoHash, AUTH_KEY_LENGTH}; +use crate::databases::{Database, Error}; +use crate::protocol::common::AUTH_KEY_LENGTH; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::auth; -pub struct MysqlDatabase { +pub struct Mysql { pool: Pool, } -impl MysqlDatabase { +impl Mysql { + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create `MySQL` database. 
pub fn new(db_path: &str) -> Result { let opts = Opts::from_url(db_path).expect("Failed to connect to MySQL database."); let builder = OptsBuilder::from_opts(opts); @@ -31,8 +34,8 @@ impl MysqlDatabase { } #[async_trait] -impl Database for MysqlDatabase { - fn create_database_tables(&self) -> Result<(), database::Error> { +impl Database for Mysql { + fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id integer PRIMARY KEY AUTO_INCREMENT, @@ -57,10 +60,10 @@ impl Database for MysqlDatabase { PRIMARY KEY (`id`), UNIQUE (`key`) );", - AUTH_KEY_LENGTH as i8 + i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") ); - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; conn.query_drop(&create_torrents_table) .expect("Could not create torrents table."); @@ -71,8 +74,8 @@ impl Database for MysqlDatabase { Ok(()) } - async fn load_persistent_torrents(&self) -> Result, database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn load_persistent_torrents(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let torrents: Vec<(InfoHash, u32)> = conn .query_map( @@ -82,41 +85,41 @@ impl Database for MysqlDatabase { (info_hash, completed) }, ) - .map_err(|_| database::Error::QueryReturnedNoRows)?; + .map_err(|_| Error::QueryReturnedNoRows)?; Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn load_keys(&self) -> Result, Error> { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; - let keys: Vec = conn + let keys: Vec = conn .query_map( "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| AuthKey { + |(key, valid_until): (String, i64)| auth::Key { key, - valid_until: Some(Duration::from_secs(valid_until as u64)), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, ) - .map_err(|_| database::Error::QueryReturnedNoRows)?; + .map_err(|_| Error::QueryReturnedNoRows)?; Ok(keys) } async fn load_whitelist(&self) -> Result, Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hashes: Vec = conn .query_map("SELECT info_hash FROM whitelist", |info_hash: String| { InfoHash::from_str(&info_hash).unwrap() }) - .map_err(|_| database::Error::QueryReturnedNoRows)?; + .map_err(|_| Error::QueryReturnedNoRows)?; Ok(info_hashes) } - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -128,28 +131,28 @@ impl Database for MysqlDatabase { } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| 
Error::DatabaseError)?; match conn .exec_first::( "SELECT info_hash FROM whitelist WHERE info_hash = :info_hash", params! { info_hash }, ) - .map_err(|_| database::Error::DatabaseError)? + .map_err(|_| Error::DatabaseError)? { Some(info_hash) => Ok(InfoHash::from_str(&info_hash).unwrap()), - None => Err(database::Error::QueryReturnedNoRows), + None => Err(Error::QueryReturnedNoRows), } } - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hash_str = info_hash.to_string(); @@ -160,13 +163,13 @@ impl Database for MysqlDatabase { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let info_hash = info_hash.to_string(); @@ -174,28 +177,28 @@ impl Database for MysqlDatabase { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn .exec_first::<(String, i64), _, _>("SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { key }) - .map_err(|_| database::Error::QueryReturnedNoRows)? + .map_err(|_| Error::QueryReturnedNoRows)? { - Some((key, valid_until)) => Ok(AuthKey { + Some((key, valid_until)) => Ok(auth::Key { key, - valid_until: Some(Duration::from_secs(valid_until as u64)), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }), - None => Err(database::Error::InvalidQuery), + None => Err(Error::InvalidQuery), } } - async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let key = auth_key.key.to_string(); let valid_until = auth_key.valid_until.unwrap_or(Duration::ZERO).as_secs().to_string(); @@ -207,19 +210,19 @@ impl Database for MysqlDatabase { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_key_from_keys(&self, key: &str) -> Result { - let mut conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_key_from_keys(&self, key: &str) -> Result { + let mut conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! 
{ key }) { Ok(_) => Ok(1), Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } diff --git a/src/databases/sqlite.rs b/src/databases/sqlite.rs index 7a567b07e..c5401aacf 100644 --- a/src/databases/sqlite.rs +++ b/src/databases/sqlite.rs @@ -5,27 +5,29 @@ use log::debug; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use crate::databases::database; -use crate::databases::database::{Database, Error}; +use crate::databases::{Database, Error}; use crate::protocol::clock::DurationSinceUnixEpoch; -use crate::tracker::key::AuthKey; -use crate::InfoHash; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::auth; -pub struct SqliteDatabase { +pub struct Sqlite { pool: Pool, } -impl SqliteDatabase { - pub fn new(db_path: &str) -> Result { +impl Sqlite { + /// # Errors + /// + /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. + pub fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); let pool = Pool::new(cm).expect("Failed to create r2d2 SQLite connection pool."); - Ok(SqliteDatabase { pool }) + Ok(Sqlite { pool }) } } #[async_trait] -impl Database for SqliteDatabase { - fn create_database_tables(&self) -> Result<(), database::Error> { +impl Database for Sqlite { + fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -49,17 +51,17 @@ impl Database for SqliteDatabase { );" .to_string(); - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; conn.execute(&create_whitelist_table, []) .and_then(|_| conn.execute(&create_keys_table, [])) .and_then(|_| conn.execute(&create_torrents_table, [])) - .map_err(|_| database::Error::InvalidQuery) + .map_err(|_| Error::InvalidQuery) .map(|_| ()) } - async fn load_persistent_torrents(&self) -> Result, database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn load_persistent_torrents(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -70,13 +72,13 @@ impl Database for SqliteDatabase { Ok((info_hash, completed)) })?; - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(|x| x.ok()).collect(); + let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(std::result::Result::ok).collect(); Ok(torrents) } - async fn load_keys(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn load_keys(&self) -> Result, Error> { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -84,19 +86,19 @@ impl Database for SqliteDatabase { let key = row.get(0)?; let valid_until: i64 = row.get(1)?; - Ok(AuthKey { + Ok(auth::Key { key, - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) })?; - let keys: Vec = keys_iter.filter_map(|x| x.ok()).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } async fn load_whitelist(&self) -> Result, Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = 
conn.prepare("SELECT info_hash FROM whitelist")?; @@ -106,13 +108,13 @@ impl Database for SqliteDatabase { Ok(InfoHash::from_str(&info_hash).unwrap()) })?; - let info_hashes: Vec = info_hash_iter.filter_map(|x| x.ok()).collect(); + let info_hashes: Vec = info_hash_iter.filter_map(std::result::Result::ok).collect(); Ok(info_hashes) } - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), database::Error> { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute( "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", @@ -122,17 +124,17 @@ impl Database for SqliteDatabase { if updated > 0 { return Ok(()); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_info_hash_from_whitelist(&self, info_hash: &str) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; let mut rows = stmt.query([info_hash])?; @@ -140,70 +142,70 @@ impl Database for SqliteDatabase { match rows.next() { Ok(row) => match row { Some(row) => Ok(InfoHash::from_str(&row.get_unwrap::<_, String>(0)).unwrap()), - None => Err(database::Error::QueryReturnedNoRows), + None => Err(Error::QueryReturnedNoRows), }, Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()]) { Ok(updated) => { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn get_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn get_key_from_keys(&self, key: &str) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; let mut rows = stmt.query([key.to_string()])?; if let 
Some(row) = rows.next()? { let key: String = row.get(0).unwrap(); - let valid_until_i64: i64 = row.get(1).unwrap(); + let valid_until: i64 = row.get(1).unwrap(); - Ok(AuthKey { + Ok(auth::Key { key, - valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until_i64 as u64)), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }) } else { - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } } - async fn add_key_to_keys(&self, auth_key: &AuthKey) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn add_key_to_keys(&self, auth_key: &auth::Key) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute( "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", @@ -213,28 +215,28 @@ impl Database for SqliteDatabase { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } - async fn remove_key_from_keys(&self, key: &str) -> Result { - let conn = self.pool.get().map_err(|_| database::Error::DatabaseError)?; + async fn remove_key_from_keys(&self, key: &str) -> Result { + let conn = self.pool.get().map_err(|_| Error::DatabaseError)?; match conn.execute("DELETE FROM keys WHERE key = ?", [key]) { Ok(updated) => { if updated > 0 { return Ok(updated); } - Err(database::Error::QueryReturnedNoRows) + Err(Error::QueryReturnedNoRows) } Err(e) => { debug!("{:?}", e); - Err(database::Error::InvalidQuery) + Err(Error::InvalidQuery) } } } diff --git a/src/http/errors.rs b/src/http/error.rs similarity index 86% rename from src/http/errors.rs rename to src/http/error.rs index fe0cf26e6..b6c08a8ba 100644 --- a/src/http/errors.rs +++ b/src/http/error.rs @@ -2,12 +2,12 @@ use thiserror::Error; use warp::reject::Reject; #[derive(Error, Debug)] -pub enum ServerError { +pub enum Error { #[error("internal server error")] - InternalServerError, + InternalServer, #[error("info_hash is either missing or invalid")] - InvalidInfoHash, + InvalidInfo, #[error("peer_id is either missing or invalid")] InvalidPeerId, @@ -31,4 +31,4 @@ pub enum ServerError { ExceededInfoHashLimit, } -impl Reject for ServerError {} +impl Reject for Error {} diff --git a/src/http/filters.rs b/src/http/filters.rs index 42d1592ff..0fe369eba 100644 --- a/src/http/filters.rs +++ b/src/http/filters.rs @@ -5,62 +5,73 @@ use std::sync::Arc; use warp::{reject, Filter, Rejection}; -use crate::http::{AnnounceRequest, AnnounceRequestQuery, ScrapeRequest, ServerError, WebResult}; -use crate::tracker::key::AuthKey; -use crate::tracker::TorrentTracker; -use crate::{InfoHash, PeerId, MAX_SCRAPE_TORRENTS}; - -/// Pass Arc along -pub fn with_tracker(tracker: Arc) -> impl Filter,), Error = Infallible> + Clone { +use super::error::Error; +use super::{request, WebResult}; +use crate::protocol::common::MAX_SCRAPE_TORRENTS; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::{self, auth, peer}; + +/// Pass Arc along +#[must_use] +pub fn with_tracker( + tracker: Arc, +) -> impl Filter,), Error = Infallible> + Clone { warp::any().map(move || tracker.clone()) } /// Check for infoHash +#[must_use] pub fn with_info_hash() -> impl Filter,), Error = Rejection> + Clone { - warp::filters::query::raw().and_then(info_hashes) + warp::filters::query::raw().and_then(|q| async move { info_hashes(&q) }) } -/// Check for PeerId -pub fn with_peer_id() -> impl Filter + Clone { 
- warp::filters::query::raw().and_then(peer_id) +/// Check for `PeerId` +#[must_use] +pub fn with_peer_id() -> impl Filter + Clone { + warp::filters::query::raw().and_then(|q| async move { peer_id(&q) }) } -/// Pass Arc along -pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { +/// Pass Arc along +#[must_use] +pub fn with_auth_key() -> impl Filter,), Error = Infallible> + Clone { warp::path::param::() - .map(|key: String| AuthKey::from_string(&key)) - .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) + .map(|key: String| auth::Key::from_string(&key)) + .or_else(|_| async { Ok::<(Option,), Infallible>((None,)) }) } -/// Check for PeerAddress +/// Check for `PeerAddress` +#[must_use] pub fn with_peer_addr(on_reverse_proxy: bool) -> impl Filter + Clone { warp::addr::remote() .and(warp::header::optional::("X-Forwarded-For")) .map(move |remote_addr: Option, x_forwarded_for: Option| { (on_reverse_proxy, remote_addr, x_forwarded_for) }) - .and_then(peer_addr) + .and_then(|q| async move { peer_addr(q) }) } -/// Check for AnnounceRequest -pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { - warp::filters::query::query::() +/// Check for `request::Announce` +#[must_use] +pub fn with_announce_request(on_reverse_proxy: bool) -> impl Filter + Clone { + warp::filters::query::query::() .and(with_info_hash()) .and(with_peer_id()) .and(with_peer_addr(on_reverse_proxy)) - .and_then(announce_request) + .and_then(|q, r, s, t| async move { announce_request(q, &r, s, t) }) } -/// Check for ScrapeRequest -pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { +/// Check for `ScrapeRequest` +#[must_use] +pub fn with_scrape_request(on_reverse_proxy: bool) -> impl Filter + Clone { warp::any() .and(with_info_hash()) .and(with_peer_addr(on_reverse_proxy)) - .and_then(scrape_request) + .and_then(|q, r| async move { scrape_request(q, r) }) } -/// Parse InfoHash from raw query string -async fn info_hashes(raw_query: String) -> WebResult> { +/// Parse `InfoHash` from raw query string +#[allow(clippy::ptr_arg)] +fn info_hashes(raw_query: &String) -> WebResult> { let split_raw_query: Vec<&str> = raw_query.split('&').collect(); let mut info_hashes: Vec = Vec::new(); @@ -76,20 +87,21 @@ async fn info_hashes(raw_query: String) -> WebResult> { } if info_hashes.len() > MAX_SCRAPE_TORRENTS as usize { - Err(reject::custom(ServerError::ExceededInfoHashLimit)) + Err(reject::custom(Error::ExceededInfoHashLimit)) } else if info_hashes.is_empty() { - Err(reject::custom(ServerError::InvalidInfoHash)) + Err(reject::custom(Error::InvalidInfo)) } else { Ok(info_hashes) } } -/// Parse PeerId from raw query string -async fn peer_id(raw_query: String) -> WebResult { +/// Parse `PeerId` from raw query string +#[allow(clippy::ptr_arg)] +fn peer_id(raw_query: &String) -> WebResult { // put all query params in a vec let split_raw_query: Vec<&str> = raw_query.split('&').collect(); - let mut peer_id: Option = None; + let mut peer_id: Option = None; for v in split_raw_query { // look for the peer_id param @@ -102,61 +114,59 @@ async fn peer_id(raw_query: String) -> WebResult { // peer_id must be 20 bytes if peer_id_bytes.len() != 20 { - return Err(reject::custom(ServerError::InvalidPeerId)); + return Err(reject::custom(Error::InvalidPeerId)); } // clone peer_id_bytes into fixed length array let mut byte_arr: [u8; 20] = Default::default(); byte_arr.clone_from_slice(peer_id_bytes.as_slice()); - peer_id = Some(PeerId(byte_arr)); + peer_id = Some(peer::Id(byte_arr)); break; 
} } - if peer_id.is_none() { - Err(reject::custom(ServerError::InvalidPeerId)) - } else { - Ok(peer_id.unwrap()) + match peer_id { + Some(id) => Ok(id), + None => Err(reject::custom(Error::InvalidPeerId)), } } -/// Get PeerAddress from RemoteAddress or Forwarded -async fn peer_addr( - (on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option), -) -> WebResult { +/// Get `PeerAddress` from `RemoteAddress` or Forwarded +fn peer_addr((on_reverse_proxy, remote_addr, x_forwarded_for): (bool, Option, Option)) -> WebResult { if !on_reverse_proxy && remote_addr.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound)); } if on_reverse_proxy && x_forwarded_for.is_none() { - return Err(reject::custom(ServerError::AddressNotFound)); + return Err(reject::custom(Error::AddressNotFound)); } - match on_reverse_proxy { - true => { - let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); - // remove whitespace chars - x_forwarded_for_raw.retain(|c| !c.is_whitespace()); - // get all forwarded ip's in a vec - let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); - // set client ip to last forwarded ip - let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); - - IpAddr::from_str(x_forwarded_ip).map_err(|_| reject::custom(ServerError::AddressNotFound)) - } - false => Ok(remote_addr.unwrap().ip()), + if on_reverse_proxy { + let mut x_forwarded_for_raw = x_forwarded_for.unwrap(); + // remove whitespace chars + x_forwarded_for_raw.retain(|c| !c.is_whitespace()); + // get all forwarded ip's in a vec + let x_forwarded_ips: Vec<&str> = x_forwarded_for_raw.split(',').collect(); + // set client ip to last forwarded ip + let x_forwarded_ip = *x_forwarded_ips.last().unwrap(); + + IpAddr::from_str(x_forwarded_ip).map_err(|_| reject::custom(Error::AddressNotFound)) + } else { + Ok(remote_addr.unwrap().ip()) } } -/// Parse AnnounceRequest from raw AnnounceRequestQuery, InfoHash and Option -async fn announce_request( - announce_request_query: AnnounceRequestQuery, - info_hashes: Vec, - peer_id: PeerId, +/// Parse `AnnounceRequest` from raw `AnnounceRequestQuery`, `InfoHash` and Option +#[allow(clippy::unnecessary_wraps)] +#[allow(clippy::ptr_arg)] +fn announce_request( + announce_request_query: request::AnnounceQuery, + info_hashes: &Vec, + peer_id: peer::Id, peer_addr: IpAddr, -) -> WebResult { - Ok(AnnounceRequest { +) -> WebResult { + Ok(request::Announce { info_hash: info_hashes[0], peer_addr, downloaded: announce_request_query.downloaded.unwrap_or(0), @@ -169,7 +179,8 @@ async fn announce_request( }) } -/// Parse ScrapeRequest from InfoHash -async fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { - Ok(ScrapeRequest { info_hashes, peer_addr }) +/// Parse `ScrapeRequest` from `InfoHash` +#[allow(clippy::unnecessary_wraps)] +fn scrape_request(info_hashes: Vec, peer_addr: IpAddr) -> WebResult { + Ok(request::Scrape { info_hashes, peer_addr }) } diff --git a/src/http/handlers.rs b/src/http/handlers.rs index 87d2d51f6..1170b7188 100644 --- a/src/http/handlers.rs +++ b/src/http/handlers.rs @@ -7,54 +7,48 @@ use log::debug; use warp::http::Response; use warp::{reject, Rejection, Reply}; -use crate::http::{ - AnnounceRequest, AnnounceResponse, ErrorResponse, Peer, ScrapeRequest, ScrapeResponse, ScrapeResponseEntry, ServerError, - WebResult, -}; -use crate::peer::TorrentPeer; -use crate::tracker::key::AuthKey; -use crate::tracker::statistics::TrackerStatisticsEvent; -use 
crate::tracker::torrent::{TorrentError, TorrentStats}; -use crate::tracker::TorrentTracker; -use crate::InfoHash; - -/// Authenticate InfoHash using optional AuthKey +use super::error::Error; +use super::{request, response, WebResult}; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::{self, auth, peer, statistics, torrent}; + +/// Authenticate `InfoHash` using optional `auth::Key` +/// +/// # Errors +/// +/// Will return `ServerError` that wraps the `Error` if unable to `authenticate_request`. pub async fn authenticate( info_hash: &InfoHash, - auth_key: &Option, - tracker: Arc, -) -> Result<(), ServerError> { - match tracker.authenticate_request(info_hash, auth_key).await { - Ok(_) => Ok(()), - Err(e) => { - let err = match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, - }; - - Err(err) - } - } + auth_key: &Option, + tracker: Arc, +) -> Result<(), Error> { + tracker.authenticate_request(info_hash, auth_key).await.map_err(|e| match e { + torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, + torrent::Error::NoPeersFound => Error::NoPeersFound, + torrent::Error::CouldNotSendResponse => Error::InternalServer, + torrent::Error::InvalidInfoHash => Error::InvalidInfo, + }) } /// Handle announce request +/// +/// # Errors +/// +/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. pub async fn handle_announce( - announce_request: AnnounceRequest, - auth_key: Option, - tracker: Arc, + announce_request: request::Announce, + auth_key: Option, + tracker: Arc, ) -> WebResult { - if let Err(e) = authenticate(&announce_request.info_hash, &auth_key, tracker.clone()).await { - return Err(reject::custom(e)); - } + authenticate(&announce_request.info_hash, &auth_key, tracker.clone()) + .await + .map_err(reject::custom)?; debug!("{:?}", announce_request); - let peer = - TorrentPeer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); + let peer = peer::Peer::from_http_announce_request(&announce_request, announce_request.peer_addr, tracker.config.get_ext_ip()); let torrent_stats = tracker .update_torrent_with_peer_and_get_stats(&announce_request.info_hash, &peer) .await; @@ -67,50 +61,54 @@ pub async fn handle_announce( // send stats event match announce_request.peer_addr { IpAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Announce).await; + tracker.send_stats_event(statistics::Event::Tcp4Announce).await; } IpAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Announce).await; + tracker.send_stats_event(statistics::Event::Tcp6Announce).await; } } send_announce_response( &announce_request, - torrent_stats, - peers, + &torrent_stats, + &peers, announce_interval, tracker.config.min_announce_interval, ) } /// Handle scrape request +/// +/// # Errors +/// +/// Will return `warp::Rejection` that wraps the `ServerError` if unable to `send_scrape_response`. 
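The handlers in this file are wired up through the filters from `filters.rs`. Since `routes.rs` is not part of this diff, the composition here is an assumption written against the filter and handler signatures shown; the `announce_route` name and the path layout are hypothetical.

```rust
use std::sync::Arc;

use warp::Filter;

use torrust_tracker::http::{filters, handlers};
use torrust_tracker::tracker;

// Sketch: compose the reworked filters into `handle_announce`. The
// filters extract, in order, a `request::Announce`, an
// `Option<auth::Key>`, and the shared tracker, which are exactly the
// three arguments `handle_announce` takes.
fn announce_route(
    tracker: Arc<tracker::TorrentTracker>,
    on_reverse_proxy: bool,
) -> impl Filter<Extract = (impl warp::Reply,), Error = warp::Rejection> + Clone {
    warp::path::path("announce")
        .and(filters::with_announce_request(on_reverse_proxy))
        .and(filters::with_auth_key())
        .and(filters::with_tracker(tracker))
        .and_then(handlers::handle_announce)
}
```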
pub async fn handle_scrape( - scrape_request: ScrapeRequest, - auth_key: Option, - tracker: Arc, + scrape_request: request::Scrape, + auth_key: Option, + tracker: Arc, ) -> WebResult { - let mut files: HashMap = HashMap::new(); + let mut files: HashMap = HashMap::new(); let db = tracker.get_torrents().await; - for info_hash in scrape_request.info_hashes.iter() { + for info_hash in &scrape_request.info_hashes { let scrape_entry = match db.get(info_hash) { Some(torrent_info) => { if authenticate(info_hash, &auth_key, tracker.clone()).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); - ScrapeResponseEntry { + response::ScrapeEntry { complete: seeders, downloaded: completed, incomplete: leechers, } } else { - ScrapeResponseEntry { + response::ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, } } } - None => ScrapeResponseEntry { + None => response::ScrapeEntry { complete: 0, downloaded: 0, incomplete: 0, @@ -123,10 +121,10 @@ pub async fn handle_scrape( // send stats event match scrape_request.peer_addr { IpAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp4Scrape).await; + tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; } IpAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Tcp6Scrape).await; + tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; } } @@ -134,23 +132,24 @@ pub async fn handle_scrape( } /// Send announce response +#[allow(clippy::ptr_arg)] fn send_announce_response( - announce_request: &AnnounceRequest, - torrent_stats: TorrentStats, - peers: Vec, + announce_request: &request::Announce, + torrent_stats: &torrent::SwamStats, + peers: &Vec, interval: u32, interval_min: u32, ) -> WebResult { - let http_peers: Vec = peers + let http_peers: Vec = peers .iter() - .map(|peer| Peer { + .map(|peer| response::Peer { peer_id: peer.peer_id.to_string(), ip: peer.peer_addr.ip(), port: peer.peer_addr.port(), }) .collect(); - let res = AnnounceResponse { + let res = response::Announce { interval, interval_min, complete: torrent_stats.seeders, @@ -162,7 +161,7 @@ fn send_announce_response( if let Some(1) = announce_request.compact { match res.write_compact() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)), + Err(_) => Err(reject::custom(Error::InternalServer)), } } else { Ok(Response::new(res.write().into())) @@ -170,26 +169,30 @@ fn send_announce_response( } /// Send scrape response -fn send_scrape_response(files: HashMap) -> WebResult { - let res = ScrapeResponse { files }; +fn send_scrape_response(files: HashMap) -> WebResult { + let res = response::Scrape { files }; match res.write() { Ok(body) => Ok(Response::new(body)), - Err(_) => Err(reject::custom(ServerError::InternalServerError)), + Err(_) => Err(reject::custom(Error::InternalServer)), } } /// Handle all server errors and send error reply -pub async fn send_error(r: Rejection) -> std::result::Result { - let body = if let Some(server_error) = r.find::() { +/// +/// # Errors +/// +/// Will not return a error, `Infallible`, but instead convert the `ServerError` into a `Response`. 
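`handle_scrape` above folds three cases (known and authorized, known but unauthorized, unknown) into one `ScrapeEntry` per requested torrent, zeroing the stats rather than omitting the entry. A self-contained sketch of that aggregation, with simplified stand-in types:

use std::collections::HashMap;

// Stand-ins for the crate's InfoHash and response::ScrapeEntry types.
type InfoHash = [u8; 20];

#[derive(Debug, Default, PartialEq)]
struct ScrapeEntry {
    complete: u32,
    downloaded: u32,
    incomplete: u32,
}

// Unknown (or unauthorized) torrents get a zeroed entry instead of being omitted.
fn scrape(db: &HashMap<InfoHash, (u32, u32, u32)>, requested: &[InfoHash]) -> HashMap<InfoHash, ScrapeEntry> {
    let mut files = HashMap::new();
    for info_hash in requested {
        let entry = match db.get(info_hash) {
            Some(&(seeders, completed, leechers)) => ScrapeEntry {
                complete: seeders,
                downloaded: completed,
                incomplete: leechers,
            },
            None => ScrapeEntry::default(),
        };
        files.insert(*info_hash, entry);
    }
    files
}

fn main() {
    let mut db = HashMap::new();
    db.insert([1u8; 20], (5, 12, 3));
    let files = scrape(&db, &[[1u8; 20], [2u8; 20]]);
    assert_eq!(files[&[1u8; 20]].complete, 5);
    assert_eq!(files[&[2u8; 20]], ScrapeEntry::default());
}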
+pub fn send_error(r: &Rejection) -> std::result::Result { + let body = if let Some(server_error) = r.find::() { debug!("{:?}", server_error); - ErrorResponse { + response::Error { failure_reason: server_error.to_string(), } .write() } else { - ErrorResponse { - failure_reason: ServerError::InternalServerError.to_string(), + response::Error { + failure_reason: Error::InternalServer.to_string(), } .write() }; diff --git a/src/http/mod.rs b/src/http/mod.rs index 4842c0a25..701dba407 100644 --- a/src/http/mod.rs +++ b/src/http/mod.rs @@ -1,12 +1,4 @@ -pub use self::errors::*; -pub use self::filters::*; -pub use self::handlers::*; -pub use self::request::*; -pub use self::response::*; -pub use self::routes::*; -pub use self::server::*; - -pub mod errors; +pub mod error; pub mod filters; pub mod handlers; pub mod request; diff --git a/src/http/request.rs b/src/http/request.rs index 6dd025e8c..bc549b698 100644 --- a/src/http/request.rs +++ b/src/http/request.rs @@ -3,10 +3,11 @@ use std::net::IpAddr; use serde::Deserialize; use crate::http::Bytes; -use crate::{InfoHash, PeerId}; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::peer; #[derive(Deserialize)] -pub struct AnnounceRequestQuery { +pub struct AnnounceQuery { pub downloaded: Option, pub uploaded: Option, pub key: Option, @@ -17,19 +18,19 @@ pub struct AnnounceRequestQuery { } #[derive(Debug)] -pub struct AnnounceRequest { +pub struct Announce { pub info_hash: InfoHash, pub peer_addr: IpAddr, pub downloaded: Bytes, pub uploaded: Bytes, - pub peer_id: PeerId, + pub peer_id: peer::Id, pub port: u16, pub left: Bytes, pub event: Option, pub compact: Option, } -pub struct ScrapeRequest { +pub struct Scrape { pub info_hashes: Vec, pub peer_addr: IpAddr, } diff --git a/src/http/response.rs b/src/http/response.rs index c87b5e0e8..962e72fac 100644 --- a/src/http/response.rs +++ b/src/http/response.rs @@ -1,12 +1,11 @@ use std::collections::HashMap; -use std::error::Error; use std::io::Write; use std::net::IpAddr; use serde; use serde::Serialize; -use crate::InfoHash; +use crate::protocol::info_hash::InfoHash; #[derive(Serialize)] pub struct Peer { @@ -16,7 +15,7 @@ pub struct Peer { } #[derive(Serialize)] -pub struct AnnounceResponse { +pub struct Announce { pub interval: u32, #[serde(rename = "min interval")] pub interval_min: u32, @@ -26,12 +25,19 @@ pub struct AnnounceResponse { pub peers: Vec, } -impl AnnounceResponse { +impl Announce { + /// # Panics + /// + /// It would panic if the `Announce` struct would contain an inappropriate type. + #[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() } - pub fn write_compact(&self) -> Result, Box> { + /// # Errors + /// + /// Will return `Err` if internally interrupted. + pub fn write_compact(&self) -> Result, Box> { let mut peers_v4: Vec = Vec::new(); let mut peers_v6: Vec = Vec::new(); @@ -72,24 +78,27 @@ impl AnnounceResponse { } #[derive(Serialize)] -pub struct ScrapeResponseEntry { +pub struct ScrapeEntry { pub complete: u32, pub downloaded: u32, pub incomplete: u32, } #[derive(Serialize)] -pub struct ScrapeResponse { - pub files: HashMap, +pub struct Scrape { + pub files: HashMap, } -impl ScrapeResponse { - pub fn write(&self) -> Result, Box> { +impl Scrape { + /// # Errors + /// + /// Will return `Err` if internally interrupted. 
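`send_error` above always answers with a bencoded dictionary holding a single `failure reason` key, which is what BitTorrent HTTP clients expect. A sketch of that serialization using `serde_bencode`, the same crate the response types in this file call; the message text is arbitrary:

use serde::Serialize;

#[derive(Serialize)]
struct Error {
    #[serde(rename = "failure reason")]
    failure_reason: String,
}

fn main() {
    let res = Error {
        failure_reason: "info_hash invalid".to_string(),
    };
    // serde_bencode renders the struct as a bencoded dictionary:
    // "failure reason" is 14 bytes, the message is 17 bytes.
    assert_eq!(
        serde_bencode::to_string(&res).unwrap(),
        "d14:failure reason17:info_hash invalide"
    );
}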
+ pub fn write(&self) -> Result, Box> { let mut bytes: Vec = Vec::new(); bytes.write_all(b"d5:filesd")?; - for (info_hash, scrape_response_entry) in self.files.iter() { + for (info_hash, scrape_response_entry) in &self.files { bytes.write_all(b"20:")?; bytes.write_all(&info_hash.0)?; bytes.write_all(b"d8:completei")?; @@ -108,12 +117,16 @@ impl ScrapeResponse { } #[derive(Serialize)] -pub struct ErrorResponse { +pub struct Error { #[serde(rename = "failure reason")] pub failure_reason: String, } -impl ErrorResponse { +impl Error { + /// # Panics + /// + /// It would panic if the `Error` struct would contain an inappropriate type. + #[must_use] pub fn write(&self) -> String { serde_bencode::to_string(&self).unwrap() } diff --git a/src/http/routes.rs b/src/http/routes.rs index 8bfaf5ed9..c46c502e4 100644 --- a/src/http/routes.rs +++ b/src/http/routes.rs @@ -3,18 +3,20 @@ use std::sync::Arc; use warp::{Filter, Rejection}; -use crate::http::{ - handle_announce, handle_scrape, send_error, with_announce_request, with_auth_key, with_scrape_request, with_tracker, -}; -use crate::tracker::TorrentTracker; +use super::filters::{with_announce_request, with_auth_key, with_scrape_request, with_tracker}; +use super::handlers::{handle_announce, handle_scrape, send_error}; +use crate::tracker; /// All routes -pub fn routes(tracker: Arc) -> impl Filter + Clone { - announce(tracker.clone()).or(scrape(tracker)).recover(send_error) +#[must_use] +pub fn routes(tracker: Arc) -> impl Filter + Clone { + announce(tracker.clone()) + .or(scrape(tracker)) + .recover(|q| async move { send_error(&q) }) } /// GET /announce or /announce/ -fn announce(tracker: Arc) -> impl Filter + Clone { +fn announce(tracker: Arc) -> impl Filter + Clone { warp::path::path("announce") .and(warp::filters::method::get()) .and(with_announce_request(tracker.config.on_reverse_proxy)) @@ -24,7 +26,7 @@ fn announce(tracker: Arc) -> impl Filter -fn scrape(tracker: Arc) -> impl Filter + Clone { +fn scrape(tracker: Arc) -> impl Filter + Clone { warp::path::path("scrape") .and(warp::filters::method::get()) .and(with_scrape_request(tracker.config.on_reverse_proxy)) diff --git a/src/http/server.rs b/src/http/server.rs index 4e48f97e3..894d3e911 100644 --- a/src/http/server.rs +++ b/src/http/server.rs @@ -1,37 +1,39 @@ use std::net::SocketAddr; use std::sync::Arc; -use crate::http::routes; -use crate::tracker::TorrentTracker; +use super::routes; +use crate::tracker; -/// Server that listens on HTTP, needs a TorrentTracker +/// Server that listens on HTTP, needs a `tracker::TorrentTracker` #[derive(Clone)] -pub struct HttpServer { - tracker: Arc, +pub struct Http { + tracker: Arc, } -impl HttpServer { - pub fn new(tracker: Arc) -> HttpServer { - HttpServer { tracker } +impl Http { + #[must_use] + pub fn new(tracker: Arc) -> Http { + Http { tracker } } - /// Start the HttpServer + /// Start the `HttpServer` pub fn start(&self, socket_addr: SocketAddr) -> impl warp::Future { - let (_addr, server) = warp::serve(routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); + let (_addr, server) = + warp::serve(routes::routes(self.tracker.clone())).bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }); server } - /// Start the HttpServer in TLS mode + /// Start the `HttpServer` in TLS mode pub fn start_tls( &self, socket_addr: SocketAddr, ssl_cert_path: String, 
ssl_key_path: String, ) -> impl warp::Future { - let (_addr, server) = warp::serve(routes(self.tracker.clone())) + let (_addr, server) = warp::serve(routes::routes(self.tracker.clone())) .tls() .cert_path(ssl_cert_path) .key_path(ssl_key_path) diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index 2d8f307b4..b8f031f5a 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -4,17 +4,22 @@ use std::sync::Arc; use log::{info, warn}; use tokio::task::JoinHandle; -use crate::tracker::TorrentTracker; -use crate::{HttpServer, HttpTrackerConfig}; +use crate::config::HttpTracker; +use crate::http::server::Http; +use crate::tracker; -pub fn start_job(config: &HttpTrackerConfig, tracker: Arc) -> JoinHandle<()> { +/// # Panics +/// +/// It would panic if the `config::HttpTracker` struct would contain an inappropriate values. +#[must_use] +pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.parse::().unwrap(); let ssl_enabled = config.ssl_enabled; let ssl_cert_path = config.ssl_cert_path.clone(); let ssl_key_path = config.ssl_key_path.clone(); tokio::spawn(async move { - let http_tracker = HttpServer::new(tracker); + let http_tracker = Http::new(tracker); if !ssl_enabled { info!("Starting HTTP server on: {}", bind_addr); diff --git a/src/jobs/torrent_cleanup.rs b/src/jobs/torrent_cleanup.rs index 04b064043..073ceda61 100644 --- a/src/jobs/torrent_cleanup.rs +++ b/src/jobs/torrent_cleanup.rs @@ -4,11 +4,12 @@ use chrono::Utc; use log::info; use tokio::task::JoinHandle; -use crate::tracker::TorrentTracker; -use crate::Configuration; +use crate::config::Configuration; +use crate::tracker; -pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(&tracker); +#[must_use] +pub fn start_job(config: &Configuration, tracker: &Arc) -> JoinHandle<()> { + let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; tokio::spawn(async move { @@ -27,7 +28,7 @@ pub fn start_job(config: &Configuration, tracker: Arc) -> JoinHa let start_time = Utc::now().time(); info!("Cleaning up torrents.."); tracker.cleanup_torrents().await; - info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()) + info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; } diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index ba5b8a1fb..2c00aa453 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -5,13 +5,16 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::api::server; -use crate::tracker::TorrentTracker; -use crate::Configuration; +use crate::config::Configuration; +use crate::tracker; #[derive(Debug)] pub struct ApiServerJobStarted(); -pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. 
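`torrent_cleanup::start_job` above downgrades the tracker to a `Weak` reference so the periodic job cannot keep the tracker alive after shutdown. A reduced sketch of the pattern, assuming the tokio runtime the crate already uses and a simplified `Tracker`:

use std::sync::{Arc, Weak};
use std::time::Duration;

struct Tracker;

impl Tracker {
    async fn cleanup_torrents(&self) {
        println!("cleaning up torrents..");
    }
}

fn start_job(tracker: &Arc<Tracker>, interval_secs: u64) -> tokio::task::JoinHandle<()> {
    let weak: Weak<Tracker> = Arc::downgrade(tracker);
    tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_secs(interval_secs));
        loop {
            interval.tick().await;
            // Upgrade fails once every strong reference is gone; end the job then.
            match weak.upgrade() {
                Some(tracker) => tracker.cleanup_torrents().await,
                None => break,
            }
        }
    })
}

#[tokio::main]
async fn main() {
    let tracker = Arc::new(Tracker);
    let job = start_job(&tracker, 1);
    drop(tracker);
    job.await.unwrap(); // exits promptly because the Weak no longer upgrades
}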
+pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { let bind_addr = config .http_api .bind_address @@ -24,11 +27,9 @@ pub async fn start_job(config: &Configuration, tracker: Arc) -> // Run the API server let join_handle = tokio::spawn(async move { - let handel = server::start(bind_addr, tracker); + let handel = server::start(bind_addr, &tracker); - if tx.send(ApiServerJobStarted()).is_err() { - panic!("the start job dropped"); - } + tx.send(ApiServerJobStarted()).expect("the start job dropped"); handel.await; }); @@ -36,7 +37,7 @@ pub async fn start_job(config: &Configuration, tracker: Arc) -> // Wait until the API server job is running match rx.await { Ok(_msg) => info!("Torrust API server started"), - Err(_) => panic!("the api server dropped"), + Err(e) => panic!("the api server dropped: {e}"), } join_handle diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 00fdaddbe..57369f660 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -3,14 +3,16 @@ use std::sync::Arc; use log::{error, info, warn}; use tokio::task::JoinHandle; -use crate::tracker::TorrentTracker; -use crate::{UdpServer, UdpTrackerConfig}; +use crate::config::UdpTracker; +use crate::tracker; +use crate::udp::server::Udp; -pub fn start_job(config: &UdpTrackerConfig, tracker: Arc) -> JoinHandle<()> { +#[must_use] +pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { let bind_addr = config.bind_address.clone(); tokio::spawn(async move { - match UdpServer::new(tracker, &bind_addr).await { + match Udp::new(tracker, &bind_addr).await { Ok(udp_server) => { info!("Starting UDP server on: {}", bind_addr); udp_server.start().await; diff --git a/src/lib.rs b/src/lib.rs index cf830f108..7e4fe13a7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,11 +1,3 @@ -pub use api::server::*; -pub use http::server::*; -pub use protocol::common::*; -pub use udp::server::*; - -pub use self::config::*; -pub use self::tracker::*; - pub mod api; pub mod config; pub mod databases; diff --git a/src/logging.rs b/src/logging.rs index 5d0efa8a4..4d16f7670 100644 --- a/src/logging.rs +++ b/src/logging.rs @@ -3,11 +3,11 @@ use std::sync::Once; use log::{info, LevelFilter}; -use crate::Configuration; +use crate::config::Configuration; static INIT: Once = Once::new(); -pub fn setup_logging(cfg: &Configuration) { +pub fn setup(cfg: &Configuration) { let level = config_level_or_default(&cfg.log_level); if level == log::LevelFilter::Off { @@ -35,7 +35,7 @@ fn stdout_config(level: LevelFilter) { record.target(), record.level(), message - )) + )); }) .level(level) .chain(std::io::stdout()) diff --git a/src/main.rs b/src/main.rs index bf832dbf4..a7316cef2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,9 +1,9 @@ use std::sync::Arc; use log::info; +use torrust_tracker::config::Configuration; use torrust_tracker::stats::setup_statistics; -use torrust_tracker::tracker::TorrentTracker; -use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, Configuration}; +use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, tracker}; #[tokio::main] async fn main() { @@ -27,7 +27,7 @@ async fn main() { let (stats_event_sender, stats_repository) = setup_statistics(config.tracker_usage_statistics); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(config.clone(), stats_event_sender, stats_repository) { + let tracker = match tracker::Tracker::new(&config.clone(), stats_event_sender, stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) 
=> { panic!("{}", error) @@ -35,7 +35,7 @@ async fn main() { }; // Initialize logging - logging::setup_logging(&config); + logging::setup(&config); // Run jobs let jobs = setup::setup(&config, tracker.clone()).await; diff --git a/src/protocol/clock/mod.rs b/src/protocol/clock/mod.rs index 4e15950e6..7868d4c5e 100644 --- a/src/protocol/clock/mod.rs +++ b/src/protocol/clock/mod.rs @@ -4,7 +4,7 @@ use std::time::Duration; pub type DurationSinceUnixEpoch = Duration; #[derive(Debug)] -pub enum ClockType { +pub enum Type { WorkingClock, StoppedClock, } @@ -12,23 +12,25 @@ pub enum ClockType { #[derive(Debug)] pub struct Clock; -pub type WorkingClock = Clock<{ ClockType::WorkingClock as usize }>; -pub type StoppedClock = Clock<{ ClockType::StoppedClock as usize }>; +pub type Working = Clock<{ Type::WorkingClock as usize }>; +pub type Stopped = Clock<{ Type::StoppedClock as usize }>; #[cfg(not(test))] -pub type DefaultClock = WorkingClock; +pub type Current = Working; #[cfg(test)] -pub type DefaultClock = StoppedClock; +pub type Current = Stopped; pub trait Time: Sized { fn now() -> DurationSinceUnixEpoch; } pub trait TimeNow: Time { + #[must_use] fn add(add_time: &Duration) -> Option { Self::now().checked_add(*add_time) } + #[must_use] fn sub(sub_time: &Duration) -> Option { Self::now().checked_sub(*sub_time) } @@ -38,44 +40,52 @@ pub trait TimeNow: Time { mod tests { use std::any::TypeId; - use crate::protocol::clock::{DefaultClock, StoppedClock, Time, WorkingClock}; + use crate::protocol::clock::{Current, Stopped, Time, Working}; #[test] fn it_should_be_the_stopped_clock_as_default_when_testing() { // We are testing, so we should default to the fixed time. - assert_eq!(TypeId::of::(), TypeId::of::()); - assert_eq!(StoppedClock::now(), DefaultClock::now()) + assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(Stopped::now(), Current::now()); } #[test] fn it_should_have_different_times() { - assert_ne!(TypeId::of::(), TypeId::of::()); - assert_ne!(StoppedClock::now(), WorkingClock::now()) + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(Stopped::now(), Working::now()); } } mod working_clock { use std::time::SystemTime; - use super::{DurationSinceUnixEpoch, Time, TimeNow, WorkingClock}; + use super::{DurationSinceUnixEpoch, Time, TimeNow, Working}; - impl Time for WorkingClock { + impl Time for Working { fn now() -> DurationSinceUnixEpoch { SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() } } - impl TimeNow for WorkingClock {} + impl TimeNow for Working {} } pub trait StoppedTime: TimeNow { fn local_set(unix_time: &DurationSinceUnixEpoch); fn local_set_to_unix_epoch() { - Self::local_set(&DurationSinceUnixEpoch::ZERO) + Self::local_set(&DurationSinceUnixEpoch::ZERO); } fn local_set_to_app_start_time(); fn local_set_to_system_time_now(); + + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. 
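The clock aliases above resolve `Current` to `Working` in release builds and to `Stopped` under `cfg(test)`, so production code and tests share one call site while tests get a deterministic time. A reduced sketch of the switch (the real `Stopped` keeps a settable thread-local value rather than a constant zero):

use std::time::{Duration, SystemTime};

trait Time {
    fn now() -> Duration; // duration since the Unix epoch
}

struct Working;

#[allow(dead_code)]
struct Stopped;

impl Time for Working {
    fn now() -> Duration {
        SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap()
    }
}

impl Time for Stopped {
    // Frozen for determinism; the real module lets tests set and advance it.
    fn now() -> Duration {
        Duration::ZERO
    }
}

// The same switch the diff uses: callers name `Current`, and the
// compiler substitutes the deterministic clock when testing.
#[cfg(not(test))]
type Current = Working;
#[cfg(test)]
type Current = Stopped;

fn main() {
    println!("now = {:?}", Current::now());
}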
fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; fn local_reset(); } @@ -84,9 +94,9 @@ mod stopped_clock { use std::num::IntErrorKind; use std::time::Duration; - use super::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow}; + use super::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow}; - impl Time for StoppedClock { + impl Time for Stopped { fn now() -> DurationSinceUnixEpoch { detail::FIXED_TIME.with(|time| { return *time.borrow(); @@ -94,21 +104,21 @@ mod stopped_clock { } } - impl TimeNow for StoppedClock {} + impl TimeNow for Stopped {} - impl StoppedTime for StoppedClock { + impl StoppedTime for Stopped { fn local_set(unix_time: &DurationSinceUnixEpoch) { detail::FIXED_TIME.with(|time| { *time.borrow_mut() = *unix_time; - }) + }); } fn local_set_to_app_start_time() { - Self::local_set(&detail::get_app_start_time()) + Self::local_set(&detail::get_app_start_time()); } fn local_set_to_system_time_now() { - Self::local_set(&detail::get_app_start_time()) + Self::local_set(&detail::get_app_start_time()); } fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { @@ -138,7 +148,7 @@ mod stopped_clock { } fn local_reset() { - Self::local_set(&detail::get_default_fixed_time()) + Self::local_set(&detail::get_default_fixed_time()); } } @@ -147,58 +157,58 @@ mod stopped_clock { use std::thread; use std::time::Duration; - use crate::protocol::clock::{DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, TimeNow, WorkingClock}; + use crate::protocol::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; #[test] fn it_should_default_to_zero_when_testing() { - assert_eq!(StoppedClock::now(), DurationSinceUnixEpoch::ZERO) + assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); } #[test] fn it_should_possible_to_set_the_time() { // Check we start with ZERO. - assert_eq!(StoppedClock::now(), Duration::ZERO); + assert_eq!(Stopped::now(), Duration::ZERO); // Set to Current Time and Check - let timestamp = WorkingClock::now(); - StoppedClock::local_set(×tamp); - assert_eq!(StoppedClock::now(), timestamp); + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); // Elapse the Current Time and Check - StoppedClock::local_add(×tamp).unwrap(); - assert_eq!(StoppedClock::now(), timestamp + timestamp); + Stopped::local_add(×tamp).unwrap(); + assert_eq!(Stopped::now(), timestamp + timestamp); // Reset to ZERO and Check - StoppedClock::local_reset(); - assert_eq!(StoppedClock::now(), Duration::ZERO); + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); } #[test] fn it_should_default_to_zero_on_thread_exit() { - assert_eq!(StoppedClock::now(), Duration::ZERO); - let after5 = WorkingClock::add(&Duration::from_secs(5)).unwrap(); - StoppedClock::local_set(&after5); - assert_eq!(StoppedClock::now(), after5); + assert_eq!(Stopped::now(), Duration::ZERO); + let after5 = Working::add(&Duration::from_secs(5)).unwrap(); + Stopped::local_set(&after5); + assert_eq!(Stopped::now(), after5); let t = thread::spawn(move || { // each thread starts out with the initial value of ZERO - assert_eq!(StoppedClock::now(), Duration::ZERO); + assert_eq!(Stopped::now(), Duration::ZERO); // and gets set to the current time. 
- let timestamp = WorkingClock::now(); - StoppedClock::local_set(×tamp); - assert_eq!(StoppedClock::now(), timestamp); + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); }); // wait for the thread to complete and bail out on panic t.join().unwrap(); // we retain our original value of current time + 5sec despite the child thread - assert_eq!(StoppedClock::now(), after5); + assert_eq!(Stopped::now(), after5); // Reset to ZERO and Check - StoppedClock::local_reset(); - assert_eq!(StoppedClock::now(), Duration::ZERO); + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); } } @@ -240,7 +250,7 @@ mod stopped_clock { #[test] fn it_should_get_app_start_time() { - const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1662983731, 22312); + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); } } diff --git a/src/protocol/clock/time_extent.rs b/src/protocol/clock/time_extent.rs index 3fa60de82..b4c20cd70 100644 --- a/src/protocol/clock/time_extent.rs +++ b/src/protocol/clock/time_extent.rs @@ -1,7 +1,7 @@ use std::num::{IntErrorKind, TryFromIntError}; use std::time::Duration; -use super::{ClockType, StoppedClock, TimeNow, WorkingClock}; +use super::{Stopped, TimeNow, Type, Working}; pub trait Extent: Sized + Default { type Base; @@ -10,36 +10,44 @@ pub trait Extent: Sized + Default { fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; + /// # Errors + /// + /// Will return `IntErrorKind` if `add` would overflow the internal `Duration`. fn increase(&self, add: Self::Multiplier) -> Result; + + /// # Errors + /// + /// Will return `IntErrorKind` if `sub` would underflow the internal `Duration`. 
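A `TimeExtent` is `increment` multiplied by `amount`; the totals are computed in 128-bit nanoseconds, reporting multiplication overflow through the outer `Option` and the narrowing back into a `Duration` through the inner `Result`. A standalone sketch of that double-checked arithmetic, mirroring `checked_duration_from_nanos` above:

use std::num::TryFromIntError;
use std::time::Duration;

// Stand-in for the diff's TimeExtent { increment: Base, amount: Multiplier }.
struct TimeExtent {
    increment: Duration,
    amount: u64,
}

impl TimeExtent {
    // None        -> the 128-bit multiplication overflowed.
    // Some(Err(_)) -> the product does not fit back into a Duration.
    fn total(&self) -> Option<Result<Duration, TryFromIntError>> {
        self.increment
            .as_nanos()
            .checked_mul(u128::from(self.amount))
            .map(|nanos| {
                const NANOS_PER_SEC: u128 = 1_000_000_000;
                let secs = u64::try_from(nanos / NANOS_PER_SEC)?;
                let sub_nanos = (nanos % NANOS_PER_SEC) as u32; // < 1e9, cannot truncate
                Ok(Duration::new(secs, sub_nanos))
            })
    }
}

fn main() {
    let extent = TimeExtent {
        increment: Duration::from_secs(2),
        amount: 30,
    };
    assert_eq!(extent.total(), Some(Ok(Duration::from_secs(60))));
}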
fn decrease(&self, sub: Self::Multiplier) -> Result; fn total(&self) -> Option>; fn total_next(&self) -> Option>; } -pub type TimeExtentBase = Duration; -pub type TimeExtentMultiplier = u64; -pub type TimeExtentProduct = TimeExtentBase; +pub type Base = Duration; +pub type Multiplier = u64; +pub type Product = Base; #[derive(Debug, Default, Hash, PartialEq, Eq)] pub struct TimeExtent { - pub increment: TimeExtentBase, - pub amount: TimeExtentMultiplier, + pub increment: Base, + pub amount: Multiplier, } pub const ZERO: TimeExtent = TimeExtent { - increment: TimeExtentBase::ZERO, - amount: TimeExtentMultiplier::MIN, + increment: Base::ZERO, + amount: Multiplier::MIN, }; pub const MAX: TimeExtent = TimeExtent { - increment: TimeExtentBase::MAX, - amount: TimeExtentMultiplier::MAX, + increment: Base::MAX, + amount: Multiplier::MAX, }; impl TimeExtent { - pub const fn from_sec(seconds: u64, amount: &TimeExtentMultiplier) -> Self { + #[must_use] + pub const fn from_sec(seconds: u64, amount: &Multiplier) -> Self { Self { - increment: TimeExtentBase::from_secs(seconds), + increment: Base::from_secs(seconds), amount: *amount, } } @@ -48,10 +56,10 @@ impl TimeExtent { fn checked_duration_from_nanos(time: u128) -> Result { const NANOS_PER_SEC: u32 = 1_000_000_000; - let secs = time.div_euclid(NANOS_PER_SEC as u128); - let nanos = time.rem_euclid(NANOS_PER_SEC as u128); + let secs = time.div_euclid(u128::from(NANOS_PER_SEC)); + let nanos = time.rem_euclid(u128::from(NANOS_PER_SEC)); - assert!(nanos < NANOS_PER_SEC as u128); + assert!(nanos < u128::from(NANOS_PER_SEC)); match u64::try_from(secs) { Err(error) => Err(error), @@ -60,9 +68,9 @@ fn checked_duration_from_nanos(time: u128) -> Result } impl Extent for TimeExtent { - type Base = TimeExtentBase; - type Multiplier = TimeExtentMultiplier; - type Product = TimeExtentProduct; + type Base = Base; + type Multiplier = Multiplier; + type Product = Product; fn new(increment: &Self::Base, amount: &Self::Multiplier) -> Self { Self { @@ -94,69 +102,70 @@ impl Extent for TimeExtent { fn total(&self) -> Option> { self.increment .as_nanos() - .checked_mul(self.amount as u128) + .checked_mul(u128::from(self.amount)) .map(checked_duration_from_nanos) } fn total_next(&self) -> Option> { self.increment .as_nanos() - .checked_mul((self.amount as u128) + 1) + .checked_mul(u128::from(self.amount) + 1) .map(checked_duration_from_nanos) } } -pub trait MakeTimeExtent: Sized +pub trait Make: Sized where Clock: TimeNow, { - fn now(increment: &TimeExtentBase) -> Option> { + #[must_use] + fn now(increment: &Base) -> Option> { Clock::now() .as_nanos() .checked_div((*increment).as_nanos()) - .map(|amount| match TimeExtentMultiplier::try_from(amount) { + .map(|amount| match Multiplier::try_from(amount) { Err(error) => Err(error), Ok(amount) => Ok(TimeExtent::new(increment, &amount)), }) } - fn now_after(increment: &TimeExtentBase, add_time: &Duration) -> Option> { + #[must_use] + fn now_after(increment: &Base, add_time: &Duration) -> Option> { match Clock::add(add_time) { None => None, - Some(time) => { - time.as_nanos() - .checked_div(increment.as_nanos()) - .map(|amount| match TimeExtentMultiplier::try_from(amount) { - Err(error) => Err(error), - Ok(amount) => Ok(TimeExtent::new(increment, &amount)), - }) - } + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), } } - fn now_before(increment: &TimeExtentBase, 
sub_time: &Duration) -> Option> { + #[must_use] + fn now_before(increment: &Base, sub_time: &Duration) -> Option> { match Clock::sub(sub_time) { None => None, - Some(time) => { - time.as_nanos() - .checked_div(increment.as_nanos()) - .map(|amount| match TimeExtentMultiplier::try_from(amount) { - Err(error) => Err(error), - Ok(amount) => Ok(TimeExtent::new(increment, &amount)), - }) - } + Some(time) => time + .as_nanos() + .checked_div(increment.as_nanos()) + .map(|amount| match Multiplier::try_from(amount) { + Err(error) => Err(error), + Ok(amount) => Ok(TimeExtent::new(increment, &amount)), + }), } } } #[derive(Debug)] -pub struct TimeExtentMaker {} +pub struct Maker {} -pub type WorkingTimeExtentMaker = TimeExtentMaker<{ ClockType::WorkingClock as usize }>; -pub type StoppedTimeExtentMaker = TimeExtentMaker<{ ClockType::StoppedClock as usize }>; +pub type WorkingTimeExtentMaker = Maker<{ Type::WorkingClock as usize }>; +pub type StoppedTimeExtentMaker = Maker<{ Type::StoppedClock as usize }>; -impl MakeTimeExtent for WorkingTimeExtentMaker {} -impl MakeTimeExtent for StoppedTimeExtentMaker {} +impl Make for WorkingTimeExtentMaker {} +impl Make for StoppedTimeExtentMaker {} #[cfg(not(test))] pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; @@ -168,12 +177,11 @@ pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; mod test { use crate::protocol::clock::time_extent::{ - checked_duration_from_nanos, DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent, TimeExtentBase, - TimeExtentMultiplier, TimeExtentProduct, MAX, ZERO, + checked_duration_from_nanos, Base, DefaultTimeExtentMaker, Extent, Make, Multiplier, Product, TimeExtent, MAX, ZERO, }; - use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedTime}; + use crate::protocol::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; - const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239812388723); + const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); mod fn_checked_duration_from_nanos { use std::time::Duration; @@ -190,11 +198,11 @@ mod test { #[test] fn it_should_be_the_same_as_duration_implementation_for_u64_numbers() { assert_eq!( - checked_duration_from_nanos(1232143214343432).unwrap(), - Duration::from_nanos(1232143214343432) + checked_duration_from_nanos(1_232_143_214_343_432).unwrap(), + Duration::from_nanos(1_232_143_214_343_432) ); assert_eq!( - checked_duration_from_nanos(u64::MAX as u128).unwrap(), + checked_duration_from_nanos(u128::from(u64::MAX)).unwrap(), Duration::from_nanos(u64::MAX) ); } @@ -202,7 +210,7 @@ mod test { #[test] fn it_should_work_for_some_numbers_larger_than_u64() { assert_eq!( - checked_duration_from_nanos(TIME_EXTENT_VAL.amount as u128 * NANOS_PER_SEC as u128).unwrap(), + checked_duration_from_nanos(u128::from(TIME_EXTENT_VAL.amount) * u128::from(NANOS_PER_SEC)).unwrap(), Duration::from_secs(TIME_EXTENT_VAL.amount) ); } @@ -234,7 +242,7 @@ mod test { #[test] fn it_should_make_empty_for_zero() { - assert_eq!(TimeExtent::from_sec(u64::MIN, &TimeExtentMultiplier::MIN), ZERO); + assert_eq!(TimeExtent::from_sec(u64::MIN, &Multiplier::MIN), ZERO); } #[test] fn it_should_make_from_seconds() { @@ -250,15 +258,15 @@ mod test { #[test] fn it_should_make_empty_for_zero() { - assert_eq!(TimeExtent::new(&TimeExtentBase::ZERO, &TimeExtentMultiplier::MIN), ZERO); + assert_eq!(TimeExtent::new(&Base::ZERO, &Multiplier::MIN), ZERO); } #[test] fn it_should_make_new() { assert_eq!( - TimeExtent::new(&TimeExtentBase::from_millis(2), 
&TIME_EXTENT_VAL.amount), + TimeExtent::new(&Base::from_millis(2), &TIME_EXTENT_VAL.amount), TimeExtent { - increment: TimeExtentBase::from_millis(2), + increment: Base::from_millis(2), amount: TIME_EXTENT_VAL.amount } ); @@ -324,30 +332,27 @@ mod test { #[test] fn it_should_be_zero_for_zero() { - assert_eq!(ZERO.total().unwrap().unwrap(), TimeExtentProduct::ZERO); + assert_eq!(ZERO.total().unwrap().unwrap(), Product::ZERO); } #[test] fn it_should_give_a_total() { assert_eq!( TIME_EXTENT_VAL.total().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) .total() .unwrap() .unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX)) - .total() - .unwrap() - .unwrap(), - TimeExtentProduct::from_secs(u64::MAX) + TimeExtent::new(&Base::from_secs(1), &(u64::MAX)).total().unwrap().unwrap(), + Product::from_secs(u64::MAX) ); } @@ -374,33 +379,33 @@ mod test { #[test] fn it_should_be_zero_for_zero() { - assert_eq!(ZERO.total_next().unwrap().unwrap(), TimeExtentProduct::ZERO); + assert_eq!(ZERO.total_next().unwrap().unwrap(), Product::ZERO); } #[test] fn it_should_give_a_total() { assert_eq!( TIME_EXTENT_VAL.total_next().unwrap().unwrap(), - TimeExtentProduct::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) + Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) + TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) .total_next() .unwrap() .unwrap(), - TimeExtentProduct::new( + Product::new( TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount), - TimeExtentBase::from_millis(2).as_nanos().try_into().unwrap() + Base::from_millis(2).as_nanos().try_into().unwrap() ) ); assert_eq!( - TimeExtent::new(&TimeExtentBase::from_secs(1), &(u64::MAX - 1)) + TimeExtent::new(&Base::from_secs(1), &(u64::MAX - 1)) .total_next() .unwrap() .unwrap(), - TimeExtentProduct::from_secs(u64::MAX) + Product::from_secs(u64::MAX) ); } @@ -439,7 +444,7 @@ mod test { } ); - DefaultClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + Current::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); assert_eq!( DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), @@ -449,16 +454,14 @@ mod test { #[test] fn it_should_fail_for_zero() { - assert_eq!(DefaultTimeExtentMaker::now(&TimeExtentBase::ZERO), None); + assert_eq!(DefaultTimeExtentMaker::now(&Base::ZERO), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultTimeExtentMaker::now(&TimeExtentBase::from_millis(1)) - .unwrap() - .unwrap_err(), + DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(), u64::try_from(u128::MAX).unwrap_err() ); } @@ -484,20 +487,17 @@ mod test { #[test] fn it_should_fail_for_zero() { - assert_eq!( - 
DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None); - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!(DefaultTimeExtentMaker::now_after(&TimeExtentBase::ZERO, &Duration::MAX), None); + Current::local_set(&DurationSinceUnixEpoch::MAX); + assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultTimeExtentMaker::now_after(&TimeExtentBase::from_millis(1), &Duration::ZERO) + DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -511,40 +511,34 @@ mod test { #[test] fn it_should_give_a_time_extent() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before( - &TimeExtentBase::from_secs(u32::MAX as u64), - &Duration::from_secs(u32::MAX as u64) + &Base::from_secs(u64::from(u32::MAX)), + &Duration::from_secs(u64::from(u32::MAX)) ) .unwrap() .unwrap(), TimeExtent { - increment: TimeExtentBase::from_secs(u32::MAX as u64), - amount: 4294967296 + increment: Base::from_secs(u64::from(u32::MAX)), + amount: 4_294_967_296 } ); } #[test] fn it_should_fail_for_zero() { - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::ZERO), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::ZERO), None); - assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::ZERO, &Duration::MAX), - None - ); + assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { - DefaultClock::local_set(&DurationSinceUnixEpoch::MAX); + Current::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( - DefaultTimeExtentMaker::now_before(&TimeExtentBase::from_millis(1), &Duration::ZERO) + DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO) .unwrap() .unwrap_err(), u64::try_from(u128::MAX).unwrap_err() diff --git a/src/protocol/common.rs b/src/protocol/common.rs index ce1cbf253..527ae9ebc 100644 --- a/src/protocol/common.rs +++ b/src/protocol/common.rs @@ -25,315 +25,3 @@ pub enum AnnounceEventDef { #[derive(Serialize, Deserialize)] #[serde(remote = "NumberOfBytes")] pub struct NumberOfBytesDef(pub i64); - -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct InfoHash(pub [u8; 20]); - -impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let mut chars = [0u8; 40]; - binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); - write!(f, "{}", std::str::from_utf8(&chars).unwrap()) - } -} - -impl std::str::FromStr for InfoHash { - type Err = binascii::ConvertError; - - fn from_str(s: &str) -> Result { - let mut i = Self([0u8; 20]); - if s.len() != 40 { - return Err(binascii::ConvertError::InvalidInputLength); - } - binascii::hex2bin(s.as_bytes(), &mut i.0)?; - Ok(i) - } -} - -impl Ord for InfoHash { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0.cmp(&other.0) - } -} - -impl std::cmp::PartialOrd for InfoHash { - fn partial_cmp(&self, other: &InfoHash) -> Option { - self.0.partial_cmp(&other.0) - } -} - -impl std::convert::From<&[u8]> for InfoHash { - fn 
from(data: &[u8]) -> InfoHash { - assert_eq!(data.len(), 20); - let mut ret = InfoHash([0u8; 20]); - ret.0.clone_from_slice(data); - ret - } -} - -impl std::convert::From<[u8; 20]> for InfoHash { - fn from(val: [u8; 20]) -> Self { - InfoHash(val) - } -} - -impl serde::ser::Serialize for InfoHash { - fn serialize(&self, serializer: S) -> Result { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - let str_out = std::str::from_utf8(bytes_out).unwrap(); - serializer.serialize_str(str_out) - } -} - -impl<'de> serde::de::Deserialize<'de> for InfoHash { - fn deserialize>(des: D) -> Result { - des.deserialize_str(InfoHashVisitor) - } -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use serde::{Deserialize, Serialize}; - use serde_json::json; - - use crate::InfoHash; - - #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] - struct ContainingInfoHash { - pub info_hash: InfoHash, - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { - let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); - assert!(info_hash.is_ok()); - } - - #[test] - fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { - let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); - assert!(info_hash.is_err()); - } - - #[test] - fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { - let info_hash = InfoHash::from_str(&"F".repeat(39)); - assert!(info_hash.is_err()); - - let info_hash = InfoHash::from_str(&"F".repeat(41)); - assert!(info_hash.is_err()); - } - - #[test] - fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { - let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - - let output = format!("{}", info_hash); - - assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { - let info_hash: InfoHash = [255u8; 20].as_slice().into(); - - assert_eq!( - info_hash, - InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - ); - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { - let info_hash: InfoHash = [255u8; 20].into(); - - assert_eq!( - info_hash, - InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - ); - } - - #[test] - fn an_info_hash_can_be_serialized() { - let s = ContainingInfoHash { - info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), - }; - - let json_serialized_value = serde_json::to_string(&s).unwrap(); - - assert_eq!( - json_serialized_value, - r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# - ); - } - - #[test] - fn an_info_hash_can_be_deserialized() { - let json = json!({ - "info_hash": "ffffffffffffffffffffffffffffffffffffffff", - }); - - let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); - - assert_eq!( - s, - ContainingInfoHash { - info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - } - ); - } -} - -struct InfoHashVisitor; - -impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { - type Value = InfoHash; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a 40 character long hash") - } - - fn visit_str(self, v: &str) -> Result { - if v.len() != 40 { - return Err(serde::de::Error::invalid_value( - 
serde::de::Unexpected::Str(v), - &"expected a 40 character long string", - )); - } - - let mut res = InfoHash([0u8; 20]); - - if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"expected a hexadecimal string", - )); - } else { - Ok(res) - } - } -} - -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] -pub struct PeerId(pub [u8; 20]); - -impl std::fmt::Display for PeerId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut buffer = [0u8; 20]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); - match bytes_out { - Some(bytes) => write!(f, "{}", std::str::from_utf8(bytes).unwrap()), - None => write!(f, ""), - } - } -} - -impl PeerId { - pub fn get_id(&self) -> Option { - let buff_size = self.0.len() * 2; - let mut tmp: Vec = vec![0; buff_size]; - binascii::bin2hex(&self.0, &mut tmp).unwrap(); - - std::str::from_utf8(&tmp).ok().map(|id| id.to_string()) - } - - pub fn get_client_name(&self) -> Option<&'static str> { - if self.0[0] == b'M' { - return Some("BitTorrent"); - } - if self.0[0] == b'-' { - let name = match &self.0[1..3] { - b"AG" => "Ares", - b"A~" => "Ares", - b"AR" => "Arctic", - b"AV" => "Avicora", - b"AX" => "BitPump", - b"AZ" => "Azureus", - b"BB" => "BitBuddy", - b"BC" => "BitComet", - b"BF" => "Bitflu", - b"BG" => "BTG (uses Rasterbar libtorrent)", - b"BR" => "BitRocket", - b"BS" => "BTSlave", - b"BX" => "~Bittorrent X", - b"CD" => "Enhanced CTorrent", - b"CT" => "CTorrent", - b"DE" => "DelugeTorrent", - b"DP" => "Propagate Data Client", - b"EB" => "EBit", - b"ES" => "electric sheep", - b"FT" => "FoxTorrent", - b"FW" => "FrostWire", - b"FX" => "Freebox BitTorrent", - b"GS" => "GSTorrent", - b"HL" => "Halite", - b"HN" => "Hydranode", - b"KG" => "KGet", - b"KT" => "KTorrent", - b"LH" => "LH-ABC", - b"LP" => "Lphant", - b"LT" => "libtorrent", - b"lt" => "libTorrent", - b"LW" => "LimeWire", - b"MO" => "MonoTorrent", - b"MP" => "MooPolice", - b"MR" => "Miro", - b"MT" => "MoonlightTorrent", - b"NX" => "Net Transport", - b"PD" => "Pando", - b"qB" => "qBittorrent", - b"QD" => "QQDownload", - b"QT" => "Qt 4 Torrent example", - b"RT" => "Retriever", - b"S~" => "Shareaza alpha/beta", - b"SB" => "~Swiftbit", - b"SS" => "SwarmScope", - b"ST" => "SymTorrent", - b"st" => "sharktorrent", - b"SZ" => "Shareaza", - b"TN" => "TorrentDotNET", - b"TR" => "Transmission", - b"TS" => "Torrentstorm", - b"TT" => "TuoTu", - b"UL" => "uLeecher!", - b"UT" => "µTorrent", - b"UW" => "µTorrent Web", - b"VG" => "Vagaa", - b"WD" => "WebTorrent Desktop", - b"WT" => "BitLet", - b"WW" => "WebTorrent", - b"WY" => "FireTorrent", - b"XL" => "Xunlei", - b"XT" => "XanTorrent", - b"XX" => "Xtorrent", - b"ZT" => "ZipTorrent", - _ => return None, - }; - Some(name) - } else { - None - } - } -} - -impl Serialize for PeerId { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - #[derive(Serialize)] - struct PeerIdInfo<'a> { - id: Option, - client: Option<&'a str>, - } - - let obj = PeerIdInfo { - id: self.get_id(), - client: self.get_client_name(), - }; - obj.serialize(serializer) - } -} diff --git a/src/protocol/crypto.rs b/src/protocol/crypto.rs index 18cfaf5e6..a335e2dba 100644 --- a/src/protocol/crypto.rs +++ b/src/protocol/crypto.rs @@ -1,18 +1,18 @@ pub mod keys { pub mod seeds { - use self::detail::DEFAULT_SEED; + use self::detail::CURRENT_SEED; use crate::ephemeral_instance_keys::{Seed, RANDOM_SEED}; - pub trait SeedKeeper { + pub trait 
Keeper { type Seed: Sized + Default + AsMut<[u8]>; fn get_seed() -> &'static Self::Seed; } - pub struct InstanceSeed; - pub struct DefaultSeed; + pub struct Instance; + pub struct Current; - impl SeedKeeper for InstanceSeed { + impl Keeper for Instance { type Seed = Seed; fn get_seed() -> &'static Self::Seed { @@ -20,24 +20,24 @@ pub mod keys { } } - impl SeedKeeper for DefaultSeed { + impl Keeper for Current { type Seed = Seed; #[allow(clippy::needless_borrow)] fn get_seed() -> &'static Self::Seed { - &DEFAULT_SEED + &CURRENT_SEED } } #[cfg(test)] mod tests { use super::detail::ZEROED_TEST_SEED; - use super::{DefaultSeed, InstanceSeed, SeedKeeper}; + use super::{Current, Instance, Keeper}; use crate::ephemeral_instance_keys::Seed; pub struct ZeroedTestSeed; - impl SeedKeeper for ZeroedTestSeed { + impl Keeper for ZeroedTestSeed { type Seed = Seed; #[allow(clippy::needless_borrow)] @@ -48,12 +48,12 @@ pub mod keys { #[test] fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { - assert_eq!(DefaultSeed::get_seed(), ZeroedTestSeed::get_seed()) + assert_eq!(Current::get_seed(), ZeroedTestSeed::get_seed()); } #[test] fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { - assert_ne!(DefaultSeed::get_seed(), InstanceSeed::get_seed()) + assert_ne!(Current::get_seed(), Instance::get_seed()); } } @@ -64,10 +64,10 @@ pub mod keys { pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; #[cfg(test)] - pub use ZEROED_TEST_SEED as DEFAULT_SEED; + pub use ZEROED_TEST_SEED as CURRENT_SEED; #[cfg(not(test))] - pub use crate::ephemeral_instance_keys::RANDOM_SEED as DEFAULT_SEED; + pub use crate::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; #[cfg(test)] mod tests { @@ -75,22 +75,22 @@ pub mod keys { use crate::ephemeral_instance_keys::RANDOM_SEED; use crate::protocol::crypto::keys::seeds::detail::ZEROED_TEST_SEED; - use crate::protocol::crypto::keys::seeds::DEFAULT_SEED; + use crate::protocol::crypto::keys::seeds::CURRENT_SEED; #[test] fn it_should_have_a_zero_test_seed() { - assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]) + assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); } #[test] fn it_should_default_to_zeroed_seed_when_testing() { - assert_eq!(*DEFAULT_SEED, *ZEROED_TEST_SEED) + assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); } #[test] fn it_should_have_a_large_random_seed() { - assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u64::MAX as u128); - assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u64::MAX as u128); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); } } } diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs new file mode 100644 index 000000000..3b9b2fa35 --- /dev/null +++ b/src/protocol/info_hash.rs @@ -0,0 +1,190 @@ +#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] +pub struct InfoHash(pub [u8; 20]); + +impl std::fmt::Display for InfoHash { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let mut chars = [0u8; 40]; + binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); + write!(f, "{}", std::str::from_utf8(&chars).unwrap()) + } +} + +impl std::str::FromStr for InfoHash { + type Err = binascii::ConvertError; + + fn from_str(s: &str) -> Result { + let mut i = Self([0u8; 20]); + if s.len() != 40 { + return Err(binascii::ConvertError::InvalidInputLength); + } + binascii::hex2bin(s.as_bytes(), &mut i.0)?; + Ok(i) + } +} + 
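The relocated `InfoHash` keeps its contract: exactly 40 hex characters in, lowercase hex out. A round-trip sketch against a reduced copy of the type, using the same `binascii` crate:

use std::str::FromStr;

// Reduced copy of the type above, for a self-contained round trip.
#[derive(Debug, PartialEq)]
struct InfoHash([u8; 20]);

impl FromStr for InfoHash {
    type Err = binascii::ConvertError;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.len() != 40 {
            return Err(binascii::ConvertError::InvalidInputLength);
        }
        let mut out = [0u8; 20];
        binascii::hex2bin(s.as_bytes(), &mut out)?;
        Ok(InfoHash(out))
    }
}

fn main() {
    // Mixed case parses; the stored bytes are identical either way.
    let upper = InfoHash::from_str(&"FF".repeat(20)).unwrap();
    let lower = InfoHash::from_str(&"ff".repeat(20)).unwrap();
    assert_eq!(upper, lower);
    assert!(InfoHash::from_str("not-a-hash").is_err());
}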
+impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl std::cmp::PartialOrd for InfoHash { + fn partial_cmp(&self, other: &InfoHash) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl std::convert::From<&[u8]> for InfoHash { + fn from(data: &[u8]) -> InfoHash { + assert_eq!(data.len(), 20); + let mut ret = InfoHash([0u8; 20]); + ret.0.clone_from_slice(data); + ret + } +} + +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(val: [u8; 20]) -> Self { + InfoHash(val) + } +} + +impl serde::ser::Serialize for InfoHash { + fn serialize(&self, serializer: S) -> Result { + let mut buffer = [0u8; 40]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + let str_out = std::str::from_utf8(bytes_out).unwrap(); + serializer.serialize_str(str_out) + } +} + +impl<'de> serde::de::Deserialize<'de> for InfoHash { + fn deserialize>(des: D) -> Result { + des.deserialize_str(InfoHashVisitor) + } +} + +struct InfoHashVisitor; + +impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { + type Value = InfoHash; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(formatter, "a 40 character long hash") + } + + fn visit_str(self, v: &str) -> Result { + if v.len() != 40 { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"expected a 40 character long string", + )); + } + + let mut res = InfoHash([0u8; 20]); + + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"expected a hexadecimal string", + )); + }; + Ok(res) + } +} + +#[cfg(test)] +mod tests { + + use std::str::FromStr; + + use serde::{Deserialize, Serialize}; + use serde_json::json; + + use super::InfoHash; + + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] + struct ContainingInfoHash { + pub info_hash: InfoHash, + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); + assert!(info_hash.is_ok()); + } + + #[test] + fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { + let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { + let info_hash = InfoHash::from_str(&"F".repeat(39)); + assert!(info_hash.is_err()); + + let info_hash = InfoHash::from_str(&"F".repeat(41)); + assert!(info_hash.is_err()); + } + + #[test] + fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { + let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); + + let output = format!("{}", info_hash); + + assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { + let info_hash: InfoHash = [255u8; 20].as_slice().into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { + let info_hash: InfoHash = [255u8; 20].into(); + + assert_eq!( + info_hash, + InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + ); + } + + #[test] + fn an_info_hash_can_be_serialized() { + let s = ContainingInfoHash { + 
info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), + }; + + let json_serialized_value = serde_json::to_string(&s).unwrap(); + + assert_eq!( + json_serialized_value, + r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# + ); + } + + #[test] + fn an_info_hash_can_be_deserialized() { + let json = json!({ + "info_hash": "ffffffffffffffffffffffffffffffffffffffff", + }); + + let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); + + assert_eq!( + s, + ContainingInfoHash { + info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() + } + ); + } +} diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs index 85e4f90ad..bd4310dcf 100644 --- a/src/protocol/mod.rs +++ b/src/protocol/mod.rs @@ -1,4 +1,5 @@ pub mod clock; pub mod common; pub mod crypto; +pub mod info_hash; pub mod utils; diff --git a/src/protocol/utils.rs b/src/protocol/utils.rs index ac20aa41e..cec02ceaf 100644 --- a/src/protocol/utils.rs +++ b/src/protocol/utils.rs @@ -1,5 +1,9 @@ use super::clock::DurationSinceUnixEpoch; +/// # Errors +/// +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { + #[allow(clippy::cast_possible_truncation)] ser.serialize_u64(unix_time_value.as_millis() as u64) } diff --git a/src/setup.rs b/src/setup.rs index 9906a2d03..a7b7c5a82 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -3,11 +3,11 @@ use std::sync::Arc; use log::warn; use tokio::task::JoinHandle; +use crate::config::Configuration; use crate::jobs::{http_tracker, torrent_cleanup, tracker_api, udp_tracker}; -use crate::tracker::TorrentTracker; -use crate::Configuration; +use crate::tracker; -pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { +pub async fn setup(config: &Configuration, tracker: Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); // Load peer keys @@ -35,7 +35,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< udp_tracker_config.bind_address, config.mode ); } else { - jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())) + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); } } @@ -54,7 +54,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Vec< // Remove torrents without peers, every interval if config.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(config, tracker.clone())); + jobs.push(torrent_cleanup::start_job(config, &tracker)); } jobs diff --git a/src/stats.rs b/src/stats.rs index 1f387a084..8f87c01a3 100644 --- a/src/stats.rs +++ b/src/stats.rs @@ -1,15 +1,16 @@ -use crate::statistics::{StatsRepository, StatsTracker, TrackerStatisticsEventSender}; +use crate::tracker::statistics; -pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, StatsRepository) { +#[must_use] +pub fn setup_statistics(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { let mut stats_event_sender = None; - let mut stats_tracker = StatsTracker::new(); + let mut stats_tracker = statistics::Keeper::new(); if tracker_usage_statistics { stats_event_sender = Some(stats_tracker.run_event_listener()); } - (stats_event_sender, stats_tracker.stats_repository) + (stats_event_sender, stats_tracker.repository) } #[cfg(test)] diff --git a/src/tracker/key.rs b/src/tracker/auth.rs similarity index 55% rename from src/tracker/key.rs rename to src/tracker/auth.rs index 1bf0557a1..7ac6d6939 100644 --- a/src/tracker/key.rs +++ 
b/src/tracker/auth.rs @@ -6,10 +6,14 @@ use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::Serialize; -use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time, TimeNow}; -use crate::AUTH_KEY_LENGTH; - -pub fn generate_auth_key(lifetime: Duration) -> AuthKey { +use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time, TimeNow}; +use crate::protocol::common::AUTH_KEY_LENGTH; + +#[must_use] +/// # Panics +/// +/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. +pub fn generate(lifetime: Duration) -> Key { let key: String = thread_rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) @@ -18,47 +22,57 @@ pub fn generate_auth_key(lifetime: Duration) -> AuthKey { debug!("Generated key: {}, valid for: {:?} seconds", key, lifetime); - AuthKey { + Key { key, - valid_until: Some(DefaultClock::add(&lifetime).unwrap()), + valid_until: Some(Current::add(&lifetime).unwrap()), } } -pub fn verify_auth_key(auth_key: &AuthKey) -> Result<(), Error> { - let current_time: DurationSinceUnixEpoch = DefaultClock::now(); - if auth_key.valid_until.is_none() { - return Err(Error::KeyInvalid); - } - if auth_key.valid_until.unwrap() < current_time { - return Err(Error::KeyExpired); +/// # Errors +/// +/// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. +/// +/// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. +pub fn verify(auth_key: &Key) -> Result<(), Error> { + let current_time: DurationSinceUnixEpoch = Current::now(); + + match auth_key.valid_until { + Some(valid_untill) => { + if valid_untill < current_time { + Err(Error::KeyExpired) + } else { + Ok(()) + } + } + None => Err(Error::KeyInvalid), } - - Ok(()) } #[derive(Serialize, Debug, Eq, PartialEq, Clone)] -pub struct AuthKey { +pub struct Key { pub key: String, pub valid_until: Option, } -impl AuthKey { - pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { +impl Key { + #[must_use] + pub fn from_buffer(key_buffer: [u8; AUTH_KEY_LENGTH]) -> Option { if let Ok(key) = String::from_utf8(Vec::from(key_buffer)) { - Some(AuthKey { key, valid_until: None }) + Some(Key { key, valid_until: None }) } else { None } } - pub fn from_string(key: &str) -> Option { - if key.len() != AUTH_KEY_LENGTH { - None - } else { - Some(AuthKey { + #[must_use] + pub fn from_string(key: &str) -> Option { + if key.len() == AUTH_KEY_LENGTH { + Some(Key { key: key.to_string(), valid_until: None, }) + } else { + None } } } @@ -85,12 +99,12 @@ impl From for Error { mod tests { use std::time::Duration; - use crate::protocol::clock::{DefaultClock, StoppedTime}; - use crate::tracker::key; + use crate::protocol::clock::{Current, StoppedTime}; + use crate::tracker::auth; #[test] fn auth_key_from_buffer() { - let auth_key = key::AuthKey::from_buffer([ + let auth_key = auth::Key::from_buffer([ 89, 90, 83, 108, 52, 108, 77, 90, 117, 112, 82, 117, 79, 112, 83, 82, 67, 51, 107, 114, 73, 75, 82, 53, 66, 80, 66, 49, 52, 110, 114, 74, ]); @@ -102,7 +116,7 @@ mod tests { #[test] fn auth_key_from_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = key::AuthKey::from_string(key_string); + let auth_key = auth::Key::from_string(key_string); assert!(auth_key.is_some()); assert_eq!(auth_key.unwrap().key, key_string); @@ -110,27 +124,27 @@ mod tests { #[test] fn generate_valid_auth_key() { - let auth_key = key::generate_auth_key(Duration::new(9999, 0)); + let auth_key = auth::generate(Duration::new(9999, 0)); - 
assert!(key::verify_auth_key(&auth_key).is_ok()); + assert!(auth::verify(&auth_key).is_ok()); } #[test] fn generate_and_check_expired_auth_key() { // Set the time to the current time. - DefaultClock::local_set_to_system_time_now(); + Current::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. - let auth_key = key::generate_auth_key(Duration::from_secs(19)); + let auth_key = auth::generate(Duration::from_secs(19)); // Mock the time has passed 10 sec. - DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); + Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify_auth_key(&auth_key).is_ok()); + assert!(auth::verify(&auth_key).is_ok()); // Mock the time has passed another 10 sec. - DefaultClock::local_add(&Duration::from_secs(10)).unwrap(); + Current::local_add(&Duration::from_secs(10)).unwrap(); - assert!(key::verify_auth_key(&auth_key).is_err()); + assert!(auth::verify(&auth_key).is_err()); } } diff --git a/src/tracker/mod.rs b/src/tracker/mod.rs index a3eecd427..4469d682b 100644 --- a/src/tracker/mod.rs +++ b/src/tracker/mod.rs @@ -1,4 +1,4 @@ -pub mod key; +pub mod auth; pub mod mode; pub mod peer; pub mod statistics; @@ -13,36 +13,33 @@ use std::time::Duration; use tokio::sync::mpsc::error::SendError; use tokio::sync::{RwLock, RwLockReadGuard}; -use crate::databases::database; -use crate::databases::database::Database; -use crate::mode::TrackerMode; -use crate::peer::TorrentPeer; -use crate::protocol::common::InfoHash; -use crate::statistics::{StatsRepository, TrackerStatistics, TrackerStatisticsEvent, TrackerStatisticsEventSender}; -use crate::tracker::key::AuthKey; -use crate::tracker::torrent::{TorrentEntry, TorrentError, TorrentStats}; -use crate::Configuration; - -pub struct TorrentTracker { +use crate::config::Configuration; +use crate::databases::{self, Database}; +use crate::protocol::info_hash::InfoHash; + +pub struct Tracker { pub config: Arc<Configuration>, - mode: TrackerMode, - keys: RwLock<std::collections::HashMap<String, AuthKey>>, + mode: mode::Mode, + keys: RwLock<std::collections::HashMap<String, auth::Key>>, whitelist: RwLock<std::collections::HashSet<InfoHash>>, - torrents: RwLock<std::collections::BTreeMap<InfoHash, TorrentEntry>>, - stats_event_sender: Option<Box<dyn TrackerStatisticsEventSender>>, - stats_repository: StatsRepository, + torrents: RwLock<std::collections::BTreeMap<InfoHash, torrent::Entry>>, + stats_event_sender: Option<Box<dyn statistics::EventSender>>, + stats_repository: statistics::Repo, database: Box<dyn Database>, } -impl TorrentTracker { +impl Tracker { + /// # Errors + /// + /// Will return a `r2d2::Error` if unable to connect to the database.
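// Example (editor's sketch, not part of the patch): how the renamed pieces fit
// together when building a `Tracker`. `Configuration::default()` is an assumed
// constructor; the other calls follow the signatures shown in this diff.
fn example_build_tracker() -> Result<std::sync::Arc<Tracker>, r2d2::Error> {
    let config = std::sync::Arc::new(Configuration::default());
    // `Keeper::new_active_instance` spawns the statistics event listener and
    // hands back the sender half together with the shared repository.
    let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance();
    let tracker = Tracker::new(&config, Some(stats_event_sender), stats_repository)?;
    Ok(std::sync::Arc::new(tracker))
}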
pub fn new( - config: Arc<Configuration>, - stats_event_sender: Option<Box<dyn TrackerStatisticsEventSender>>, - stats_repository: StatsRepository, - ) -> Result<TorrentTracker, r2d2::Error> { - let database = database::connect_database(&config.db_driver, &config.db_path)?; + config: &Arc<Configuration>, + stats_event_sender: Option<Box<dyn statistics::EventSender>>, + stats_repository: statistics::Repo, + ) -> Result<Tracker, r2d2::Error> { + let database = databases::connect(&config.db_driver, &config.db_path)?; - Ok(TorrentTracker { + Ok(Tracker { config: config.clone(), mode: config.mode, keys: RwLock::new(std::collections::HashMap::new()), @@ -55,59 +52,75 @@ impl TorrentTracker { } pub fn is_public(&self) -> bool { - self.mode == TrackerMode::Public + self.mode == mode::Mode::Public } pub fn is_private(&self) -> bool { - self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed + self.mode == mode::Mode::Private || self.mode == mode::Mode::PrivateListed } pub fn is_whitelisted(&self) -> bool { - self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed + self.mode == mode::Mode::Listed || self.mode == mode::Mode::PrivateListed } - pub async fn generate_auth_key(&self, lifetime: Duration) -> Result<AuthKey, database::Error> { - let auth_key = key::generate_auth_key(lifetime); + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. + pub async fn generate_auth_key(&self, lifetime: Duration) -> Result<auth::Key, databases::error::Error> { + let auth_key = auth::generate(lifetime); self.database.add_key_to_keys(&auth_key).await?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) } - pub async fn remove_auth_key(&self, key: &str) -> Result<(), database::Error> { + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `key` from the database. + pub async fn remove_auth_key(&self, key: &str) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key).await?; self.keys.write().await.remove(key); Ok(()) } - pub async fn verify_auth_key(&self, auth_key: &AuthKey) -> Result<(), key::Error> { + /// # Errors + /// + /// Will return an `auth::Error` if the `auth_key` is unknown, expired, or otherwise invalid. + pub async fn verify_auth_key(&self, auth_key: &auth::Key) -> Result<(), auth::Error> { match self.keys.read().await.get(&auth_key.key) { - None => Err(key::Error::KeyInvalid), - Some(key) => key::verify_auth_key(key), + None => Err(auth::Error::KeyInvalid), + Some(key) => auth::verify(key), } } - pub async fn load_keys(&self) -> Result<(), database::Error> { + /// # Errors + /// + /// Will return a `database::Error` if unable to `load_keys` from the database. + pub async fn load_keys(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.database.load_keys().await?; let mut keys = self.keys.write().await; keys.clear(); for key in keys_from_database { - let _ = keys.insert(key.key.clone(), key); + keys.insert(key.key.clone(), key); } Ok(()) } - // Adding torrents is not relevant to public trackers. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + /// Adding torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database.
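// Example (sketch, not part of the patch): the auth-key lifecycle wired
// together from the three methods above, using only signatures from this diff.
async fn example_key_lifecycle(tracker: &Tracker) {
    // Generate a key valid for two minutes; it is stored in the database and
    // cached in the in-memory `keys` map.
    let key = tracker
        .generate_auth_key(Duration::from_secs(120))
        .await
        .expect("database should accept the new key");

    // Verification succeeds while the key is known and not yet expired.
    assert!(tracker.verify_auth_key(&key).await.is_ok());

    // Removing the key from both the database and the map invalidates it.
    tracker
        .remove_auth_key(&key.key)
        .await
        .expect("database should remove the key");
    assert!(tracker.verify_auth_key(&key).await.is_err());
}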
+ pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.add_torrent_to_database_whitelist(info_hash).await?; self.add_torrent_to_memory_whitelist(info_hash).await; Ok(()) } /// It adds a torrent to the whitelist if it has not been whitelisted previously - async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { if self.database.is_info_hash_whitelisted(info_hash).await.unwrap() { return Ok(()); } @@ -121,8 +134,12 @@ impl TorrentTracker { self.whitelist.write().await.insert(*info_hash) } - // Removing torrents is not relevant to public trackers. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), database::Error> { + /// Removing torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.database.remove_info_hash_from_whitelist(*info_hash).await?; self.whitelist.write().await.remove(info_hash); Ok(()) } @@ -132,7 +149,10 @@ impl TorrentTracker { self.whitelist.read().await.contains(info_hash) } - pub async fn load_whitelist(&self) -> Result<(), database::Error> { + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list of whitelisted `info_hash`s from the database. + pub async fn load_whitelist(&self) -> Result<(), databases::error::Error> { let whitelisted_torrents_from_database = self.database.load_whitelist().await?; let mut whitelist = self.whitelist.write().await; @@ -145,7 +165,14 @@ impl TorrentTracker { Ok(()) } - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option<AuthKey>) -> Result<(), TorrentError> { + /// # Errors + /// + /// Will return a `torrent::Error::PeerKeyNotValid` if the `key` is not valid. + /// + /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. + /// + /// Will return a `torrent::Error::TorrentNotWhitelisted` if the Tracker is in listed mode and the `info_hash` is not whitelisted. + pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option<auth::Key>) -> Result<(), torrent::Error> { // no authentication needed in public mode if self.is_public() { return Ok(()); } @@ -156,25 +183,29 @@ impl TorrentTracker { match key { Some(key) => { if self.verify_auth_key(key).await.is_err() { - return Err(TorrentError::PeerKeyNotValid); + return Err(torrent::Error::PeerKeyNotValid); } } None => { - return Err(TorrentError::PeerNotAuthenticated); + return Err(torrent::Error::PeerNotAuthenticated); } } } // check if info_hash is whitelisted if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { - return Err(TorrentError::TorrentNotWhitelisted); + return Err(torrent::Error::TorrentNotWhitelisted); } Ok(()) } - // Loading the torrents from database into memory - pub async fn load_persistent_torrents(&self) -> Result<(), database::Error> { + /// Loads the torrents from the database into memory. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database.
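// Sketch (not part of the patch): how the mode predicates above partition
// `mode::Mode`, and how `authenticate_request` combines them. Public mode
// accepts everything; Private and PrivateListed require a valid `auth::Key`;
// Listed and PrivateListed additionally require a whitelisted info-hash.
async fn example_announce_guard(tracker: &Tracker, info_hash: &InfoHash, key: Option<auth::Key>) -> bool {
    // Returns Ok(()) only when every check required by the current mode passes.
    tracker.authenticate_request(info_hash, &key).await.is_ok()
}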
+ pub async fn load_persistent_torrents(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.database.load_persistent_torrents().await?; let mut torrents = self.torrents.write().await; @@ -184,8 +215,8 @@ impl TorrentTracker { continue; } - let torrent_entry = TorrentEntry { - peers: Default::default(), + let torrent_entry = torrent::Entry { + peers: BTreeMap::default(), completed, }; @@ -196,30 +227,30 @@ impl TorrentTracker { } /// Get all torrent peers for a given torrent filtering out the peer with the client address - pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash, client_addr: &SocketAddr) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(Some(client_addr)).into_iter().cloned().collect(), + Some(entry) => entry.get_peers(Some(client_addr)).into_iter().copied().collect(), } } /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { + pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { let read_lock = self.torrents.read().await; match read_lock.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(None).into_iter().cloned().collect(), + Some(entry) => entry.get_peers(None).into_iter().copied().collect(), } } - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &TorrentPeer) -> TorrentStats { + pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwamStats { let mut torrents = self.torrents.write().await; let torrent_entry = match torrents.entry(*info_hash) { - Entry::Vacant(vacant) => vacant.insert(TorrentEntry::new()), + Entry::Vacant(vacant) => vacant.insert(torrent::Entry::new()), Entry::Occupied(entry) => entry.into_mut(), }; @@ -235,22 +266,22 @@ impl TorrentTracker { let (seeders, completed, leechers) = torrent_entry.get_stats(); - TorrentStats { + torrent::SwamStats { + completed, seeders, leechers, - completed, } } - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { + pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { self.torrents.read().await } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, statistics::Metrics> { self.stats_repository.get_stats().await } - pub async fn send_stats_event(&self, event: TrackerStatisticsEvent) -> Option>> { + pub async fn send_stats_event(&self, event: statistics::Event) -> Option>> { match &self.stats_event_sender { None => None, Some(stats_event_sender) => stats_event_sender.send_event(event).await, @@ -266,9 +297,10 @@ impl TorrentTracker { torrents_lock.retain(|_, torrent_entry| { torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - match self.config.persistent_torrent_completed_stat { - true => torrent_entry.completed > 0 || !torrent_entry.peers.is_empty(), - false => !torrent_entry.peers.is_empty(), + if self.config.persistent_torrent_completed_stat { + torrent_entry.completed > 0 || !torrent_entry.peers.is_empty() + } else { + !torrent_entry.peers.is_empty() } }); } else { diff --git a/src/tracker/mode.rs b/src/tracker/mode.rs index f444b4523..a0dba6e67 100644 --- a/src/tracker/mode.rs +++ b/src/tracker/mode.rs @@ -2,7 +2,7 @@ use serde; use serde::{Deserialize, Serialize}; #[derive(Serialize, 
Deserialize, Copy, Clone, PartialEq, Eq, Debug)] -pub enum TrackerMode { +pub enum Mode { // Will track every new info hash and serve every peer. #[serde(rename = "public")] Public, diff --git a/src/tracker/peer.rs b/src/tracker/peer.rs index 42ef6a60b..2da257d3e 100644 --- a/src/tracker/peer.rs +++ b/src/tracker/peer.rs @@ -4,14 +4,14 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use serde; use serde::Serialize; -use crate::http::AnnounceRequest; -use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, Time}; -use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef, PeerId}; +use crate::http::request::Announce; +use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Time}; +use crate::protocol::common::{AnnounceEventDef, NumberOfBytesDef}; use crate::protocol::utils::ser_unix_time_value; #[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] -pub struct TorrentPeer { - pub peer_id: PeerId, +pub struct Peer { + pub peer_id: Id, pub peer_addr: SocketAddr, #[serde(serialize_with = "ser_unix_time_value")] pub updated: DurationSinceUnixEpoch, @@ -25,18 +25,19 @@ pub struct TorrentPeer { pub event: AnnounceEvent, } -impl TorrentPeer { +impl Peer { + #[must_use] pub fn from_udp_announce_request( announce_request: &aquatic_udp_protocol::AnnounceRequest, remote_ip: IpAddr, host_opt_ip: Option, ) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); + let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port.0); - TorrentPeer { - peer_id: PeerId(announce_request.peer_id.0), + Peer { + peer_id: Id(announce_request.peer_id.0), peer_addr, - updated: DefaultClock::now(), + updated: Current::now(), uploaded: announce_request.bytes_uploaded, downloaded: announce_request.bytes_downloaded, left: announce_request.bytes_left, @@ -44,12 +45,9 @@ impl TorrentPeer { } } - pub fn from_http_announce_request( - announce_request: &AnnounceRequest, - remote_ip: IpAddr, - host_opt_ip: Option, - ) -> Self { - let peer_addr = TorrentPeer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); + #[must_use] + pub fn from_http_announce_request(announce_request: &Announce, remote_ip: IpAddr, host_opt_ip: Option) -> Self { + let peer_addr = Peer::peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip, host_opt_ip, announce_request.port); let event: AnnounceEvent = if let Some(event) = &announce_request.event { match event.as_ref() { @@ -62,18 +60,20 @@ impl TorrentPeer { AnnounceEvent::None }; - TorrentPeer { - peer_id: announce_request.peer_id.clone(), + #[allow(clippy::cast_possible_truncation)] + Peer { + peer_id: announce_request.peer_id, peer_addr, - updated: DefaultClock::now(), - uploaded: NumberOfBytes(announce_request.uploaded as i64), - downloaded: NumberOfBytes(announce_request.downloaded as i64), - left: NumberOfBytes(announce_request.left as i64), + updated: Current::now(), + uploaded: NumberOfBytes(i128::from(announce_request.uploaded) as i64), + downloaded: NumberOfBytes(i128::from(announce_request.downloaded) as i64), + left: NumberOfBytes(i128::from(announce_request.left) as i64), event, } } // potentially substitute localhost ip with external ip + #[must_use] pub fn peer_addr_from_ip_and_port_and_opt_host_ip(remote_ip: IpAddr, host_opt_ip: Option, port: u16) -> SocketAddr { if let Some(host_ip) = host_opt_ip.filter(|_| remote_ip.is_loopback()) { SocketAddr::new(host_ip, port) @@ -82,11 +82,137 @@ 
impl TorrentPeer { } } + #[must_use] pub fn is_seeder(&self) -> bool { self.left.0 <= 0 && self.event != AnnounceEvent::Stopped } } +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] +pub struct Id(pub [u8; 20]); + +impl std::fmt::Display for Id { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut buffer = [0u8; 20]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok(); + match bytes_out { + Some(bytes) => write!(f, "{}", std::str::from_utf8(bytes).unwrap()), + None => write!(f, ""), + } + } +} + +impl Id { + #[must_use] + /// # Panics + /// + /// It will panic if the `binascii::bin2hex` from a too-small output buffer. + pub fn get_id(&self) -> Option { + let buff_size = self.0.len() * 2; + let mut tmp: Vec = vec![0; buff_size]; + binascii::bin2hex(&self.0, &mut tmp).unwrap(); + + std::str::from_utf8(&tmp).ok().map(std::string::ToString::to_string) + } + + #[must_use] + pub fn get_client_name(&self) -> Option<&'static str> { + if self.0[0] == b'M' { + return Some("BitTorrent"); + } + if self.0[0] == b'-' { + let name = match &self.0[1..3] { + b"AG" | b"A~" => "Ares", + b"AR" => "Arctic", + b"AV" => "Avicora", + b"AX" => "BitPump", + b"AZ" => "Azureus", + b"BB" => "BitBuddy", + b"BC" => "BitComet", + b"BF" => "Bitflu", + b"BG" => "BTG (uses Rasterbar libtorrent)", + b"BR" => "BitRocket", + b"BS" => "BTSlave", + b"BX" => "~Bittorrent X", + b"CD" => "Enhanced CTorrent", + b"CT" => "CTorrent", + b"DE" => "DelugeTorrent", + b"DP" => "Propagate Data Client", + b"EB" => "EBit", + b"ES" => "electric sheep", + b"FT" => "FoxTorrent", + b"FW" => "FrostWire", + b"FX" => "Freebox BitTorrent", + b"GS" => "GSTorrent", + b"HL" => "Halite", + b"HN" => "Hydranode", + b"KG" => "KGet", + b"KT" => "KTorrent", + b"LH" => "LH-ABC", + b"LP" => "Lphant", + b"LT" => "libtorrent", + b"lt" => "libTorrent", + b"LW" => "LimeWire", + b"MO" => "MonoTorrent", + b"MP" => "MooPolice", + b"MR" => "Miro", + b"MT" => "MoonlightTorrent", + b"NX" => "Net Transport", + b"PD" => "Pando", + b"qB" => "qBittorrent", + b"QD" => "QQDownload", + b"QT" => "Qt 4 Torrent example", + b"RT" => "Retriever", + b"S~" => "Shareaza alpha/beta", + b"SB" => "~Swiftbit", + b"SS" => "SwarmScope", + b"ST" => "SymTorrent", + b"st" => "sharktorrent", + b"SZ" => "Shareaza", + b"TN" => "TorrentDotNET", + b"TR" => "Transmission", + b"TS" => "Torrentstorm", + b"TT" => "TuoTu", + b"UL" => "uLeecher!", + b"UT" => "µTorrent", + b"UW" => "µTorrent Web", + b"VG" => "Vagaa", + b"WD" => "WebTorrent Desktop", + b"WT" => "BitLet", + b"WW" => "WebTorrent", + b"WY" => "FireTorrent", + b"XL" => "Xunlei", + b"XT" => "XanTorrent", + b"XX" => "Xtorrent", + b"ZT" => "ZipTorrent", + _ => return None, + }; + Some(name) + } else { + None + } + } +} + +impl Serialize for Id { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + #[derive(Serialize)] + struct PeerIdInfo<'a> { + id: Option, + client: Option<&'a str>, + } + + let obj = PeerIdInfo { + id: self.get_id(), + client: self.get_client_name(), + }; + obj.serialize(serializer) + } +} + #[cfg(test)] mod test { mod torrent_peer { @@ -95,16 +221,15 @@ mod test { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::peer::TorrentPeer; - use crate::protocol::clock::{DefaultClock, Time}; - use crate::PeerId; + use crate::protocol::clock::{Current, Time}; + use crate::tracker::peer::{self, Peer}; #[test] fn it_should_be_serializable() { - let torrent_peer = TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), + let 
torrent_peer = Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), @@ -129,8 +254,9 @@ mod test { AnnounceEvent, AnnounceRequest, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::peer::TorrentPeer; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::tracker::peer::Peer; + use crate::udp::connection_cookie::{into_connection_id, make}; + // todo: duplicate functions is PR 82. Remove duplication once both PR are merged. fn sample_ipv4_remote_addr() -> SocketAddr { @@ -152,7 +278,7 @@ mod test { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId(*b"-qB00000000000000000"), @@ -180,7 +306,7 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -190,7 +316,7 @@ mod test { let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -200,15 +326,15 @@ mod test { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; - use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::peer::TorrentPeer; + use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::tracker::peer::Peer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -219,7 +345,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -230,7 +356,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = 
IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -241,15 +367,15 @@ mod test { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::str::FromStr; - use crate::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; - use crate::peer::TorrentPeer; + use crate::tracker::peer::test::torrent_peer_constructor_from_udp_requests::AnnounceRequestBuilder; + use crate::tracker::peer::Peer; #[test] fn it_should_use_the_loopback_ip_if_the_server_does_not_have_the_external_ip_configuration() { let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); let announce_request = AnnounceRequestBuilder::default().into(); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(remote_ip, announce_request.port.0)); } @@ -260,7 +386,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -271,7 +397,7 @@ mod test { let announce_request = AnnounceRequestBuilder::default().into(); let host_opt_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - let torrent_peer = TorrentPeer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); + let torrent_peer = Peer::from_udp_announce_request(&announce_request, remote_ip, Some(host_opt_ip)); assert_eq!(torrent_peer.peer_addr, SocketAddr::new(host_opt_ip, announce_request.port.0)); } @@ -281,17 +407,17 @@ mod test { mod torrent_peer_constructor_from_for_http_requests { use std::net::{IpAddr, Ipv4Addr}; - use crate::http::AnnounceRequest; - use crate::peer::TorrentPeer; - use crate::{InfoHash, PeerId}; + use crate::http::request::Announce; + use crate::protocol::info_hash::InfoHash; + use crate::tracker::peer::{self, Peer}; - fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> AnnounceRequest { - AnnounceRequest { + fn sample_http_announce_request(peer_addr: IpAddr, port: u16) -> Announce { + Announce { info_hash: InfoHash([0u8; 20]), peer_addr, downloaded: 0u64, uploaded: 0u64, - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: peer::Id(*b"-qB00000000000000000"), port, left: 0u64, event: None, @@ -306,7 +432,7 @@ mod test { let ip_in_announce_request = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); let announce_request = sample_http_announce_request(ip_in_announce_request, 8080); - let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr.ip(), remote_ip); assert_ne!(torrent_peer.peer_addr.ip(), ip_in_announce_request); @@ -321,7 +447,7 @@ mod test { let announce_request = 
sample_http_announce_request(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), port_in_announce_request); - let torrent_peer = TorrentPeer::from_http_announce_request(&announce_request, remote_ip, None); + let torrent_peer = Peer::from_http_announce_request(&announce_request, remote_ip, None); assert_eq!(torrent_peer.peer_addr.port(), announce_request.port); assert_ne!(torrent_peer.peer_addr.port(), remote_port); diff --git a/src/tracker/statistics.rs b/src/tracker/statistics.rs index ac3889270..b787e1267 100644 --- a/src/tracker/statistics.rs +++ b/src/tracker/statistics.rs @@ -3,15 +3,14 @@ use std::sync::Arc; use async_trait::async_trait; use log::debug; #[cfg(test)] -use mockall::{automock, predicate::*}; +use mockall::{automock, predicate::str}; use tokio::sync::mpsc::error::SendError; -use tokio::sync::mpsc::{Receiver, Sender}; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; const CHANNEL_BUFFER_SIZE: usize = 65_535; #[derive(Debug, PartialEq, Eq)] -pub enum TrackerStatisticsEvent { +pub enum Event { Tcp4Announce, Tcp4Scrape, Tcp6Announce, @@ -25,7 +24,7 @@ pub enum TrackerStatisticsEvent { } #[derive(Debug)] -pub struct TrackerStatistics { +pub struct Metrics { pub tcp4_connections_handled: u64, pub tcp4_announces_handled: u64, pub tcp4_scrapes_handled: u64, @@ -40,13 +39,14 @@ pub struct TrackerStatistics { pub udp6_scrapes_handled: u64, } -impl Default for TrackerStatistics { +impl Default for Metrics { fn default() -> Self { Self::new() } } -impl TrackerStatistics { +impl Metrics { + #[must_use] pub fn new() -> Self { Self { tcp4_connections_handled: 0, @@ -65,89 +65,89 @@ impl TrackerStatistics { } } -pub struct StatsTracker { - pub stats_repository: StatsRepository, +pub struct Keeper { + pub repository: Repo, } -impl Default for StatsTracker { +impl Default for Keeper { fn default() -> Self { Self::new() } } -impl StatsTracker { +impl Keeper { + #[must_use] pub fn new() -> Self { - Self { - stats_repository: StatsRepository::new(), - } + Self { repository: Repo::new() } } - pub fn new_active_instance() -> (Box, StatsRepository) { + #[must_use] + pub fn new_active_instance() -> (Box, Repo) { let mut stats_tracker = Self::new(); let stats_event_sender = stats_tracker.run_event_listener(); - (stats_event_sender, stats_tracker.stats_repository) + (stats_event_sender, stats_tracker.repository) } - pub fn run_event_listener(&mut self) -> Box { - let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - let stats_repository = self.stats_repository.clone(); + let stats_repository = self.repository.clone(); tokio::spawn(async move { event_listener(receiver, stats_repository).await }); - Box::new(StatsEventSender { sender }) + Box::new(Sender { sender }) } } -async fn event_listener(mut receiver: Receiver, stats_repository: StatsRepository) { +async fn event_listener(mut receiver: mpsc::Receiver, stats_repository: Repo) { while let Some(event) = receiver.recv().await { event_handler(event, &stats_repository).await; } } -async fn event_handler(event: TrackerStatisticsEvent, stats_repository: &StatsRepository) { +async fn event_handler(event: Event, stats_repository: &Repo) { match event { // TCP4 - TrackerStatisticsEvent::Tcp4Announce => { + Event::Tcp4Announce => { stats_repository.increase_tcp4_announces().await; stats_repository.increase_tcp4_connections().await; } - TrackerStatisticsEvent::Tcp4Scrape => { + Event::Tcp4Scrape => { 
stats_repository.increase_tcp4_scrapes().await; stats_repository.increase_tcp4_connections().await; } // TCP6 - TrackerStatisticsEvent::Tcp6Announce => { + Event::Tcp6Announce => { stats_repository.increase_tcp6_announces().await; stats_repository.increase_tcp6_connections().await; } - TrackerStatisticsEvent::Tcp6Scrape => { + Event::Tcp6Scrape => { stats_repository.increase_tcp6_scrapes().await; stats_repository.increase_tcp6_connections().await; } // UDP4 - TrackerStatisticsEvent::Udp4Connect => { + Event::Udp4Connect => { stats_repository.increase_udp4_connections().await; } - TrackerStatisticsEvent::Udp4Announce => { + Event::Udp4Announce => { stats_repository.increase_udp4_announces().await; } - TrackerStatisticsEvent::Udp4Scrape => { + Event::Udp4Scrape => { stats_repository.increase_udp4_scrapes().await; } // UDP6 - TrackerStatisticsEvent::Udp6Connect => { + Event::Udp6Connect => { stats_repository.increase_udp6_connections().await; } - TrackerStatisticsEvent::Udp6Announce => { + Event::Udp6Announce => { stats_repository.increase_udp6_announces().await; } - TrackerStatisticsEvent::Udp6Scrape => { + Event::Udp6Scrape => { stats_repository.increase_udp6_scrapes().await; } } @@ -157,40 +157,41 @@ async fn event_handler(event: TrackerStatisticsEvent, stats_repository: &StatsRe #[async_trait] #[cfg_attr(test, automock)] -pub trait TrackerStatisticsEventSender: Sync + Send { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>>; +pub trait EventSender: Sync + Send { + async fn send_event(&self, event: Event) -> Option>>; } -pub struct StatsEventSender { - sender: Sender, +pub struct Sender { + sender: mpsc::Sender, } #[async_trait] -impl TrackerStatisticsEventSender for StatsEventSender { - async fn send_event(&self, event: TrackerStatisticsEvent) -> Option>> { +impl EventSender for Sender { + async fn send_event(&self, event: Event) -> Option>> { Some(self.sender.send(event).await) } } #[derive(Clone)] -pub struct StatsRepository { - pub stats: Arc>, +pub struct Repo { + pub stats: Arc>, } -impl Default for StatsRepository { +impl Default for Repo { fn default() -> Self { Self::new() } } -impl StatsRepository { +impl Repo { + #[must_use] pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(TrackerStatistics::new())), + stats: Arc::new(RwLock::new(Metrics::new())), } } - pub async fn get_stats(&self) -> RwLockReadGuard<'_, TrackerStatistics> { + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { self.stats.read().await } @@ -271,37 +272,37 @@ impl StatsRepository { mod tests { mod stats_tracker { - use crate::statistics::{StatsTracker, TrackerStatistics, TrackerStatisticsEvent}; + use crate::tracker::statistics::{Event, Keeper, Metrics}; #[tokio::test] async fn should_contain_the_tracker_statistics() { - let stats_tracker = StatsTracker::new(); + let stats_tracker = Keeper::new(); - let stats = stats_tracker.stats_repository.get_stats().await; + let stats = stats_tracker.repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, TrackerStatistics::new().tcp4_announces_handled); + assert_eq!(stats.tcp4_announces_handled, Metrics::new().tcp4_announces_handled); } #[tokio::test] async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = StatsTracker::new(); + let mut stats_tracker = Keeper::new(); let event_sender = stats_tracker.run_event_listener(); - let result = event_sender.send_event(TrackerStatisticsEvent::Udp4Connect).await; + let result = event_sender.send_event(Event::Udp4Connect).await; 
assert!(result.is_some()); } } mod event_handler { - use crate::statistics::{event_handler, StatsRepository, TrackerStatisticsEvent}; + use crate::tracker::statistics::{event_handler, Event, Repo}; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + event_handler(Event::Tcp4Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -310,9 +311,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Announce, &stats_repository).await; + event_handler(Event::Tcp4Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -321,9 +322,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + event_handler(Event::Tcp4Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -332,9 +333,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp4Scrape, &stats_repository).await; + event_handler(Event::Tcp4Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -343,9 +344,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + event_handler(Event::Tcp6Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -354,9 +355,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Announce, &stats_repository).await; + event_handler(Event::Tcp6Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -365,9 +366,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + event_handler(Event::Tcp6Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -376,9 +377,9 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Tcp6Scrape, &stats_repository).await; + event_handler(Event::Tcp6Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -387,9 +388,9 @@ mod tests { #[tokio::test] 
async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp4Connect, &stats_repository).await; + event_handler(Event::Udp4Connect, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -398,9 +399,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp4Announce, &stats_repository).await; + event_handler(Event::Udp4Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -409,9 +410,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp4Scrape, &stats_repository).await; + event_handler(Event::Udp4Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -420,9 +421,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp6Connect, &stats_repository).await; + event_handler(Event::Udp6Connect, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -431,9 +432,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp6Announce, &stats_repository).await; + event_handler(Event::Udp6Announce, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -442,9 +443,9 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { - let stats_repository = StatsRepository::new(); + let stats_repository = Repo::new(); - event_handler(TrackerStatisticsEvent::Udp6Scrape, &stats_repository).await; + event_handler(Event::Udp6Scrape, &stats_repository).await; let stats = stats_repository.get_stats().await; diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs index 335554006..e292dff54 100644 --- a/src/tracker/torrent.rs +++ b/src/tracker/torrent.rs @@ -4,27 +4,28 @@ use std::time::Duration; use aquatic_udp_protocol::AnnounceEvent; use serde::{Deserialize, Serialize}; -use crate::peer::TorrentPeer; -use crate::protocol::clock::{DefaultClock, TimeNow}; -use crate::{PeerId, MAX_SCRAPE_TORRENTS}; +use super::peer; +use crate::protocol::clock::{Current, TimeNow}; +use crate::protocol::common::MAX_SCRAPE_TORRENTS; #[derive(Serialize, Deserialize, Clone, Debug)] -pub struct TorrentEntry { +pub struct Entry { #[serde(skip)] - pub peers: std::collections::BTreeMap, + pub peers: std::collections::BTreeMap, pub completed: u32, } -impl TorrentEntry { - pub fn new() -> TorrentEntry { - TorrentEntry { +impl Entry { + #[must_use] + pub fn new() -> Entry { + Entry { peers: std::collections::BTreeMap::new(), completed: 0, } } // Update peer and return completed (times torrent has been downloaded) - pub fn update_peer(&mut self, peer: &TorrentPeer) -> bool { + pub fn update_peer(&mut self, 
peer: &peer::Peer) -> bool { let mut did_torrent_stats_change: bool = false; match peer.event { @@ -47,7 +48,8 @@ impl TorrentEntry { did_torrent_stats_change } - pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&TorrentPeer> { + #[must_use] + pub fn get_peers(&self, client_addr: Option<&SocketAddr>) -> Vec<&peer::Peer> { self.peers .values() .filter(|peer| match client_addr { @@ -70,6 +72,8 @@ impl TorrentEntry { .collect() } + #[allow(clippy::cast_possible_truncation)] + #[must_use] pub fn get_stats(&self) -> (u32, u32, u32) { let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; let leechers: u32 = self.peers.len() as u32 - seeders; @@ -77,26 +81,26 @@ impl TorrentEntry { } pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = DefaultClock::sub(&Duration::from_secs(max_peer_timeout as u64)).unwrap_or_default(); + let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); self.peers.retain(|_, peer| peer.updated > current_cutoff); } } -impl Default for TorrentEntry { +impl Default for Entry { fn default() -> Self { Self::new() } } #[derive(Debug)] -pub struct TorrentStats { +pub struct SwamStats { pub completed: u32, pub seeders: u32, pub leechers: u32, } #[derive(Debug)] -pub enum TorrentError { +pub enum Error { TorrentNotWhitelisted, PeerNotAuthenticated, PeerKeyNotValid, @@ -113,21 +117,20 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::peer::TorrentPeer; - use crate::protocol::clock::{DefaultClock, DurationSinceUnixEpoch, StoppedClock, StoppedTime, Time, WorkingClock}; - use crate::torrent::TorrentEntry; - use crate::PeerId; + use crate::protocol::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; + use crate::tracker::peer; + use crate::tracker::torrent::Entry; struct TorrentPeerBuilder { - peer: TorrentPeer, + peer: peer::Peer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = TorrentPeer { - peer_id: PeerId([0u8; 20]), + let default_peer = peer::Peer { + peer_id: peer::Id([0u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), @@ -146,7 +149,7 @@ mod tests { self } - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { self.peer.peer_id = peer_id; self } @@ -161,14 +164,14 @@ mod tests { self } - pub fn into(self) -> TorrentPeer { + pub fn into(self) -> peer::Peer { self.peer } } /// A torrent seeder is a peer with 0 bytes left to download which /// has not announced it has stopped - fn a_torrent_seeder() -> TorrentPeer { + fn a_torrent_seeder() -> peer::Peer { TorrentPeerBuilder::default() .with_number_of_bytes_left(0) .with_event_completed() @@ -177,7 +180,7 @@ mod tests { /// A torrent leecher is a peer that is not a seeder. 
/// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> TorrentPeer { + fn a_torrent_leecher() -> peer::Peer { TorrentPeerBuilder::default() .with_number_of_bytes_left(1) .with_event_completed() @@ -186,14 +189,14 @@ mod tests { #[test] fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = TorrentEntry::new(); + let torrent_entry = Entry::new(); assert_eq!(torrent_entry.get_peers(None).len(), 0); } #[test] fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -204,7 +207,7 @@ mod tests { #[test] fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -214,7 +217,7 @@ mod tests { #[test] fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -226,7 +229,7 @@ mod tests { #[test] fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -238,7 +241,7 @@ mod tests { #[test] fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -252,7 +255,7 @@ mod tests { #[test] fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( ) { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Add a peer that did not exist before in the entry @@ -263,7 +266,7 @@ mod tests { #[test] fn a_torrent_entry_could_filter_out_peers_with_a_given_socket_address() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); torrent_entry.update_peer(&torrent_peer); // Add peer @@ -274,9 +277,9 @@ mod tests { assert_eq!(peers.len(), 0); } - fn peer_id_from_i32(number: i32) -> PeerId { + fn peer_id_from_i32(number: i32) -> peer::Id { let peer_id = number.to_le_bytes(); - PeerId([ + peer::Id([ 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], peer_id[3], ]) @@ -284,7 +287,7 @@ mod tests { #[test] fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { @@ -296,12 +299,12 @@ mod tests { let 
peers = torrent_entry.get_peers(None); - assert_eq!(peers.len(), 74) + assert_eq!(peers.len(), 74); } #[test] fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_seeder = a_torrent_seeder(); torrent_entry.update_peer(&torrent_seeder); // Add seeder @@ -311,7 +314,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_leecher = a_torrent_leecher(); torrent_entry.update_peer(&torrent_leecher); // Add leecher @@ -322,7 +325,7 @@ mod tests { #[test] fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( ) { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let mut torrent_peer = TorrentPeerBuilder::default().into(); torrent_entry.update_peer(&torrent_peer); // Add the peer @@ -337,7 +340,7 @@ mod tests { #[test] fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); // Announce "Completed" torrent download event. @@ -351,14 +354,14 @@ mod tests { #[test] fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = TorrentEntry::new(); + let mut torrent_entry = Entry::new(); let timeout = 120u32; - let now = WorkingClock::now(); - StoppedClock::local_set(&now); + let now = Working::now(); + Stopped::local_set(&now); - let timeout_seconds_before_now = now.sub(Duration::from_secs(timeout as u64)); + let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); let inactive_peer = TorrentPeerBuilder::default() .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) .into(); diff --git a/src/udp/connection_cookie.rs b/src/udp/connection_cookie.rs index c40a56959..3daa3e0f6 100644 --- a/src/udp/connection_cookie.rs +++ b/src/udp/connection_cookie.rs @@ -2,8 +2,8 @@ use std::net::SocketAddr; use aquatic_udp_protocol::ConnectionId; +use super::error::Error; use crate::protocol::clock::time_extent::{Extent, TimeExtent}; -use crate::udp::ServerError; pub type Cookie = [u8; 8]; @@ -11,25 +11,32 @@ pub type SinceUnixEpochTimeExtent = TimeExtent; pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); +#[must_use] pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { connection_id.0.to_le_bytes() } +#[must_use] pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { ConnectionId(i64::from_le_bytes(*connection_cookie)) } -pub fn make_connection_cookie(remote_address: &SocketAddr) -> Cookie { +#[must_use] +pub fn make(remote_address: &SocketAddr) -> Cookie { let time_extent = cookie_builder::get_last_time_extent(); //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); cookie_builder::build(remote_address, &time_extent) } -pub fn check_connection_cookie( - remote_address: &SocketAddr, - connection_cookie: &Cookie, -) -> Result { +/// # Panics +/// +/// It would panic if the `COOKIE_LIFETIME` constant would be an unreasonably large number. 
+/// +/// # Errors +/// +/// Will return an `Error::InvalidConnectionId` if the supplied `connection_cookie` fails to verify. +pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result<SinceUnixEpochTimeExtent, Error> { // we loop backwards testing each time_extent until we find one that matches. // (or the lifetime of time_extents is exhausted) for offset in 0..=COOKIE_LIFETIME.amount { @@ -42,7 +49,7 @@ pub fn check_connection_cookie( return Ok(checking_time_extent); } } - Err(ServerError::InvalidConnectionId) + Err(Error::InvalidConnectionId) } mod cookie_builder { @@ -51,8 +58,8 @@ mod cookie_builder { use std::net::SocketAddr; use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, MakeTimeExtent, TimeExtent}; - use crate::protocol::crypto::keys::seeds::{DefaultSeed, SeedKeeper}; + use crate::protocol::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; + use crate::protocol::crypto::keys::seeds::{Current, Keeper}; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) @@ -63,7 +70,7 @@ mod cookie_builder { } pub(super) fn build(remote_address: &SocketAddr, time_extent: &TimeExtent) -> Cookie { - let seed = DefaultSeed::get_seed(); + let seed = Current::get_seed(); let mut hasher = DefaultHasher::new(); @@ -81,20 +88,20 @@ mod tests { use super::cookie_builder::{self}; use crate::protocol::clock::time_extent::{self, Extent}; - use crate::protocol::clock::{StoppedClock, StoppedTime}; - use crate::udp::connection_cookie::{check_connection_cookie, make_connection_cookie, Cookie, COOKIE_LIFETIME}; + use crate::protocol::clock::{Stopped, StoppedTime}; + use crate::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); #[test] fn it_should_make_a_connection_cookie() { - let cookie = make_connection_cookie(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); - // Note: This constant may need to be updated in the future as the hash is not guaranteed to be stable between versions.
const ID_COOKIE: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; - assert_eq!(cookie, ID_COOKIE) + let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); + + assert_eq!(cookie, ID_COOKIE); } #[test] @@ -111,7 +118,7 @@ mod tests { //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] - assert_eq!(cookie, cookie_2) + assert_eq!(cookie, cookie_2); } #[test] @@ -129,7 +136,7 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: 255.255.255.255:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [217, 87, 239, 178, 182, 126, 66, 166] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] @@ -147,7 +154,7 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: [::]:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [99, 119, 230, 177, 20, 220, 163, 187] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] @@ -165,7 +172,7 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: 0.0.0.0:1, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [38, 8, 0, 102, 92, 170, 220, 11] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] @@ -183,51 +190,51 @@ mod tests { //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 18446744073709551615.999999999s, amount: 18446744073709551615 }, cookie: [87, 111, 109, 125, 182, 206, 3, 201] - assert_ne!(cookie, cookie_2) + assert_ne!(cookie, cookie_2); } #[test] fn it_should_make_different_cookies_for_the_next_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); - StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); - let cookie_next = make_connection_cookie(&remote_address); + let cookie_next = make(&remote_address); - assert_ne!(cookie, cookie_next) + assert_ne!(cookie, cookie_next); } #[test] fn it_should_be_valid_for_this_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } #[test] fn it_should_be_valid_for_the_next_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); - StoppedClock::local_add(&COOKIE_LIFETIME.increment).unwrap(); + Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } #[test] fn it_should_be_valid_for_the_last_time_extent() { let remote_address = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); - StoppedClock::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } #[test] @@ -235,10 +242,10 @@ mod tests { fn it_should_be_not_valid_after_their_last_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let cookie = make_connection_cookie(&remote_address); + let cookie = make(&remote_address); - StoppedClock::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); + Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); - check_connection_cookie(&remote_address, &cookie).unwrap(); + check(&remote_address, &cookie).unwrap(); } } diff --git a/src/udp/errors.rs b/src/udp/error.rs similarity index 51% rename from src/udp/errors.rs rename to src/udp/error.rs index 8d7b04b4f..c5fbb3929 100644 --- a/src/udp/errors.rs +++ b/src/udp/error.rs @@ -1,9 +1,11 @@ use thiserror::Error; +use crate::tracker::torrent; + #[derive(Error, Debug)] -pub enum ServerError { +pub enum Error { #[error("internal server error")] - InternalServerError, + InternalServer, #[error("info_hash is either missing or invalid")] InvalidInfoHash, @@ -32,3 +34,16 @@ pub enum ServerError { #[error("bad request")] BadRequest, } + +impl From for Error { + fn from(e: torrent::Error) -> Self { + match e { + torrent::Error::TorrentNotWhitelisted => Error::TorrentNotWhitelisted, + torrent::Error::PeerNotAuthenticated => Error::PeerNotAuthenticated, + torrent::Error::PeerKeyNotValid => Error::PeerKeyNotValid, + torrent::Error::NoPeersFound => Error::NoPeersFound, + torrent::Error::CouldNotSendResponse => Error::InternalServer, + torrent::Error::InvalidInfoHash => Error::InvalidInfoHash, + } + } +} diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index 5514bc1eb..625f42d40 100644 --- a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -6,35 +6,15 @@ use aquatic_udp_protocol::{ NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use super::connection_cookie::{check_connection_cookie, from_connection_id, into_connection_id, make_connection_cookie}; -use crate::peer::TorrentPeer; -use crate::tracker::statistics::TrackerStatisticsEvent; -use crate::tracker::torrent::TorrentError; -use crate::tracker::TorrentTracker; -use crate::udp::errors::ServerError; -use crate::udp::request::AnnounceRequestWrapper; -use crate::{InfoHash, MAX_SCRAPE_TORRENTS}; - -pub async fn authenticate(info_hash: &InfoHash, tracker: Arc) -> Result<(), ServerError> { - match tracker.authenticate_request(info_hash, &None).await { - Ok(_) => Ok(()), - Err(e) => { - let err = match e { - TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted, - TorrentError::PeerNotAuthenticated => ServerError::PeerNotAuthenticated, - TorrentError::PeerKeyNotValid => ServerError::PeerKeyNotValid, - TorrentError::NoPeersFound => ServerError::NoPeersFound, - TorrentError::CouldNotSendResponse => ServerError::InternalServerError, - TorrentError::InvalidInfoHash => ServerError::InvalidInfoHash, - }; - - Err(err) - } - } -} - -pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: Arc) -> Response { - match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| 
ServerError::InternalServerError) { +use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; +use crate::protocol::common::MAX_SCRAPE_TORRENTS; +use crate::protocol::info_hash::InfoHash; +use crate::tracker::{self, peer, statistics}; +use crate::udp::error::Error; +use crate::udp::request::AnnounceWrapper; + +pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec<u8>, tracker: Arc<tracker::Tracker>) -> Response { + match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|_| Error::InternalServer) { Ok(request) => { let transaction_id = match &request { Request::Connect(connect_request) => connect_request.transaction_id, @@ -44,19 +24,22 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec<u8>, tracker: A match handle_request(request, remote_addr, tracker).await { Ok(response) => response, - Err(e) => handle_error(e, transaction_id), + Err(e) => handle_error(&e, transaction_id), } } // bad request - Err(_) => handle_error(ServerError::BadRequest, TransactionId(0)), + Err(_) => handle_error(&Error::BadRequest, TransactionId(0)), } } +/// # Errors +/// +/// If an error happens in the `handle_request` function, it will just return the `Error`. pub async fn handle_request( request: Request, remote_addr: SocketAddr, - tracker: Arc<TorrentTracker>, -) -> Result<Response, ServerError> { + tracker: Arc<tracker::Tracker>, +) -> Result<Response, Error> { match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, @@ -64,12 +47,15 @@ pub async fn handle_request( } } +/// # Errors +/// +/// This function never returns an error. pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, - tracker: Arc<TorrentTracker>, -) -> Result<Response, ServerError> { - let connection_cookie = make_connection_cookie(&remote_addr); + tracker: Arc<tracker::Tracker>, +) -> Result<Response, Error> { + let connection_cookie = make(&remote_addr); let connection_id = into_connection_id(&connection_cookie); let response = Response::from(ConnectResponse { @@ -80,33 +66,33 @@ pub async fn handle_connect( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp4Connect).await; + tracker.send_stats_event(statistics::Event::Udp4Connect).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp6Connect).await; + tracker.send_stats_event(statistics::Event::Udp6Connect).await; } } Ok(response) }
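
Reviewer note (not part of the patch): the `handle_announce` rewrite below leans on the new `impl From<torrent::Error> for Error` added in `src/udp/error.rs`; the `?` operator invokes that conversion automatically, which is what allows the old `authenticate` wrapper to be deleted. A self-contained sketch of the pattern, using placeholder enums rather than the tracker's real types:

```rust
use thiserror::Error;

#[derive(Error, Debug)]
enum TorrentError {
    #[error("torrent not whitelisted")]
    TorrentNotWhitelisted,
}

#[derive(Error, Debug)]
enum ServerError {
    #[error("torrent not whitelisted")]
    TorrentNotWhitelisted,
}

// With this conversion in place, `?` translates the low-level error
// into the server-level error via `From::from`.
impl From<TorrentError> for ServerError {
    fn from(e: TorrentError) -> Self {
        match e {
            TorrentError::TorrentNotWhitelisted => ServerError::TorrentNotWhitelisted,
        }
    }
}

fn authenticate() -> Result<(), TorrentError> {
    Err(TorrentError::TorrentNotWhitelisted)
}

fn handle() -> Result<(), ServerError> {
    authenticate()?; // no explicit match-and-translate helper needed
    Ok(())
}

fn main() {
    assert!(handle().is_err());
}
```
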
+/// # Errors +/// +/// If an error happens in the `handle_announce` function, it will just return the `Error`. pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, - tracker: Arc<TorrentTracker>, -) -> Result<Response, ServerError> { - match check_connection_cookie(&remote_addr, &from_connection_id(&announce_request.connection_id)) { - Ok(_) => {} - Err(e) => { - return Err(e); - } - } + tracker: Arc<tracker::Tracker>, +) -> Result<Response, Error> { + check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; - let wrapped_announce_request = AnnounceRequestWrapper::new(announce_request.clone()); + let wrapped_announce_request = AnnounceWrapper::new(announce_request); - authenticate(&wrapped_announce_request.info_hash, tracker.clone()).await?; + tracker + .authenticate_request(&wrapped_announce_request.info_hash, &None) + .await?; - let peer = TorrentPeer::from_udp_announce_request( + let peer = peer::Peer::from_udp_announce_request( &wrapped_announce_request.announce_request, remote_addr.ip(), tracker.config.get_ext_ip(), @@ -123,12 +109,13 @@ pub async fn handle_announce( .get_torrent_peers(&wrapped_announce_request.info_hash, &peer.peer_addr) .await; + #[allow(clippy::cast_possible_truncation)] let announce_response = if remote_addr.is_ipv4() { Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), + announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), peers: peers .iter() .filter_map(|peer| { @@ -146,9 +133,9 @@ pub async fn handle_announce( } else { Response::from(AnnounceResponse { transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(tracker.config.announce_interval as i32), - leechers: NumberOfPeers(torrent_stats.leechers as i32), - seeders: NumberOfPeers(torrent_stats.seeders as i32), + announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), + leechers: NumberOfPeers(i64::from(torrent_stats.leechers) as i32), + seeders: NumberOfPeers(i64::from(torrent_stats.seeders) as i32), peers: peers .iter() .filter_map(|peer| { @@ -168,38 +155,43 @@ pub async fn handle_announce( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp4Announce).await; + tracker.send_stats_event(statistics::Event::Udp4Announce).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp6Announce).await; + tracker.send_stats_event(statistics::Event::Udp6Announce).await; } } Ok(announce_response) } -// todo: refactor this, db lock can be a lot shorter +/// # Errors +/// +/// This function never returns an error. 
+/// +/// TODO: refactor this, db lock can be a lot shorter pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, - tracker: Arc, -) -> Result { + tracker: Arc, +) -> Result { let db = tracker.get_torrents().await; let mut torrent_stats: Vec = Vec::new(); - for info_hash in request.info_hashes.iter() { + for info_hash in &request.info_hashes { let info_hash = InfoHash(info_hash.0); let scrape_entry = match db.get(&info_hash) { Some(torrent_info) => { - if authenticate(&info_hash, tracker.clone()).await.is_ok() { + if tracker.authenticate_request(&info_hash, &None).await.is_ok() { let (seeders, completed, leechers) = torrent_info.get_stats(); + #[allow(clippy::cast_possible_truncation)] TorrentScrapeStatistics { - seeders: NumberOfPeers(seeders as i32), - completed: NumberOfDownloads(completed as i32), - leechers: NumberOfPeers(leechers as i32), + seeders: NumberOfPeers(i64::from(seeders) as i32), + completed: NumberOfDownloads(i64::from(completed) as i32), + leechers: NumberOfPeers(i64::from(leechers) as i32), } } else { TorrentScrapeStatistics { @@ -224,10 +216,10 @@ pub async fn handle_scrape( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp4Scrape).await; + tracker.send_stats_event(statistics::Event::Udp4Scrape).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(TrackerStatisticsEvent::Udp6Scrape).await; + tracker.send_stats_event(statistics::Event::Udp6Scrape).await; } } @@ -237,7 +229,7 @@ pub async fn handle_scrape( })) } -fn handle_error(e: ServerError, transaction_id: TransactionId) -> Response { +fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { let message = e.to_string(); Response::from(ErrorResponse { transaction_id, @@ -252,35 +244,32 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use crate::mode::TrackerMode; - use crate::peer::TorrentPeer; - use crate::protocol::clock::{DefaultClock, Time}; - use crate::statistics::StatsTracker; - use crate::tracker::TorrentTracker; - use crate::{Configuration, PeerId}; + use crate::config::Configuration; + use crate::protocol::clock::{Current, Time}; + use crate::tracker::{self, mode, peer, statistics}; fn default_tracker_config() -> Arc { Arc::new(Configuration::default()) } - fn initialized_public_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Public).into()); - initialized_tracker(configuration) + fn initialized_public_tracker() -> Arc { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Public).into()); + initialized_tracker(&configuration) } - fn initialized_private_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Private).into()); - initialized_tracker(configuration) + fn initialized_private_tracker() -> Arc { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Private).into()); + initialized_tracker(&configuration) } - fn initialized_whitelisted_tracker() -> Arc { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(TrackerMode::Listed).into()); - initialized_tracker(configuration) + fn initialized_whitelisted_tracker() -> Arc { + let configuration = Arc::new(TrackerConfigurationBuilder::default().with_mode(mode::Mode::Listed).into()); + initialized_tracker(&configuration) } - fn initialized_tracker(configuration: Arc) -> Arc { - let (stats_event_sender, 
stats_repository) = StatsTracker::new_active_instance(); - Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) + fn initialized_tracker(configuration: &Arc) -> Arc { + let (stats_event_sender, stats_repository) = statistics::Keeper::new_active_instance(); + Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -300,15 +289,15 @@ mod tests { } struct TorrentPeerBuilder { - peer: TorrentPeer, + peer: peer::Peer, } impl TorrentPeerBuilder { pub fn default() -> TorrentPeerBuilder { - let default_peer = TorrentPeer { - peer_id: PeerId([255u8; 20]), + let default_peer = peer::Peer { + peer_id: peer::Id([255u8; 20]), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DefaultClock::now(), + updated: Current::now(), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), @@ -317,7 +306,7 @@ mod tests { TorrentPeerBuilder { peer: default_peer } } - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { self.peer.peer_id = peer_id; self } @@ -332,7 +321,7 @@ mod tests { self } - pub fn into(self) -> TorrentPeer { + pub fn into(self) -> peer::Peer { self.peer } } @@ -354,7 +343,7 @@ mod tests { self } - pub fn with_mode(mut self, mode: TrackerMode) -> Self { + pub fn with_mode(mut self, mode: mode::Mode) -> Self { self.configuration.mode = mode; self } @@ -373,10 +362,9 @@ mod tests { use mockall::predicate::eq; use super::{default_tracker_config, sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_connect; + use crate::tracker::{self, statistics}; + use crate::udp::connection_cookie::{into_connection_id, make}; + use crate::udp::handlers::handle_connect; use crate::udp::handlers::tests::{initialized_public_tracker, sample_ipv4_remote_addr}; fn sample_connect_request() -> ConnectRequest { @@ -398,7 +386,7 @@ mod tests { assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -417,7 +405,7 @@ mod tests { assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: request.transaction_id }) ); @@ -425,10 +413,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp4Connect)) + .with(eq(statistics::Event::Udp4Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -436,7 +424,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - 
TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(client_socket_address, &sample_connect_request(), torrent_tracker) .await @@ -445,16 +433,16 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp6Connect)) + .with(eq(statistics::Event::Udp6Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), torrent_tracker) .await @@ -471,7 +459,7 @@ mod tests { TransactionId, }; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; + use crate::udp::connection_cookie::{into_connection_id, make}; use crate::udp::handlers::tests::sample_ipv4_remote_addr; struct AnnounceRequestBuilder { @@ -485,7 +473,7 @@ mod tests { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: into_connection_id(&make_connection_cookie(&sample_ipv4_remote_addr())), + connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), transaction_id: TransactionId(0i32), info_hash: info_hash_aquatic, peer_id: AquaticPeerId([255u8; 20]), @@ -545,15 +533,13 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::tracker::{self, peer, statistics}; + use crate::udp::connection_cookie::{into_connection_id, make}; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv4_socket_address, TorrentPeerBuilder, }; - use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -567,7 +553,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -579,7 +565,7 @@ mod tests { let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; let expected_peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); @@ -591,7 +577,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - 
.with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -630,7 +616,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -644,7 +630,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { + async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -653,7 +639,7 @@ mod tests { let peer_id = AquaticPeerId([255u8; 20]); let peer_using_ipv6 = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -662,10 +648,10 @@ mod tests { .await; } - async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() @@ -690,16 +676,16 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp4Announce)) + .with(eq(statistics::Event::Udp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_announce( @@ -716,11 +702,11 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::tracker::peer; + use crate::udp::connection_cookie::{into_connection_id, make}; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{initialized_public_tracker, TorrentPeerBuilder}; - use crate::PeerId; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -734,7 +720,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) 
.with_peer_id(peer_id) .with_ip_address(client_ip) @@ -749,7 +735,7 @@ mod tests { tracker.config.external_ip.clone().unwrap().parse::().unwrap(); let expected_peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V4(external_ip_in_tracker_configuration), client_port)) .into(); @@ -770,15 +756,13 @@ mod tests { }; use mockall::predicate::eq; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::tracker::{self, peer, statistics}; + use crate::udp::connection_cookie::{into_connection_id, make}; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::{ default_tracker_config, initialized_public_tracker, sample_ipv6_remote_addr, TorrentPeerBuilder, }; - use crate::PeerId; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -793,7 +777,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -805,7 +789,7 @@ mod tests { let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; let expected_peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -820,7 +804,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); let response = handle_announce(remote_addr, &request, initialized_public_tracker()) @@ -859,7 +843,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -874,7 +858,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { + async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -882,7 +866,7 @@ mod tests { let peer_id = AquaticPeerId([255u8; 20]); let peer_using_ipv4 = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); @@ -891,13 +875,13 @@ mod tests { .await; } - async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; let 
remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); handle_announce(remote_addr, &request, tracker.clone()).await.unwrap() @@ -922,22 +906,22 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp6Announce)) + .with(eq(statistics::Event::Udp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .into(); handle_announce(remote_addr, &announce_request, tracker.clone()) @@ -951,19 +935,19 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::statistics::StatsTracker; - use crate::tracker::TorrentTracker; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_announce; + use crate::tracker; + use crate::tracker::statistics::Keeper; + use crate::udp::connection_cookie::{into_connection_id, make}; + use crate::udp::handlers::handle_announce; use crate::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::udp::handlers::tests::TrackerConfigurationBuilder; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = - Arc::new(TorrentTracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); + Arc::new(tracker::Tracker::new(&configuration, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -978,7 +962,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make_connection_cookie(&remote_addr))) + .with_connection_id(into_connection_id(&make(&remote_addr))) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -1013,11 +997,10 @@ mod tests { }; use super::TorrentPeerBuilder; - use crate::tracker::TorrentTracker; - use crate::udp::connection_cookie::{into_connection_id, make_connection_cookie}; - use crate::udp::handle_scrape; + use crate::tracker::{self, peer}; + use crate::udp::connection_cookie::{into_connection_id, make}; + use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{initialized_public_tracker, 
sample_ipv4_remote_addr}; - use crate::PeerId; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1035,7 +1018,7 @@ mod tests { let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(&remote_addr)), + connection_id: into_connection_id(&make(&remote_addr)), transaction_id: TransactionId(0i32), info_hashes, }; @@ -1055,11 +1038,11 @@ mod tests { ); } - async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { - let peer_id = PeerId([255u8; 20]); + async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + let peer_id = peer::Id([255u8; 20]); let peer = TorrentPeerBuilder::default() - .with_peer_id(PeerId(peer_id.0)) + .with_peer_id(peer::Id(peer_id.0)) .with_peer_addr(*remote_addr) .with_bytes_left(0) .into(); @@ -1073,13 +1056,13 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(remote_addr)), + connection_id: into_connection_id(&make(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } } - async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1123,7 +1106,7 @@ mod tests { use aquatic_udp_protocol::InfoHash; - use crate::udp::handle_scrape; + use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::scrape_request::{ add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; @@ -1162,7 +1145,7 @@ mod tests { mod with_a_whitelisted_tracker { use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::udp::handle_scrape; + use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; @@ -1218,7 +1201,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make_connection_cookie(remote_addr)), + connection_id: into_connection_id(&make(remote_addr)), transaction_id: TransactionId(0i32), info_hashes, } @@ -1231,24 +1214,23 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv4_remote_addr}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp4Scrape)) + .with(eq(statistics::Event::Udp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, 
&sample_scrape_request(&remote_addr), tracker.clone()) @@ -1264,24 +1246,23 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::statistics::{MockTrackerStatisticsEventSender, StatsRepository, TrackerStatisticsEvent}; - use crate::tracker::TorrentTracker; + use crate::tracker::{self, statistics}; use crate::udp::handlers::handle_scrape; use crate::udp::handlers::tests::{default_tracker_config, sample_ipv6_remote_addr}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut stats_event_sender_mock = MockTrackerStatisticsEventSender::new(); + let mut stats_event_sender_mock = statistics::MockEventSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(TrackerStatisticsEvent::Udp6Scrape)) + .with(eq(statistics::Event::Udp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - TorrentTracker::new(default_tracker_config(), Some(stats_event_sender), StatsRepository::new()).unwrap(), + tracker::Tracker::new(&default_tracker_config(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), tracker.clone()) diff --git a/src/udp/mod.rs b/src/udp/mod.rs index 4c98875c5..8b8c8c4f8 100644 --- a/src/udp/mod.rs +++ b/src/udp/mod.rs @@ -1,10 +1,5 @@ -pub use self::errors::*; -pub use self::handlers::*; -pub use self::request::*; -pub use self::server::*; - pub mod connection_cookie; -pub mod errors; +pub mod error; pub mod handlers; pub mod request; pub mod server; @@ -14,4 +9,4 @@ pub type Port = u16; pub type TransactionId = i64; pub const MAX_PACKET_SIZE: usize = 1496; -pub const PROTOCOL_ID: i64 = 0x41727101980; +pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; diff --git a/src/udp/request.rs b/src/udp/request.rs index 6531f54b9..c4326b291 100644 --- a/src/udp/request.rs +++ b/src/udp/request.rs @@ -1,6 +1,6 @@ use aquatic_udp_protocol::AnnounceRequest; -use crate::InfoHash; +use crate::protocol::info_hash::InfoHash; // struct AnnounceRequest { // pub connection_id: i64, @@ -17,14 +17,15 @@ use crate::InfoHash; // pub port: Port // } -pub struct AnnounceRequestWrapper { +pub struct AnnounceWrapper { pub announce_request: AnnounceRequest, pub info_hash: InfoHash, } -impl AnnounceRequestWrapper { - pub fn new(announce_request: AnnounceRequest) -> Self { - AnnounceRequestWrapper { +impl AnnounceWrapper { + #[must_use] + pub fn new(announce_request: &AnnounceRequest) -> Self { + AnnounceWrapper { announce_request: announce_request.clone(), info_hash: InfoHash(announce_request.info_hash.0), }
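
Reviewer note (not part of the patch): the `AnnounceWrapper` hunk just above bundles two pedantic-lint fixes that recur throughout this diff: constructors gain `#[must_use]` (clippy::must_use_candidate) and read-only arguments are taken by reference (clippy::needless_pass_by_value). A minimal sketch on made-up types, not the tracker's API:

```rust
#[derive(Clone)]
struct Request {
    id: u64,
}

struct Wrapper {
    request: Request,
}

impl Wrapper {
    // `#[must_use]` warns callers who discard the constructed value;
    // taking `&Request` instead of `Request` leaves ownership with the
    // caller and confines the clone to the one place that needs it.
    #[must_use]
    fn new(request: &Request) -> Self {
        Wrapper {
            request: request.clone(),
        }
    }
}

fn main() {
    let request = Request { id: 7 };
    let wrapped = Wrapper::new(&request); // `request` remains usable here
    assert_eq!(wrapped.request.id, request.id);
}
```
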
diff --git a/src/udp/server.rs b/src/udp/server.rs index 2f41c3c4d..5bd835365 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -6,24 +6,31 @@ use aquatic_udp_protocol::Response; use log::{debug, info}; use tokio::net::UdpSocket; -use crate::tracker::TorrentTracker; -use crate::udp::{handle_packet, MAX_PACKET_SIZE}; +use crate::tracker; +use crate::udp::handlers::handle_packet; +use crate::udp::MAX_PACKET_SIZE; -pub struct UdpServer { +pub struct Udp { socket: Arc<UdpSocket>, - tracker: Arc<TorrentTracker>, + tracker: Arc<tracker::Tracker>, } -impl UdpServer { - pub async fn new(tracker: Arc<TorrentTracker>, bind_address: &str) -> tokio::io::Result<UdpServer> { +impl Udp { + /// # Errors + /// + /// Will return `Err` if unable to bind to the supplied `bind_address`. + pub async fn new(tracker: Arc<tracker::Tracker>, bind_address: &str) -> tokio::io::Result<Udp> { let socket = UdpSocket::bind(bind_address).await?; - Ok(UdpServer { + Ok(Udp { socket: Arc::new(socket), tracker, }) } + /// # Panics + /// + /// Will panic if unable to resolve the `local_addr` from the supplied `socket`. pub async fn start(&self) { loop { let mut data = [0; MAX_PACKET_SIZE]; @@ -42,7 +49,7 @@ impl UdpServer { debug!("{:?}", payload); let response = handle_packet(remote_addr, payload, tracker).await; - UdpServer::send_response(socket, remote_addr, response).await; + Udp::send_response(socket, remote_addr, response).await; } } } @@ -56,11 +63,12 @@ impl UdpServer { match response.write(&mut cursor) { Ok(_) => { + #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; let inner = cursor.get_ref(); debug!("{:?}", &inner[..position]); - UdpServer::send_packet(socket, &remote_addr, &inner[..position]).await; + Udp::send_packet(socket, &remote_addr, &inner[..position]).await; } Err(_) => { debug!("could not write response to bytes."); @@ -70,6 +78,6 @@ impl UdpServer { async fn send_packet(socket: Arc<UdpSocket>, remote_addr: &SocketAddr, payload: &[u8]) { // doesn't matter if it reaches or not - let _ = socket.send_to(payload, remote_addr).await; + drop(socket.send_to(payload, remote_addr).await); } } diff --git a/tests/api.rs b/tests/api.rs index 475da9a24..706cd0b8d 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -1,6 +1,6 @@ /// Integration tests for the tracker API /// -/// cargo test tracker_api -- --nocapture +/// cargo test `tracker_api` -- --nocapture extern crate rand; mod common; @@ -16,16 +16,17 @@ mod tracker_api { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use reqwest::Response; use tokio::task::JoinHandle; - use torrust_tracker::api::resources::auth_key_resource::AuthKeyResource; - use torrust_tracker::api::resources::stats_resource::StatsResource; - use torrust_tracker::api::resources::torrent_resource::{TorrentListItemResource, TorrentPeerResource, TorrentResource}; + use torrust_tracker::api::resource; + use torrust_tracker::api::resource::auth_key::AuthKey; + use torrust_tracker::api::resource::stats::Stats; + use torrust_tracker::api::resource::torrent::{self, Torrent}; + use torrust_tracker::config::Configuration; use torrust_tracker::jobs::tracker_api; - use torrust_tracker::peer::TorrentPeer; use torrust_tracker::protocol::clock::DurationSinceUnixEpoch; - use torrust_tracker::tracker::key::AuthKey; - use torrust_tracker::tracker::statistics::StatsTracker; - use torrust_tracker::tracker::TorrentTracker; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration, InfoHash, PeerId}; + use torrust_tracker::protocol::info_hash::InfoHash; + use torrust_tracker::tracker::statistics::Keeper; + use torrust_tracker::tracker::{auth, peer}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; @@ -43,7 +44,7 @@ mod tracker_api { assert!(api_server .tracker .unwrap() - .verify_auth_key(&AuthKey::from(auth_key)) + .verify_auth_key(&auth::Key::from(auth_key)) .await .is_ok()); } @@ -103,7 +104,7 @@ mod tracker_api { assert_eq!( torrent_resource, - TorrentResource { + Torrent { info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), 
seeders: 1, completed: 0, @@ -165,7 +166,7 @@ mod tracker_api { assert_eq!( stats_resource, - StatsResource { + Stats { torrents: 1, seeders: 1, completed: 0, @@ -186,17 +187,17 @@ mod tracker_api { ); } - fn sample_torrent_peer() -> (TorrentPeer, TorrentPeerResource) { - let torrent_peer = TorrentPeer { - peer_id: PeerId(*b"-qB00000000000000000"), + fn sample_torrent_peer() -> (peer::Peer, resource::peer::Peer) { + let torrent_peer = peer::Peer { + peer_id: peer::Id(*b"-qB00000000000000000"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1669397478934, 0), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes(0), downloaded: NumberOfBytes(0), left: NumberOfBytes(0), event: AnnounceEvent::Started, }; - let torrent_peer_resource = TorrentPeerResource::from(torrent_peer); + let torrent_peer_resource = resource::peer::Peer::from(torrent_peer); (torrent_peer, torrent_peer_resource) } @@ -235,7 +236,7 @@ mod tracker_api { struct ApiServer { pub started: AtomicBool, pub job: Option>, - pub tracker: Option>, + pub tracker: Option>, pub connection_info: Option, } @@ -274,10 +275,10 @@ mod tracker_api { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); // Initialize stats tracker - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) @@ -286,7 +287,7 @@ mod tracker_api { self.tracker = Some(tracker.clone()); // Initialize logging - logging::setup_logging(&configuration); + logging::setup(&configuration); // Start the HTTP API job self.job = Some(tracker_api::start_job(&configuration, tracker).await); @@ -309,7 +310,7 @@ mod tracker_api { Self { connection_info } } - pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKeyResource { + pub async fn generate_auth_key(&self, seconds_valid: i32) -> AuthKey { let url = format!( "http://{}/api/key/{}?token={}", &self.connection_info.bind_address, &seconds_valid, &self.connection_info.api_token @@ -325,7 +326,7 @@ mod tracker_api { reqwest::Client::new().post(url.clone()).send().await.unwrap() } - pub async fn get_torrent(&self, info_hash: &str) -> TorrentResource { + pub async fn get_torrent(&self, info_hash: &str) -> Torrent { let url = format!( "http://{}/api/torrent/{}?token={}", &self.connection_info.bind_address, &info_hash, &self.connection_info.api_token @@ -337,12 +338,12 @@ mod tracker_api { .send() .await .unwrap() - .json::() + .json::() .await .unwrap() } - pub async fn get_torrents(&self) -> Vec { + pub async fn get_torrents(&self) -> Vec { let url = format!( "http://{}/api/torrents?token={}", &self.connection_info.bind_address, &self.connection_info.api_token @@ -354,12 +355,12 @@ mod tracker_api { .send() .await .unwrap() - .json::>() + .json::>() .await .unwrap() } - pub async fn get_tracker_statistics(&self) -> StatsResource { + pub async fn get_tracker_statistics(&self) -> Stats { let url = format!( "http://{}/api/stats?token={}", &self.connection_info.bind_address, &self.connection_info.api_token @@ -371,7 +372,7 @@ mod tracker_api { .send() .await .unwrap() - .json::() + .json::() .await 
.unwrap() } diff --git a/tests/udp.rs b/tests/udp.rs index ab96259c5..8bad37dbe 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -1,6 +1,6 @@ /// Integration tests for UDP tracker server /// -/// cargo test udp_tracker_server -- --nocapture +/// cargo test `udp_tracker_server` -- --nocapture extern crate rand; mod common; @@ -18,11 +18,11 @@ mod udp_tracker_server { }; use tokio::net::UdpSocket; use tokio::task::JoinHandle; + use torrust_tracker::config::Configuration; use torrust_tracker::jobs::udp_tracker; - use torrust_tracker::tracker::statistics::StatsTracker; - use torrust_tracker::tracker::TorrentTracker; + use torrust_tracker::tracker::statistics::Keeper; use torrust_tracker::udp::MAX_PACKET_SIZE; - use torrust_tracker::{ephemeral_instance_keys, logging, static_time, Configuration}; + use torrust_tracker::{ephemeral_instance_keys, logging, static_time, tracker}; use crate::common::ephemeral_random_port; @@ -48,7 +48,7 @@ mod udp_tracker_server { } } - pub async fn start(&mut self, configuration: Arc) { + pub fn start(&mut self, configuration: &Arc) { if !self.started.load(Ordering::Relaxed) { // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -57,10 +57,10 @@ mod udp_tracker_server { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); // Initialize stats tracker - let (stats_event_sender, stats_repository) = StatsTracker::new_active_instance(); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); // Initialize Torrust tracker - let tracker = match TorrentTracker::new(configuration.clone(), Some(stats_event_sender), stats_repository) { + let tracker = match tracker::Tracker::new(&configuration.clone(), Some(stats_event_sender), stats_repository) { Ok(tracker) => Arc::new(tracker), Err(error) => { panic!("{}", error) @@ -68,7 +68,7 @@ mod udp_tracker_server { }; // Initialize logging - logging::setup_logging(&configuration); + logging::setup(configuration); let udp_tracker_config = &configuration.udp_trackers[0]; @@ -82,9 +82,9 @@ mod udp_tracker_server { } } - async fn new_running_udp_server(configuration: Arc) -> UdpServer { + fn new_running_udp_server(configuration: &Arc) -> UdpServer { let mut udp_server = UdpServer::new(); - udp_server.start(configuration).await; + udp_server.start(configuration); udp_server } @@ -101,7 +101,7 @@ mod udp_tracker_server { } async fn connect(&self, remote_address: &str) { - self.socket.connect(remote_address).await.unwrap() + self.socket.connect(remote_address).await.unwrap(); } async fn send(&self, bytes: &[u8]) -> usize { @@ -115,7 +115,7 @@ mod udp_tracker_server { } } - /// Creates a new UdpClient connected to a Udp server + /// Creates a new `UdpClient` connected to a Udp server async fn new_connected_udp_client(remote_address: &str) -> UdpClient { let client = UdpClient::bind(&source_address(ephemeral_random_port())).await; client.connect(remote_address).await; @@ -134,12 +134,13 @@ mod udp_tracker_server { let request_data = match request.write(&mut cursor) { Ok(_) => { + #[allow(clippy::cast_possible_truncation)] let position = cursor.position() as usize; let inner_request_buffer = cursor.get_ref(); // Return slice which contains written request data &inner_request_buffer[..position] } - Err(_) => panic!("could not write request to bytes."), + Err(e) => panic!("could not write request to bytes: {e}."), }; self.udp_client.send(request_data).await @@ -154,7 +155,7 @@ mod udp_tracker_server { } } - /// Creates a new UdpTrackerClient connected to a Udp 
Tracker server + /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server async fn new_connected_udp_tracker_client(remote_address: &str) -> UdpTrackerClient { let udp_client = new_connected_udp_client(remote_address).await; UdpTrackerClient { udp_client } @@ -199,7 +200,7 @@ mod udp_tracker_server { async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_client(&udp_server.bind_address.unwrap()).await; @@ -216,7 +217,7 @@ mod udp_tracker_server { async fn should_return_a_connect_response_when_the_client_sends_a_connection_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; @@ -248,7 +249,7 @@ mod udp_tracker_server { async fn should_return_an_announce_response_when_the_client_sends_an_announce_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await; @@ -282,7 +283,7 @@ mod udp_tracker_server { async fn should_return_a_scrape_response_when_the_client_sends_a_scrape_request() { let configuration = tracker_configuration(); - let udp_server = new_running_udp_server(configuration).await; + let udp_server = new_running_udp_server(&configuration); let client = new_connected_udp_tracker_client(&udp_server.bind_address.unwrap()).await;
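
Closing reviewer note (not part of the patch): several test helpers above (`UdpServer::start`, `new_running_udp_server`) lose their `async` keyword because their bodies never await anything; that is clippy's pedantic `unused_async` lint at work. A tiny illustration with placeholder functions, not the test code itself:

```rust
// Flagged by clippy::unused_async (pedantic group): an `async fn` whose
// body never awaits forces every caller through a pointless Future.
async fn configure_async() -> u32 {
    42
}

// The fix applied above: drop `async` and call the function directly.
fn configure() -> u32 {
    42
}

#[tokio::main]
async fn main() {
    assert_eq!(configure_async().await, configure());
}
```
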