diff --git a/Cargo.lock b/Cargo.lock index 1a14cd6b2f7..af52affd643 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4899,6 +4899,7 @@ dependencies = [ "tendermint-rpc", "thiserror 2.0.12", "time", + "tracing", "ts-rs", "utoipa", ] @@ -6231,6 +6232,7 @@ dependencies = [ "nym-wireguard", "nym-wireguard-types", "rand 0.8.5", + "rand_chacha 0.3.1", "serde", "serde_json", "sha2 0.10.9", @@ -6470,6 +6472,7 @@ version = "0.3.0" dependencies = [ "pem", "tracing", + "zeroize", ] [[package]] @@ -6729,7 +6732,6 @@ dependencies = [ "nym-topology", "rand 0.8.5", "rand_chacha 0.3.1", - "serde", "thiserror 2.0.12", "tracing", "wasm-bindgen", @@ -6773,8 +6775,8 @@ dependencies = [ name = "nym-sphinx-forwarding" version = "0.1.0" dependencies = [ - "nym-outfox", "nym-sphinx-addressing", + "nym-sphinx-anonymous-replies", "nym-sphinx-params", "nym-sphinx-types", "thiserror 2.0.12", diff --git a/Cargo.toml b/Cargo.toml index cd9914aa612..0b11cb64609 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -344,7 +344,6 @@ utoipauto = "0.2" uuid = "*" vergen = { version = "=8.3.1", default-features = false } walkdir = "2" -wasm-bindgen-test = "0.3.49" x25519-dalek = "2.0.0" zeroize = "1.7.0" @@ -392,6 +391,7 @@ serde-wasm-bindgen = "0.6.5" tsify = "0.4.5" wasm-bindgen = "0.2.99" wasm-bindgen-futures = "0.4.49" +wasm-bindgen-test = "0.3.49" wasmtimer = "0.4.1" web-sys = "0.3.76" diff --git a/common/client-core/src/client/real_messages_control/message_handler.rs b/common/client-core/src/client/real_messages_control/message_handler.rs index f7cee9f3e46..c88a6257ce6 100644 --- a/common/client-core/src/client/real_messages_control/message_handler.rs +++ b/common/client-core/src/client/real_messages_control/message_handler.rs @@ -12,7 +12,7 @@ use crate::client::topology_control::{TopologyAccessor, TopologyReadPermit}; use nym_sphinx::acknowledgements::AckKey; use nym_sphinx::addressing::clients::Recipient; use nym_sphinx::anonymous_replies::requests::{AnonymousSenderTag, RepliableMessage, ReplyMessage}; 
-use nym_sphinx::anonymous_replies::{ReplySurb, SurbEncryptionKey}; +use nym_sphinx::anonymous_replies::ReplySurbWithKeyRotation; use nym_sphinx::chunking::fragment::{Fragment, FragmentIdentifier}; use nym_sphinx::message::NymMessage; use nym_sphinx::params::{PacketSize, PacketType}; @@ -44,7 +44,10 @@ pub enum PreparationError { } impl PreparationError { - fn return_surbs(self, returned_surbs: Vec) -> SurbWrappedPreparationError { + fn return_surbs( + self, + returned_surbs: Vec, + ) -> SurbWrappedPreparationError { SurbWrappedPreparationError { source: self, returned_surbs: Some(returned_surbs), @@ -58,7 +61,7 @@ pub struct SurbWrappedPreparationError { #[source] source: PreparationError, - returned_surbs: Option>, + returned_surbs: Option>, } impl From for SurbWrappedPreparationError @@ -268,10 +271,10 @@ where } } - async fn generate_reply_surbs_with_keys( + async fn generate_reply_surbs( &mut self, amount: usize, - ) -> Result<(Vec, Vec), PreparationError> { + ) -> Result, PreparationError> { let topology_permit = self.topology_access.get_read_permit().await; let topology = self.get_topology(&topology_permit)?; @@ -281,19 +284,14 @@ where topology, )?; - let reply_keys = reply_surbs - .iter() - .map(|s| *s.encryption_key()) - .collect::>(); - - Ok((reply_surbs, reply_keys)) + Ok(reply_surbs) } pub(crate) async fn try_send_single_surb_message( &mut self, target: AnonymousSenderTag, message: ReplyMessage, - reply_surb: ReplySurb, + reply_surb: ReplySurbWithKeyRotation, is_extra_surb_request: bool, ) -> Result<(), SurbWrappedPreparationError> { let msg = NymMessage::new_reply(message); @@ -347,7 +345,7 @@ where pub(crate) async fn try_request_additional_reply_surbs( &mut self, from: AnonymousSenderTag, - reply_surb: ReplySurb, + reply_surb: ReplySurbWithKeyRotation, amount: u32, ) -> Result<(), SurbWrappedPreparationError> { debug!("requesting {amount} reply SURBs from {from}"); @@ -387,7 +385,7 @@ where &mut self, target: AnonymousSenderTag, fragments: Vec, - 
reply_surbs: Vec, + reply_surbs: Vec, lane: TransmissionLane, ) -> Result<(), SurbWrappedPreparationError> { // TODO: technically this is performing an unnecessary cloning, but in the grand scheme of things @@ -404,7 +402,7 @@ where &mut self, target: AnonymousSenderTag, fragments: Vec<(TransmissionLane, FragmentWithMaxRetransmissions)>, - reply_surbs: Vec, + reply_surbs: Vec, ) -> Result<(), SurbWrappedPreparationError> { let prepared_fragments = self .prepare_reply_chunks_for_sending( @@ -541,8 +539,12 @@ where ) -> Result<(), PreparationError> { debug!("Sending additional reply SURBs with packet type {packet_type}"); let sender_tag = self.get_or_create_sender_tag(&recipient); - let (reply_surbs, reply_keys) = - self.generate_reply_surbs_with_keys(amount as usize).await?; + let reply_surbs = self.generate_reply_surbs(amount as usize).await?; + + let reply_keys = reply_surbs + .iter() + .map(|s| *s.encryption_key()) + .collect::>(); let message = NymMessage::new_repliable(RepliableMessage::new_additional_surbs( self.config.use_legacy_sphinx_format, @@ -579,9 +581,12 @@ where ) -> Result<(), SurbWrappedPreparationError> { debug!("Sending message with reply SURBs with packet type {packet_type}"); let sender_tag = self.get_or_create_sender_tag(&recipient); - let (reply_surbs, reply_keys) = self - .generate_reply_surbs_with_keys(num_reply_surbs as usize) - .await?; + let reply_surbs = self.generate_reply_surbs(num_reply_surbs as usize).await?; + + let reply_keys = reply_surbs + .iter() + .map(|s| *s.encryption_key()) + .collect::>(); let message = NymMessage::new_repliable(RepliableMessage::new_data( self.config.use_legacy_sphinx_format, @@ -629,7 +634,7 @@ where pub(crate) async fn prepare_reply_chunks_for_sending( &mut self, fragments: Vec, - reply_surbs: Vec, + reply_surbs: Vec, ) -> Result, SurbWrappedPreparationError> { debug_assert_eq!( fragments.len(), @@ -665,7 +670,7 @@ where pub(crate) async fn try_prepare_single_reply_chunk_for_sending( &mut self, - 
reply_surb: ReplySurb, + reply_surb: ReplySurbWithKeyRotation, chunk: Fragment, ) -> Result { let topology_permit = self.topology_access.get_read_permit().await; diff --git a/common/client-core/src/client/replies/reply_controller/mod.rs b/common/client-core/src/client/replies/reply_controller/mod.rs index 2cf7ac9eaad..c043af1bdfa 100644 --- a/common/client-core/src/client/replies/reply_controller/mod.rs +++ b/common/client-core/src/client/replies/reply_controller/mod.rs @@ -11,7 +11,7 @@ use futures::StreamExt; use log::{debug, error, info, trace, warn}; use nym_sphinx::addressing::clients::Recipient; use nym_sphinx::anonymous_replies::requests::AnonymousSenderTag; -use nym_sphinx::anonymous_replies::ReplySurb; +use nym_sphinx::anonymous_replies::ReplySurbWithKeyRotation; use nym_sphinx::chunking::fragment::FragmentIdentifier; use nym_task::connections::{ConnectionId, TransmissionLane}; use nym_task::TaskClient; @@ -499,7 +499,7 @@ where async fn handle_received_surbs( &mut self, from: AnonymousSenderTag, - reply_surbs: Vec, + reply_surbs: Vec, from_surb_request: bool, ) { trace!("handling received surbs"); diff --git a/common/client-core/src/client/replies/reply_controller/requests.rs b/common/client-core/src/client/replies/reply_controller/requests.rs index 30afc2585aa..26250347100 100644 --- a/common/client-core/src/client/replies/reply_controller/requests.rs +++ b/common/client-core/src/client/replies/reply_controller/requests.rs @@ -6,7 +6,7 @@ use futures::channel::{mpsc, oneshot}; use log::error; use nym_sphinx::addressing::clients::Recipient; use nym_sphinx::anonymous_replies::requests::AnonymousSenderTag; -use nym_sphinx::anonymous_replies::ReplySurb; +use nym_sphinx::anonymous_replies::ReplySurbWithKeyRotation; use nym_task::connections::{ConnectionId, TransmissionLane}; use std::sync::Weak; @@ -81,7 +81,7 @@ impl ReplyControllerSender { pub(crate) fn send_additional_surbs( &self, sender_tag: AnonymousSenderTag, - reply_surbs: Vec, + reply_surbs: Vec, 
from_surb_request: bool, ) -> Result<(), ReplyControllerSenderError> { self.0 @@ -167,7 +167,7 @@ pub enum ReplyControllerMessage { AdditionalSurbs { sender_tag: AnonymousSenderTag, - reply_surbs: Vec, + reply_surbs: Vec, from_surb_request: bool, }, diff --git a/common/client-core/src/client/topology_control/nym_api_provider.rs b/common/client-core/src/client/topology_control/nym_api_provider.rs index 69a4a8107df..b49db12864e 100644 --- a/common/client-core/src/client/topology_control/nym_api_provider.rs +++ b/common/client-core/src/client/topology_control/nym_api_provider.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use log::{debug, error, warn}; use nym_topology::provider_trait::TopologyProvider; -use nym_topology::NymTopology; +use nym_topology::{NymTopology, NymTopologyMetadata}; use nym_validator_client::UserAgent; use rand::prelude::SliceRandom; use rand::thread_rng; @@ -89,55 +89,84 @@ impl NymApiTopologyProvider { let rewarded_set_fut = self.validator_client.get_current_rewarded_set(); let topology = if self.config.use_extended_topology { - let all_nodes_fut = self.validator_client.get_all_basic_nodes(); + let all_nodes_fut = self.validator_client.get_all_basic_nodes_with_metadata(); // Join rewarded_set_fut and all_nodes_fut concurrently - let (rewarded_set, all_nodes) = futures::try_join!(rewarded_set_fut, all_nodes_fut) + let (rewarded_set, all_nodes_res) = futures::try_join!(rewarded_set_fut, all_nodes_fut) .inspect_err(|err| error!("failed to get network nodes: {err}")) .ok()?; + let metadata = all_nodes_res.metadata; + let all_nodes = all_nodes_res.nodes; + debug!( "there are {} nodes on the network (before filtering)", all_nodes.len() ); - let mut topology = NymTopology::new_empty(rewarded_set); - topology.add_additional_nodes(all_nodes.iter().filter(|n| { - n.performance.round_to_integer() >= self.config.min_node_performance() - })); - - topology + let nodes_filtered = all_nodes + .into_iter() + .filter(|n| n.performance.round_to_integer() >= 
self.config.min_node_performance()) + .collect::>(); + + NymTopology::new( + NymTopologyMetadata::new(metadata.rotation_id, metadata.absolute_epoch_id), + rewarded_set, + Vec::new(), + ) + .with_skimmed_nodes(&nodes_filtered) } else { // if we're not using extended topology, we're only getting active set mixnodes and gateways let mixnodes_fut = self .validator_client - .get_all_basic_active_mixing_assigned_nodes(); + .get_all_basic_active_mixing_assigned_nodes_with_metadata(); // TODO: we really should be getting ACTIVE gateways only - let gateways_fut = self.validator_client.get_all_basic_entry_assigned_nodes(); + let gateways_fut = self + .validator_client + .get_all_basic_entry_assigned_nodes_v2(); - let (rewarded_set, mixnodes, gateways) = + let (rewarded_set, mixnodes_res, gateways_res) = futures::try_join!(rewarded_set_fut, mixnodes_fut, gateways_fut) .inspect_err(|err| { error!("failed to get network nodes: {err}"); }) .ok()?; + let metadata = mixnodes_res.metadata; + let mixnodes = mixnodes_res.nodes; + + if gateways_res.metadata != metadata { + warn!("inconsistent nodes metadata between mixnodes and gateways calls! 
{metadata:?} and {:?}", gateways_res.metadata); + return None; + } + + let gateways = gateways_res.nodes; + debug!( "there are {} mixnodes and {} gateways in total (before performance filtering)", mixnodes.len(), gateways.len() ); - let mut topology = NymTopology::new_empty(rewarded_set); - topology.add_additional_nodes(mixnodes.iter().filter(|m| { - m.performance.round_to_integer() >= self.config.min_mixnode_performance - })); - topology.add_additional_nodes(gateways.iter().filter(|m| { - m.performance.round_to_integer() >= self.config.min_gateway_performance - })); - - topology + let mut nodes = Vec::new(); + for mix in mixnodes { + if mix.performance.round_to_integer() >= self.config.min_mixnode_performance { + nodes.push(mix) + } + } + for gateway in gateways { + if gateway.performance.round_to_integer() >= self.config.min_gateway_performance { + nodes.push(gateway) + } + } + + NymTopology::new( + NymTopologyMetadata::new(metadata.rotation_id, metadata.absolute_epoch_id), + rewarded_set, + Vec::new(), + ) + .with_skimmed_nodes(&nodes) }; if !topology.is_minimally_routable() { diff --git a/common/client-core/src/init/helpers.rs b/common/client-core/src/init/helpers.rs index b007243ce6e..f2cfb98f926 100644 --- a/common/client-core/src/init/helpers.rs +++ b/common/client-core/src/init/helpers.rs @@ -107,7 +107,7 @@ pub async fn gateways_for_init( log::debug!("Fetching list of gateways from: {nym_api}"); - let gateways = client.get_all_basic_entry_assigned_nodes().await?; + let gateways = client.get_all_basic_entry_assigned_nodes_v2().await?.nodes; info!("nym api reports {} gateways", gateways.len()); log::trace!("Gateways: {:#?}", gateways); diff --git a/common/client-core/surb-storage/fs_surbs_migrations/20250425120000_add_surb_key_rotation.sql b/common/client-core/surb-storage/fs_surbs_migrations/20250425120000_add_surb_key_rotation.sql new file mode 100644 index 00000000000..8dd70e2e9dc --- /dev/null +++ 
b/common/client-core/surb-storage/fs_surbs_migrations/20250425120000_add_surb_key_rotation.sql @@ -0,0 +1,8 @@ +/* + * Copyright 2025 - Nym Technologies SA + * SPDX-License-Identifier: Apache-2.0 + */ + +-- default value of 0 implies 'unknown' variant +ALTER TABLE reply_surb + ADD COLUMN encoded_key_rotation TINYINT NOT NULL DEFAULT 0; \ No newline at end of file diff --git a/common/client-core/surb-storage/src/backend/fs_backend/manager.rs b/common/client-core/surb-storage/src/backend/fs_backend/manager.rs index 02316ddb7fa..c8f39fdf964 100644 --- a/common/client-core/surb-storage/src/backend/fs_backend/manager.rs +++ b/common/client-core/surb-storage/src/backend/fs_backend/manager.rs @@ -205,7 +205,10 @@ impl StorageManager { ) -> Result, sqlx::Error> { sqlx::query_as!( StoredReplySurb, - "SELECT * FROM reply_surb WHERE reply_surb_sender_id = ?", + r#" + SELECT reply_surb_sender_id, reply_surb, encoded_key_rotation as "encoded_key_rotation: u8" FROM reply_surb + WHERE reply_surb_sender_id = ? 
+ "#, sender_id ) .fetch_all(&self.connection_pool) @@ -230,10 +233,11 @@ impl StorageManager { ) -> Result<(), sqlx::Error> { sqlx::query!( r#" - INSERT INTO reply_surb(reply_surb_sender_id, reply_surb) VALUES (?, ?); + INSERT INTO reply_surb(reply_surb_sender_id, reply_surb, encoded_key_rotation) VALUES (?, ?, ?); "#, stored_reply_surb.reply_surb_sender_id, - stored_reply_surb.reply_surb + stored_reply_surb.reply_surb, + stored_reply_surb.encoded_key_rotation ) .execute(&self.connection_pool) .await?; diff --git a/common/client-core/surb-storage/src/backend/fs_backend/models.rs b/common/client-core/surb-storage/src/backend/fs_backend/models.rs index 8acf83f6768..c91c14b5916 100644 --- a/common/client-core/surb-storage/src/backend/fs_backend/models.rs +++ b/common/client-core/surb-storage/src/backend/fs_backend/models.rs @@ -8,8 +8,10 @@ use nym_crypto::Digest; use nym_sphinx::addressing::clients::{Recipient, RecipientBytes}; use nym_sphinx::anonymous_replies::encryption_key::EncryptionKeyDigest; use nym_sphinx::anonymous_replies::requests::{AnonymousSenderTag, SENDER_TAG_SIZE}; -use nym_sphinx::anonymous_replies::{ReplySurb, SurbEncryptionKey, SurbEncryptionKeySize}; -use nym_sphinx::params::ReplySurbKeyDigestAlgorithm; +use nym_sphinx::anonymous_replies::{ + ReplySurb, ReplySurbWithKeyRotation, SurbEncryptionKey, SurbEncryptionKeySize, +}; +use nym_sphinx::params::{ReplySurbKeyDigestAlgorithm, SphinxKeyRotation}; #[derive(Debug, Clone)] pub struct StoredSenderTag { @@ -146,24 +148,40 @@ impl TryFrom for (AnonymousSenderTag, i64) { pub struct StoredReplySurb { pub reply_surb_sender_id: i64, pub reply_surb: Vec, + + // encodes only whether it's 'even', 'odd' or 'unknown' (default) + // and not the whole id because that's redundant + pub encoded_key_rotation: u8, } impl StoredReplySurb { - pub fn new(reply_surb_sender_id: i64, reply_surb: &ReplySurb) -> Self { + pub fn new(reply_surb_sender_id: i64, reply_surb: &ReplySurbWithKeyRotation) -> Self { StoredReplySurb { 
reply_surb_sender_id, - reply_surb: reply_surb.to_bytes(), + reply_surb: reply_surb.inner_reply_surb().to_bytes(), + encoded_key_rotation: reply_surb.key_rotation() as u8, } } } -impl TryFrom for ReplySurb { +impl TryFrom for ReplySurbWithKeyRotation { type Error = StorageError; fn try_from(value: StoredReplySurb) -> Result { - ReplySurb::from_bytes(&value.reply_surb).map_err(|err| StorageError::CorruptedData { - details: format!("failed to recover the reply surb: {err}"), - }) + let key_rotation = + SphinxKeyRotation::try_from(value.encoded_key_rotation).map_err(|err| { + StorageError::CorruptedData { + details: format!("stored key rotation was malformed: {err}"), + } + })?; + + let reply_surb = ReplySurb::from_bytes(&value.reply_surb).map_err(|err| { + StorageError::CorruptedData { + details: format!("failed to recover the reply surb: {err}"), + } + })?; + + Ok(reply_surb.with_key_rotation(key_rotation)) } } diff --git a/common/client-core/surb-storage/src/surb_storage.rs b/common/client-core/surb-storage/src/surb_storage.rs index 92050c01045..ac453b2aa15 100644 --- a/common/client-core/surb-storage/src/surb_storage.rs +++ b/common/client-core/surb-storage/src/surb_storage.rs @@ -5,7 +5,7 @@ use dashmap::iter::Iter; use dashmap::DashMap; use log::trace; use nym_sphinx::anonymous_replies::requests::AnonymousSenderTag; -use nym_sphinx::anonymous_replies::ReplySurb; +use nym_sphinx::anonymous_replies::ReplySurbWithKeyRotation; use std::collections::VecDeque; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; @@ -134,7 +134,7 @@ impl ReceivedReplySurbsMap { &self, target: &AnonymousSenderTag, amount: usize, - ) -> (Option>, usize) { + ) -> (Option>, usize) { if let Some(mut entry) = self.inner.data.get_mut(target) { let surbs_left = entry.items_left(); if surbs_left < self.min_surb_threshold() + amount { @@ -150,7 +150,7 @@ impl ReceivedReplySurbsMap { pub fn get_reply_surb_ignoring_threshold( &self, target: &AnonymousSenderTag, - ) -> 
Option<(Option, usize)> { + ) -> Option<(Option, usize)> { self.inner .data .get_mut(target) @@ -160,7 +160,7 @@ impl ReceivedReplySurbsMap { pub fn get_reply_surb( &self, target: &AnonymousSenderTag, - ) -> Option<(Option, usize)> { + ) -> Option<(Option, usize)> { self.inner.data.get_mut(target).map(|mut entry| { let surbs_left = entry.items_left(); if surbs_left < self.min_surb_threshold() { @@ -171,7 +171,7 @@ impl ReceivedReplySurbsMap { }) } - pub fn insert_surbs>( + pub fn insert_surbs>( &self, target: &AnonymousSenderTag, surbs: I, @@ -189,14 +189,14 @@ impl ReceivedReplySurbsMap { pub struct ReceivedReplySurbs { // in the future we'd probably want to put extra data here to indicate when the SURBs got received // so we could invalidate entries from the previous key rotations - data: VecDeque, + data: VecDeque, pending_reception: u32, surbs_last_received_at_timestamp: i64, } impl ReceivedReplySurbs { - fn new(initial_surbs: VecDeque) -> Self { + fn new(initial_surbs: VecDeque) -> Self { ReceivedReplySurbs { data: initial_surbs, pending_reception: 0, @@ -206,7 +206,7 @@ impl ReceivedReplySurbs { #[cfg(all(not(target_arch = "wasm32"), feature = "fs-surb-storage"))] pub fn new_retrieved( - surbs: Vec, + surbs: Vec, surbs_last_received_at_timestamp: i64, ) -> ReceivedReplySurbs { ReceivedReplySurbs { @@ -217,7 +217,7 @@ impl ReceivedReplySurbs { } #[cfg(all(not(target_arch = "wasm32"), feature = "fs-surb-storage"))] - pub fn surbs_ref(&self) -> &VecDeque { + pub fn surbs_ref(&self) -> &VecDeque { &self.data } @@ -243,7 +243,10 @@ impl ReceivedReplySurbs { self.pending_reception = 0; } - pub fn get_reply_surbs(&mut self, amount: usize) -> (Option>, usize) { + pub fn get_reply_surbs( + &mut self, + amount: usize, + ) -> (Option>, usize) { if self.items_left() < amount { (None, self.items_left()) } else { @@ -252,11 +255,11 @@ impl ReceivedReplySurbs { } } - pub fn get_reply_surb(&mut self) -> (Option, usize) { + pub fn get_reply_surb(&mut self) -> (Option, usize) 
{ (self.pop_surb(), self.items_left()) } - fn pop_surb(&mut self) -> Option { + fn pop_surb(&mut self) -> Option { self.data.pop_front() } @@ -265,7 +268,10 @@ impl ReceivedReplySurbs { } // realistically we're always going to be getting multiple surbs at once - pub fn insert_reply_surbs>(&mut self, surbs: I) { + pub fn insert_reply_surbs>( + &mut self, + surbs: I, + ) { let mut v = surbs.into_iter().collect::>(); trace!("storing {} surbs in the storage", v.len()); self.data.append(&mut v); diff --git a/common/client-libs/gateway-client/src/client/mod.rs b/common/client-libs/gateway-client/src/client/mod.rs index fc1676ab67c..eeadec5ea13 100644 --- a/common/client-libs/gateway-client/src/client/mod.rs +++ b/common/client-libs/gateway-client/src/client/mod.rs @@ -21,8 +21,8 @@ use nym_crypto::asymmetric::ed25519; use nym_gateway_requests::registration::handshake::client_handshake; use nym_gateway_requests::{ BinaryRequest, ClientControlRequest, ClientRequest, GatewayProtocolVersionExt, - SensitiveServerResponse, ServerResponse, SharedGatewayKey, SharedSymmetricKey, - CREDENTIAL_UPDATE_V2_PROTOCOL_VERSION, CURRENT_PROTOCOL_VERSION, + GatewayRequestsError, SensitiveServerResponse, ServerResponse, SharedGatewayKey, + SharedSymmetricKey, CREDENTIAL_UPDATE_V2_PROTOCOL_VERSION, CURRENT_PROTOCOL_VERSION, }; use nym_sphinx::forwarding::packet::MixPacket; use nym_statistics_common::clients::connection::ConnectionStatsEvent; @@ -662,6 +662,7 @@ impl GatewayClient { let supports_aes_gcm_siv = gw_protocol.supports_aes256_gcm_siv(); let supports_auth_v2 = gw_protocol.supports_authenticate_v2(); + let supports_key_rotation_info = gw_protocol.supports_key_rotation_packet(); if !supports_aes_gcm_siv { warn!("this gateway is on an old version that doesn't support AES256-GCM-SIV"); @@ -669,6 +670,9 @@ impl GatewayClient { if !supports_auth_v2 { warn!("this gateway is on an old version that doesn't support authentication v2") } + if !supports_key_rotation_info { + warn!("this gateway 
is on an old version that doesn't support key rotation packets") + } if self.authenticated { debug!("Already authenticated"); @@ -849,6 +853,22 @@ impl GatewayClient { } } + fn mix_packet_to_ws_message(&self, packet: MixPacket) -> Result { + // note: into_ws_message encrypts the requests and adds a MAC on it. Perhaps it should + // be more explicit in the naming? + let req = if self.negotiated_protocol.supports_key_rotation_packet() { + BinaryRequest::ForwardSphinxV2 { packet } + } else { + BinaryRequest::ForwardSphinx { packet } + }; + + req.into_ws_message( + self.shared_key + .as_ref() + .expect("no shared key present even though we're authenticated!"), + ) + } + pub async fn batch_send_mix_packets( &mut self, packets: Vec, @@ -877,13 +897,7 @@ impl GatewayClient { let messages: Result, _> = packets .into_iter() - .map(|mix_packet| { - BinaryRequest::ForwardSphinx { packet: mix_packet }.into_ws_message( - self.shared_key - .as_ref() - .expect("no shared key present even though we're authenticated!"), - ) - }) + .map(|mix_packet| self.mix_packet_to_ws_message(mix_packet)) .collect(); if let Err(err) = self @@ -949,13 +963,8 @@ impl GatewayClient { if !self.connection.is_established() { return Err(GatewayClientError::ConnectionNotEstablished); } - // note: into_ws_message encrypts the requests and adds a MAC on it. Perhaps it should - // be more explicit in the naming? 
- let msg = BinaryRequest::ForwardSphinx { packet: mix_packet }.into_ws_message( - self.shared_key - .as_ref() - .expect("no shared key present even though we're authenticated!"), - )?; + + let msg = self.mix_packet_to_ws_message(mix_packet)?; self.send_with_reconnection_on_failure(msg).await } diff --git a/common/client-libs/mixnet-client/src/client.rs b/common/client-libs/mixnet-client/src/client.rs index 8788a0aee70..1582c15bdaa 100644 --- a/common/client-libs/mixnet-client/src/client.rs +++ b/common/client-libs/mixnet-client/src/client.rs @@ -3,11 +3,9 @@ use dashmap::DashMap; use futures::StreamExt; -use nym_sphinx::addressing::nodes::NymNodeRoutingAddress; +use nym_sphinx::forwarding::packet::MixPacket; use nym_sphinx::framing::codec::NymCodec; use nym_sphinx::framing::packet::FramedNymPacket; -use nym_sphinx::params::PacketType; -use nym_sphinx::NymPacket; use std::io; use std::net::SocketAddr; use std::ops::Deref; @@ -49,12 +47,7 @@ impl Config { pub trait SendWithoutResponse { // Without response in this context means we will not listen for anything we might get back (not // that we should get anything), including any possible io errors - fn send_without_response( - &self, - address: NymNodeRoutingAddress, - packet: NymPacket, - packet_type: PacketType, - ) -> io::Result<()>; + fn send_without_response(&self, packet: MixPacket) -> io::Result<()>; } pub struct Client { @@ -65,7 +58,7 @@ pub struct Client { #[derive(Default, Clone)] pub struct ActiveConnections { - inner: Arc>, + inner: Arc>, } impl ActiveConnections { @@ -82,7 +75,7 @@ impl ActiveConnections { } impl Deref for ActiveConnections { - type Target = DashMap; + type Target = DashMap; fn deref(&self) -> &Self::Target { &self.inner } @@ -196,7 +189,7 @@ impl Client { } } - fn make_connection(&self, address: NymNodeRoutingAddress, pending_packet: FramedNymPacket) { + fn make_connection(&self, address: SocketAddr, pending_packet: FramedNymPacket) { let (sender, receiver) = 
mpsc::channel(self.config.maximum_connection_buffer_size); // this CAN'T fail because we just created the channel which has a non-zero capacity @@ -233,7 +226,7 @@ impl Client { connections_count.fetch_add(1, Ordering::SeqCst); ManagedConnection::new( - address.into(), + address, receiver, initial_connection_timeout, current_reconnection_attempt, @@ -246,18 +239,14 @@ impl Client { } impl SendWithoutResponse for Client { - fn send_without_response( - &self, - address: NymNodeRoutingAddress, - packet: NymPacket, - packet_type: PacketType, - ) -> io::Result<()> { - trace!("Sending packet to {address:?}"); - let framed_packet = FramedNymPacket::new(packet, packet_type); + fn send_without_response(&self, packet: MixPacket) -> io::Result<()> { + let address = packet.next_hop_address(); + trace!("Sending packet to {address}"); + let framed_packet = FramedNymPacket::from(packet); let Some(sender) = self.active_connections.get_mut(&address) else { // there was never a connection to begin with - debug!("establishing initial connection to {}", address); + debug!("establishing initial connection to {address}"); // it's not a 'big' error, but we did not manage to send the packet, but queue the packet // for sending for as soon as the connection is created self.make_connection(address, framed_packet); diff --git a/common/client-libs/validator-client/src/client.rs b/common/client-libs/validator-client/src/client.rs index ace9e396e61..ac3544ac232 100644 --- a/common/client-libs/validator-client/src/client.rs +++ b/common/client-libs/validator-client/src/client.rs @@ -25,7 +25,9 @@ use nym_api_requests::models::{ NymNodeDescription, RewardEstimationResponse, StakeSaturationResponse, }; use nym_api_requests::models::{LegacyDescribedGateway, MixNodeBondAnnotated}; -use nym_api_requests::nym_nodes::{NodesByAddressesResponse, SkimmedNode}; +use nym_api_requests::nym_nodes::{ + NodesByAddressesResponse, SkimmedNode, SkimmedNodesWithMetadata, +}; use 
nym_coconut_dkg_common::types::EpochId; use nym_http_api_client::UserAgent; use nym_mixnet_contract_common::EpochRewardedSet; @@ -46,6 +48,46 @@ use crate::rpc::http_client; #[cfg(feature = "http-client")] use crate::{DirectSigningHttpRpcValidatorClient, HttpRpcClient, QueryHttpRpcValidatorClient}; +// a simple helper macro to define to repeatedly call a paged query until a full response is constructed +macro_rules! collect_paged_skimmed_v2 { + ( $self:ident, $f: ident ) => {{ + // unroll first loop iteration in order to obtain the metadata + let mut page = 0; + let res = $self + .nym_api + .$f(false, Some(page), None, $self.use_bincode) + .await?; + let mut nodes = res.nodes.data; + let metadata = res.metadata; + + if res.nodes.pagination.total == nodes.len() { + return Ok(SkimmedNodesWithMetadata::new(nodes, metadata)); + } + + page += 1; + + loop { + let mut res = $self + .nym_api + .$f(false, Some(page), None, $self.use_bincode) + .await?; + + if metadata != res.metadata { + return Err(ValidatorClientError::InconsistentPagedMetadata); + } + + nodes.append(&mut res.nodes.data); + if nodes.len() < res.nodes.pagination.total { + page += 1 + } else { + break; + } + } + + Ok(SkimmedNodesWithMetadata::new(nodes, metadata)) + }}; +} + #[must_use] #[derive(Debug, Clone)] pub struct Config { @@ -425,103 +467,67 @@ impl NymApiClient { /// retrieve basic information for nodes are capable of operating as an entry gateway /// this includes legacy gateways and nym-nodes + #[deprecated(note = "use get_all_basic_entry_assigned_nodes_with_metadata instead")] pub async fn get_all_basic_entry_assigned_nodes( &self, ) -> Result, ValidatorClientError> { - // TODO: deal with paging in macro or some helper function or something, because it's the same pattern everywhere - let mut page = 0; - let mut nodes = Vec::new(); - - loop { - let mut res = self - .nym_api - .get_basic_entry_assigned_nodes(false, Some(page), None, self.use_bincode) - .await?; - - nodes.append(&mut 
res.nodes.data); - if nodes.len() < res.nodes.pagination.total { - page += 1 - } else { - break; - } - } + self.get_all_basic_entry_assigned_nodes_v2() + .await + .map(|res| res.nodes) + } - Ok(nodes) + pub async fn get_all_basic_entry_assigned_nodes_v2( + &self, + ) -> Result { + collect_paged_skimmed_v2!(self, get_basic_entry_assigned_nodes_v2) } /// retrieve basic information for nodes that got assigned 'mixing' node in this epoch /// this includes legacy mixnodes and nym-nodes + #[deprecated(note = "use get_all_basic_active_mixing_assigned_nodes_with_metadata instead")] pub async fn get_all_basic_active_mixing_assigned_nodes( &self, ) -> Result, ValidatorClientError> { - // TODO: deal with paging in macro or some helper function or something, because it's the same pattern everywhere - let mut page = 0; - let mut nodes = Vec::new(); - - loop { - let mut res = self - .nym_api - .get_basic_active_mixing_assigned_nodes(false, Some(page), None, self.use_bincode) - .await?; - - nodes.append(&mut res.nodes.data); - if nodes.len() < res.nodes.pagination.total { - page += 1 - } else { - break; - } - } + self.get_all_basic_active_mixing_assigned_nodes_with_metadata() + .await + .map(|res| res.nodes) + } - Ok(nodes) + pub async fn get_all_basic_active_mixing_assigned_nodes_with_metadata( + &self, + ) -> Result { + collect_paged_skimmed_v2!(self, get_basic_active_mixing_assigned_nodes_v2) } /// retrieve basic information for nodes are capable of operating as a mixnode /// this includes legacy mixnodes and nym-nodes + #[deprecated(note = "use get_all_basic_mixing_capable_nodes_with_metadata instead")] pub async fn get_all_basic_mixing_capable_nodes( &self, ) -> Result, ValidatorClientError> { - // TODO: deal with paging in macro or some helper function or something, because it's the same pattern everywhere - let mut page = 0; - let mut nodes = Vec::new(); - - loop { - let mut res = self - .nym_api - .get_basic_mixing_capable_nodes(false, Some(page), None, self.use_bincode) 
- .await?; - - nodes.append(&mut res.nodes.data); - if nodes.len() < res.nodes.pagination.total { - page += 1 - } else { - break; - } - } + self.get_all_basic_mixing_capable_nodes_with_metadata() + .await + .map(|res| res.nodes) + } - Ok(nodes) + pub async fn get_all_basic_mixing_capable_nodes_with_metadata( + &self, + ) -> Result { + collect_paged_skimmed_v2!(self, get_basic_mixing_capable_nodes_v2) } /// retrieve basic information for all bonded nodes on the network + #[deprecated(note = "use get_all_basic_nodes_with_metadata instead")] pub async fn get_all_basic_nodes(&self) -> Result, ValidatorClientError> { - // TODO: deal with paging in macro or some helper function or something, because it's the same pattern everywhere - let mut page = 0; - let mut nodes = Vec::new(); - - loop { - let mut res = self - .nym_api - .get_basic_nodes(false, Some(page), None, self.use_bincode) - .await?; - - nodes.append(&mut res.nodes.data); - if nodes.len() < res.nodes.pagination.total { - page += 1 - } else { - break; - } - } + self.get_all_basic_nodes_with_metadata() + .await + .map(|res| res.nodes) + } - Ok(nodes) + pub async fn get_all_basic_nodes_with_metadata( + &self, + ) -> Result { + collect_paged_skimmed_v2!(self, get_basic_nodes_v2) } pub async fn health(&self) -> Result { diff --git a/common/client-libs/validator-client/src/error.rs b/common/client-libs/validator-client/src/error.rs index 11bdb3d745f..6acbc73c788 100644 --- a/common/client-libs/validator-client/src/error.rs +++ b/common/client-libs/validator-client/src/error.rs @@ -22,6 +22,9 @@ pub enum ValidatorClientError { #[error("nyxd request failed: {0}")] NyxdError(#[from] crate::nyxd::error::NyxdError), + #[error("the response metadata has changed between pages")] + InconsistentPagedMetadata, + #[error("No validator API url has been provided")] NoAPIUrlAvailable, } diff --git a/common/client-libs/validator-client/src/nym_api/mod.rs b/common/client-libs/validator-client/src/nym_api/mod.rs index 
6922e53dd45..846712b5def 100644 --- a/common/client-libs/validator-client/src/nym_api/mod.rs +++ b/common/client-libs/validator-client/src/nym_api/mod.rs @@ -14,11 +14,12 @@ use nym_api_requests::ecash::models::{ use nym_api_requests::ecash::VerificationKeyResponse; use nym_api_requests::models::{ AnnotationResponse, ApiHealthResponse, BinaryBuildInformationOwned, ChainStatusResponse, - LegacyDescribedMixNode, NodePerformanceResponse, NodeRefreshBody, NymNodeDescription, - PerformanceHistoryResponse, RewardedSetResponse, + KeyRotationInfoResponse, LegacyDescribedMixNode, NodePerformanceResponse, NodeRefreshBody, + NymNodeDescription, PerformanceHistoryResponse, RewardedSetResponse, }; use nym_api_requests::nym_nodes::{ - NodesByAddressesRequestBody, NodesByAddressesResponse, PaginatedCachedNodesResponse, + NodesByAddressesRequestBody, NodesByAddressesResponse, PaginatedCachedNodesResponseV1, + PaginatedCachedNodesResponseV2, }; use nym_api_requests::pagination::PaginatedResponse; pub use nym_api_requests::{ @@ -62,7 +63,7 @@ pub trait NymApiClientExt: ApiClient { async fn health(&self) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::API_STATUS_ROUTES, routes::HEALTH, ], @@ -75,7 +76,7 @@ pub trait NymApiClientExt: ApiClient { async fn build_information(&self) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::API_STATUS_ROUTES, routes::BUILD_INFORMATION, ], @@ -87,7 +88,7 @@ pub trait NymApiClientExt: ApiClient { #[deprecated] #[instrument(level = "debug", skip(self))] async fn get_mixnodes(&self) -> Result, NymAPIError> { - self.get_json(&[routes::API_VERSION, routes::MIXNODES], NO_PARAMS) + self.get_json(&[routes::V1_API_VERSION, routes::MIXNODES], NO_PARAMS) .await } @@ -96,7 +97,7 @@ pub trait NymApiClientExt: ApiClient { async fn get_mixnodes_detailed(&self) -> Result, NymAPIError> { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::MIXNODES, 
routes::DETAILED, @@ -111,7 +112,7 @@ pub trait NymApiClientExt: ApiClient { async fn get_gateways_detailed(&self) -> Result, NymAPIError> { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::GATEWAYS, routes::DETAILED, @@ -128,7 +129,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result, NymAPIError> { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::GATEWAYS, routes::DETAILED_UNFILTERED, @@ -145,7 +146,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result, NymAPIError> { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::MIXNODES, routes::DETAILED_UNFILTERED, @@ -158,7 +159,7 @@ pub trait NymApiClientExt: ApiClient { #[deprecated] #[instrument(level = "debug", skip(self))] async fn get_gateways(&self) -> Result, NymAPIError> { - self.get_json(&[routes::API_VERSION, routes::GATEWAYS], NO_PARAMS) + self.get_json(&[routes::V1_API_VERSION, routes::GATEWAYS], NO_PARAMS) .await } @@ -166,7 +167,7 @@ pub trait NymApiClientExt: ApiClient { #[instrument(level = "debug", skip(self))] async fn get_gateways_described(&self) -> Result, NymAPIError> { self.get_json( - &[routes::API_VERSION, routes::GATEWAYS, routes::DESCRIBED], + &[routes::V1_API_VERSION, routes::GATEWAYS, routes::DESCRIBED], NO_PARAMS, ) .await @@ -176,7 +177,7 @@ pub trait NymApiClientExt: ApiClient { #[instrument(level = "debug", skip(self))] async fn get_mixnodes_described(&self) -> Result, NymAPIError> { self.get_json( - &[routes::API_VERSION, routes::MIXNODES, routes::DESCRIBED], + &[routes::V1_API_VERSION, routes::MIXNODES, routes::DESCRIBED], NO_PARAMS, ) .await @@ -201,7 +202,7 @@ pub trait NymApiClientExt: ApiClient { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::NYM_NODES_ROUTES, routes::NYM_NODES_PERFORMANCE_HISTORY, &*node_id.to_string(), @@ -229,7 +230,7 @@ pub trait NymApiClientExt: ApiClient { self.get_json( &[ - routes::API_VERSION, + 
routes::V1_API_VERSION, routes::NYM_NODES_ROUTES, routes::NYM_NODES_DESCRIBED, ], @@ -256,7 +257,7 @@ pub trait NymApiClientExt: ApiClient { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::NYM_NODES_ROUTES, routes::NYM_NODES_BONDED, ], @@ -270,7 +271,7 @@ pub trait NymApiClientExt: ApiClient { async fn get_basic_mixnodes(&self) -> Result, NymAPIError> { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, "unstable", routes::NYM_NODES_ROUTES, "mixnodes", @@ -286,7 +287,7 @@ pub trait NymApiClientExt: ApiClient { async fn get_basic_gateways(&self) -> Result, NymAPIError> { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, "unstable", routes::NYM_NODES_ROUTES, "gateways", @@ -301,7 +302,7 @@ pub trait NymApiClientExt: ApiClient { async fn get_rewarded_set(&self) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::NYM_NODES_ROUTES, routes::NYM_NODES_REWARDED_SET, ], @@ -312,6 +313,7 @@ pub trait NymApiClientExt: ApiClient { /// retrieve basic information for nodes are capable of operating as an entry gateway /// this includes legacy gateways and nym-nodes + #[deprecated(note = "use get_basic_entry_assigned_nodes_v2")] #[instrument(level = "debug", skip(self))] async fn get_basic_entry_assigned_nodes( &self, @@ -319,7 +321,7 @@ pub trait NymApiClientExt: ApiClient { page: Option, per_page: Option, use_bincode: bool, - ) -> Result, NymAPIError> { + ) -> Result, NymAPIError> { let mut params = Vec::new(); if no_legacy { @@ -340,7 +342,49 @@ pub trait NymApiClientExt: ApiClient { self.get_response( &[ - routes::API_VERSION, + routes::V1_API_VERSION, + "unstable", + routes::NYM_NODES_ROUTES, + "skimmed", + "entry-gateways", + "all", + ], + ¶ms, + ) + .await + } + + /// retrieve basic information for nodes are capable of operating as an entry gateway + /// this includes legacy gateways and nym-nodes + #[instrument(level = "debug", skip(self))] + async fn 
get_basic_entry_assigned_nodes_v2( + &self, + no_legacy: bool, + page: Option, + per_page: Option, + use_bincode: bool, + ) -> Result, NymAPIError> { + let mut params = Vec::new(); + + if no_legacy { + params.push(("no_legacy", "true".to_string())) + } + + if let Some(page) = page { + params.push(("page", page.to_string())) + } + + if let Some(per_page) = per_page { + params.push(("per_page", per_page.to_string())) + } + + if use_bincode { + params.push(("output", "bincode".to_string())) + } + + self.get_response( + &[ + routes::V2_API_VERSION, "unstable", routes::NYM_NODES_ROUTES, "skimmed", @@ -354,6 +398,7 @@ pub trait NymApiClientExt: ApiClient { /// retrieve basic information for nodes that got assigned 'mixing' node in this epoch /// this includes legacy mixnodes and nym-nodes + #[deprecated(note = "use get_basic_active_mixing_assigned_nodes_v2")] #[instrument(level = "debug", skip(self))] async fn get_basic_active_mixing_assigned_nodes( &self, @@ -361,7 +406,7 @@ pub trait NymApiClientExt: ApiClient { page: Option, per_page: Option, use_bincode: bool, - ) -> Result, NymAPIError> { + ) -> Result, NymAPIError> { let mut params = Vec::new(); if no_legacy { @@ -382,7 +427,7 @@ pub trait NymApiClientExt: ApiClient { self.get_response( &[ - routes::API_VERSION, + routes::V1_API_VERSION, "unstable", routes::NYM_NODES_ROUTES, "skimmed", @@ -397,13 +442,56 @@ pub trait NymApiClientExt: ApiClient { /// retrieve basic information for nodes that got assigned 'mixing' node in this epoch /// this includes legacy mixnodes and nym-nodes #[instrument(level = "debug", skip(self))] + async fn get_basic_active_mixing_assigned_nodes_v2( + &self, + no_legacy: bool, + page: Option, + per_page: Option, + use_bincode: bool, + ) -> Result, NymAPIError> { + let mut params = Vec::new(); + + if no_legacy { + params.push(("no_legacy", "true".to_string())) + } + + if let Some(page) = page { + params.push(("page", page.to_string())) + } + + if let Some(per_page) = per_page { + 
params.push(("per_page", per_page.to_string())) + } + + if use_bincode { + params.push(("output", "bincode".to_string())) + } + + self.get_response( + &[ + routes::V2_API_VERSION, + "unstable", + routes::NYM_NODES_ROUTES, + "skimmed", + "mixnodes", + "active", + ], + ¶ms, + ) + .await + } + + /// retrieve basic information for nodes that got assigned 'mixing' node in this epoch + /// this includes legacy mixnodes and nym-nodes + #[deprecated(note = "use get_basic_mixing_capable_nodes_v2")] + #[instrument(level = "debug", skip(self))] async fn get_basic_mixing_capable_nodes( &self, no_legacy: bool, page: Option, per_page: Option, use_bincode: bool, - ) -> Result, NymAPIError> { + ) -> Result, NymAPIError> { let mut params = Vec::new(); if no_legacy { @@ -424,7 +512,7 @@ pub trait NymApiClientExt: ApiClient { self.get_response( &[ - routes::API_VERSION, + routes::V1_API_VERSION, "unstable", routes::NYM_NODES_ROUTES, "skimmed", @@ -436,6 +524,49 @@ pub trait NymApiClientExt: ApiClient { .await } + /// retrieve basic information for nodes that got assigned 'mixing' node in this epoch + /// this includes legacy mixnodes and nym-nodes + #[instrument(level = "debug", skip(self))] + async fn get_basic_mixing_capable_nodes_v2( + &self, + no_legacy: bool, + page: Option, + per_page: Option, + use_bincode: bool, + ) -> Result, NymAPIError> { + let mut params = Vec::new(); + + if no_legacy { + params.push(("no_legacy", "true".to_string())) + } + + if let Some(page) = page { + params.push(("page", page.to_string())) + } + + if let Some(per_page) = per_page { + params.push(("per_page", per_page.to_string())) + } + + if use_bincode { + params.push(("output", "bincode".to_string())) + } + + self.get_response( + &[ + routes::V2_API_VERSION, + "unstable", + routes::NYM_NODES_ROUTES, + "skimmed", + "mixnodes", + "all", + ], + ¶ms, + ) + .await + } + + #[deprecated(note = "use get_basic_nodes_v2")] #[instrument(level = "debug", skip(self))] async fn get_basic_nodes( &self, @@ -443,7 
+574,45 @@ pub trait NymApiClientExt: ApiClient { page: Option, per_page: Option, use_bincode: bool, - ) -> Result, NymAPIError> { + ) -> Result, NymAPIError> { + let mut params = Vec::new(); + + if no_legacy { + params.push(("no_legacy", "true".to_string())) + } + + if let Some(page) = page { + params.push(("page", page.to_string())) + } + + if let Some(per_page) = per_page { + params.push(("per_page", per_page.to_string())) + } + + if use_bincode { + params.push(("output", "bincode".to_string())) + } + + self.get_response( + &[ + routes::V1_API_VERSION, + "unstable", + routes::NYM_NODES_ROUTES, + "skimmed", + ], + ¶ms, + ) + .await + } + + #[instrument(level = "debug", skip(self))] + async fn get_basic_nodes_v2( + &self, + no_legacy: bool, + page: Option, + per_page: Option, + use_bincode: bool, + ) -> Result, NymAPIError> { let mut params = Vec::new(); if no_legacy { @@ -464,7 +633,7 @@ pub trait NymApiClientExt: ApiClient { self.get_response( &[ - routes::API_VERSION, + routes::V2_API_VERSION, "unstable", routes::NYM_NODES_ROUTES, "skimmed", @@ -478,7 +647,7 @@ pub trait NymApiClientExt: ApiClient { #[instrument(level = "debug", skip(self))] async fn get_active_mixnodes(&self) -> Result, NymAPIError> { self.get_json( - &[routes::API_VERSION, routes::MIXNODES, routes::ACTIVE], + &[routes::V1_API_VERSION, routes::MIXNODES, routes::ACTIVE], NO_PARAMS, ) .await @@ -489,7 +658,7 @@ pub trait NymApiClientExt: ApiClient { async fn get_active_mixnodes_detailed(&self) -> Result, NymAPIError> { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::MIXNODES, routes::ACTIVE, @@ -504,7 +673,7 @@ pub trait NymApiClientExt: ApiClient { #[instrument(level = "debug", skip(self))] async fn get_rewarded_mixnodes(&self) -> Result, NymAPIError> { self.get_json( - &[routes::API_VERSION, routes::MIXNODES, routes::REWARDED], + &[routes::V1_API_VERSION, routes::MIXNODES, routes::REWARDED], NO_PARAMS, ) .await @@ -518,7 +687,7 @@ pub trait 
NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::MIXNODE, &mix_id.to_string(), @@ -537,7 +706,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::GATEWAY, identity, @@ -556,7 +725,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::MIXNODE, &mix_id.to_string(), @@ -575,7 +744,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::GATEWAY, identity, @@ -593,7 +762,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result, NymAPIError> { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS, routes::MIXNODES, routes::REWARDED, @@ -614,7 +783,7 @@ pub trait NymApiClientExt: ApiClient { if let Some(since) = since { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::GATEWAY, identity, @@ -626,7 +795,7 @@ pub trait NymApiClientExt: ApiClient { } else { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::GATEWAY, identity, @@ -647,7 +816,7 @@ pub trait NymApiClientExt: ApiClient { if let Some(since) = since { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::MIXNODE, &mix_id.to_string(), @@ -659,7 +828,7 @@ pub trait NymApiClientExt: ApiClient { } else { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::MIXNODE, &mix_id.to_string(), @@ -679,7 +848,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::MIXNODE, &mix_id.to_string(), @@ -698,7 +867,7 @@ pub trait NymApiClientExt: ApiClient { ) -> 
Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::MIXNODE, &mix_id.to_string(), @@ -718,7 +887,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.post_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::MIXNODE, &mix_id.to_string(), @@ -738,7 +907,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::MIXNODE, &mix_id.to_string(), @@ -758,7 +927,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::MIXNODE, &mix_id.to_string(), @@ -776,7 +945,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::NYM_NODES_ROUTES, routes::NYM_NODES_PERFORMANCE, &node_id.to_string(), @@ -792,7 +961,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::NYM_NODES_ROUTES, routes::NYM_NODES_ANNOTATION, &node_id.to_string(), @@ -806,7 +975,7 @@ pub trait NymApiClientExt: ApiClient { async fn get_mixnode_avg_uptime(&self, mix_id: NodeId) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::STATUS_ROUTES, routes::MIXNODE, &mix_id.to_string(), @@ -821,7 +990,11 @@ pub trait NymApiClientExt: ApiClient { #[instrument(level = "debug", skip(self))] async fn get_mixnodes_blacklisted(&self) -> Result, NymAPIError> { self.get_json( - &[routes::API_VERSION, routes::MIXNODES, routes::BLACKLISTED], + &[ + routes::V1_API_VERSION, + routes::MIXNODES, + routes::BLACKLISTED, + ], NO_PARAMS, ) .await @@ -831,7 +1004,11 @@ pub trait NymApiClientExt: ApiClient { #[instrument(level = "debug", skip(self))] async fn get_gateways_blacklisted(&self) -> Result, NymAPIError> { self.get_json( - &[routes::API_VERSION, 
routes::GATEWAYS, routes::BLACKLISTED], + &[ + routes::V1_API_VERSION, + routes::GATEWAYS, + routes::BLACKLISTED, + ], NO_PARAMS, ) .await @@ -844,7 +1021,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.post_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::ECASH_BLIND_SIGN, ], @@ -861,7 +1038,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.post_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::VERIFY_ECASH_TICKET, ], @@ -878,7 +1055,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.post_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::BATCH_REDEEM_ECASH_TICKETS, ], @@ -903,7 +1080,7 @@ pub trait NymApiClientExt: ApiClient { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::PARTIAL_EXPIRATION_DATE_SIGNATURES, ], @@ -924,7 +1101,7 @@ pub trait NymApiClientExt: ApiClient { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::PARTIAL_COIN_INDICES_SIGNATURES, ], @@ -948,7 +1125,7 @@ pub trait NymApiClientExt: ApiClient { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::GLOBAL_EXPIRATION_DATE_SIGNATURES, ], @@ -969,7 +1146,7 @@ pub trait NymApiClientExt: ApiClient { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::GLOBAL_COIN_INDICES_SIGNATURES, ], @@ -989,7 +1166,7 @@ pub trait NymApiClientExt: ApiClient { }; self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, ecash::MASTER_VERIFICATION_KEY, ], @@ -1005,7 +1182,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result<(), NymAPIError> { self.post_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::NYM_NODES_ROUTES, routes::NYM_NODES_REFRESH_DESCRIBED, ], @@ -1022,7 +1199,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { 
self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::ECASH_ISSUED_TICKETBOOKS_FOR, &expiration_date.to_string(), @@ -1039,7 +1216,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.get_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::ECASH_ISSUED_TICKETBOOKS_FOR_COUNT, &expiration_date.to_string(), @@ -1056,7 +1233,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.post_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::ECASH_ISSUED_TICKETBOOKS_CHALLENGE_COMMITMENT, ], @@ -1073,7 +1250,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.post_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, routes::ECASH_ROUTES, routes::ECASH_ISSUED_TICKETBOOKS_DATA, ], @@ -1089,7 +1266,7 @@ pub trait NymApiClientExt: ApiClient { ) -> Result { self.post_json( &[ - routes::API_VERSION, + routes::V1_API_VERSION, "unstable", routes::NYM_NODES_ROUTES, routes::nym_nodes::BY_ADDRESSES, @@ -1103,7 +1280,7 @@ pub trait NymApiClientExt: ApiClient { #[instrument(level = "debug", skip(self))] async fn get_network_details(&self) -> Result { self.get_json( - &[routes::API_VERSION, routes::NETWORK, routes::DETAILS], + &[routes::V1_API_VERSION, routes::NETWORK, routes::DETAILS], NO_PARAMS, ) .await @@ -1112,7 +1289,24 @@ pub trait NymApiClientExt: ApiClient { #[instrument(level = "debug", skip(self))] async fn get_chain_status(&self) -> Result { self.get_json( - &[routes::API_VERSION, routes::NETWORK, routes::CHAIN_STATUS], + &[ + routes::V1_API_VERSION, + routes::NETWORK, + routes::CHAIN_STATUS, + ], + NO_PARAMS, + ) + .await + } + + #[instrument(level = "debug", skip(self))] + async fn get_key_rotation_info(&self) -> Result { + self.get_json( + &[ + routes::V1_API_VERSION, + routes::EPOCH, + routes::KEY_ROTATION_INFO, + ], NO_PARAMS, ) .await diff --git a/common/client-libs/validator-client/src/nym_api/routes.rs 
b/common/client-libs/validator-client/src/nym_api/routes.rs index 320e9904ec9..d0f93b5093a 100644 --- a/common/client-libs/validator-client/src/nym_api/routes.rs +++ b/common/client-libs/validator-client/src/nym_api/routes.rs @@ -1,9 +1,8 @@ // Copyright 2021 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 -use nym_network_defaults::NYM_API_VERSION; - -pub const API_VERSION: &str = NYM_API_VERSION; +pub const V1_API_VERSION: &str = "v1"; +pub const V2_API_VERSION: &str = "v2"; pub const MIXNODES: &str = "mixnodes"; pub const GATEWAYS: &str = "gateways"; pub const DESCRIBED: &str = "described"; @@ -79,3 +78,11 @@ pub const SERVICE_PROVIDERS: &str = "services"; pub const DETAILS: &str = "details"; pub const CHAIN_STATUS: &str = "chain-status"; pub const NETWORK: &str = "network"; + +pub const EPOCH: &str = "epoch"; + +pub use epoch_routes::*; +pub mod epoch_routes { + pub const CURRENT: &str = "current"; + pub const KEY_ROTATION_INFO: &str = "key-rotation-info"; +} diff --git a/common/client-libs/validator-client/src/nyxd/contract_traits/mixnet_query_client.rs b/common/client-libs/validator-client/src/nyxd/contract_traits/mixnet_query_client.rs index 5060fe3df47..9f1bb71ad41 100644 --- a/common/client-libs/validator-client/src/nyxd/contract_traits/mixnet_query_client.rs +++ b/common/client-libs/validator-client/src/nyxd/contract_traits/mixnet_query_client.rs @@ -12,8 +12,8 @@ use nym_mixnet_contract_common::gateway::{PreassignedGatewayIdsResponse, Preassi use nym_mixnet_contract_common::nym_node::{ EpochAssignmentResponse, NodeDetailsByIdentityResponse, NodeDetailsResponse, NodeOwnershipResponse, NodeRewardingDetailsResponse, PagedNymNodeBondsResponse, - PagedNymNodeDetailsResponse, PagedUnbondedNymNodesResponse, Role, RolesMetadataResponse, - StakeSaturationResponse, UnbondedNodeResponse, UnbondedNymNode, + PagedNymNodeDetailsResponse, PagedUnbondedNymNodesResponse, RewardedSetMetadata, Role, + RolesMetadataResponse, StakeSaturationResponse, 
UnbondedNodeResponse, UnbondedNymNode, }; use nym_mixnet_contract_common::reward_params::WorkFactor; use nym_mixnet_contract_common::{ @@ -28,12 +28,12 @@ use nym_mixnet_contract_common::{ ContractBuildInformation, ContractState, ContractStateParams, CurrentIntervalResponse, CurrentNymNodeVersionResponse, Delegation, EpochEventId, EpochRewardedSet, EpochStatus, GatewayBond, GatewayBondResponse, GatewayOwnershipResponse, HistoricalNymNodeVersionEntry, - IdentityKey, IdentityKeyRef, IntervalEventId, MixNodeBond, MixNodeDetails, - MixOwnershipResponse, MixnodeDetailsByIdentityResponse, MixnodeDetailsResponse, NodeId, - NumberOfPendingEventsResponse, NymNodeBond, NymNodeDetails, NymNodeVersionHistoryResponse, - PagedAllDelegationsResponse, PagedDelegatorDelegationsResponse, PagedGatewayResponse, - PagedMixnodeBondsResponse, PagedNodeDelegationsResponse, PendingEpochEvent, - PendingEpochEventResponse, PendingEpochEventsResponse, PendingIntervalEvent, + IdentityKey, IdentityKeyRef, IntervalEventId, KeyRotationIdResponse, KeyRotationState, + MixNodeBond, MixNodeDetails, MixOwnershipResponse, MixnodeDetailsByIdentityResponse, + MixnodeDetailsResponse, NodeId, NumberOfPendingEventsResponse, NymNodeBond, NymNodeDetails, + NymNodeVersionHistoryResponse, PagedAllDelegationsResponse, PagedDelegatorDelegationsResponse, + PagedGatewayResponse, PagedMixnodeBondsResponse, PagedNodeDelegationsResponse, + PendingEpochEvent, PendingEpochEventResponse, PendingEpochEventsResponse, PendingIntervalEvent, PendingIntervalEventResponse, PendingIntervalEventsResponse, QueryMsg as MixnetQueryMsg, RewardedSet, UnbondedMixnode, }; @@ -546,6 +546,16 @@ pub trait MixnetQueryClient { }) .await } + + async fn get_key_rotation_state(&self) -> Result { + self.query_mixnet_contract(MixnetQueryMsg::GetKeyRotationState {}) + .await + } + + async fn get_key_rotation_id(&self) -> Result { + self.query_mixnet_contract(MixnetQueryMsg::GetKeyRotationId {}) + .await + } } // extension trait to the query client 
to deal with the paged queries @@ -673,12 +683,20 @@ pub trait MixnetQueryClientExt: MixnetQueryClient { async fn get_rewarded_set(&self) -> Result { let error_response = |message| Err(NyxdError::extension_query_failure("mixnet", message)); + // bypass for catch 22 for fresh contracts. we can't refresh cache because there's no rewarded set, + // but we can't set the rewarded set because we didn't refresh the cache let metadata = self.get_rewarded_set_metadata().await?; - if !metadata.metadata.fully_assigned { + + let is_default = metadata.metadata == RewardedSetMetadata::default(); + if !metadata.metadata.fully_assigned && !is_default { return error_response("the rewarded set hasn't been fully assigned for this epoch"); } let expected_epoch_id = metadata.metadata.epoch_id; + if is_default { + return Ok(Default::default()); + } + // if we have to query those things more frequently, we could do it concurrently, // but as it stands now, it happens so infrequently it might as well be sequential let entry = self.get_role_assignment(Role::EntryGateway).await?; diff --git a/common/client-libs/validator-client/src/nyxd/mod.rs b/common/client-libs/validator-client/src/nyxd/mod.rs index 03746e305e7..5f7cbb8e7c8 100644 --- a/common/client-libs/validator-client/src/nyxd/mod.rs +++ b/common/client-libs/validator-client/src/nyxd/mod.rs @@ -138,6 +138,14 @@ impl NyxdClient { config, }) } + + pub fn connect_to_default_env(endpoint: U) -> Result + where + U: TryInto, + { + let config = Config::try_from_nym_network_details(&NymNetworkDetails::new_from_env())?; + Self::connect(config, endpoint) + } } impl NyxdClient { diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/Cargo.toml b/common/cosmwasm-smart-contracts/mixnet-contract/Cargo.toml index 8800e72b20c..abe502c50ee 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/Cargo.toml +++ b/common/cosmwasm-smart-contracts/mixnet-contract/Cargo.toml @@ -40,3 +40,6 @@ contract-testing = [] utoipa = ["dep:utoipa"] schema 
= ["cw2"] generate-ts = ['ts-rs'] + +[lints] +workspace = true \ No newline at end of file diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/error.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/error.rs index e8349d4eb0e..8104a1c9100 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/error.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/error.rs @@ -193,6 +193,9 @@ pub enum MixnetContractError { #[error("attempted to perform the operation with 0 coins. This is not allowed")] ZeroCoinAmount, + #[error("key rotation validity below minimum value")] + TooShortRotationInterval, + #[error("this validator ({current_validator}) is not the one responsible for advancing this epoch. It's responsibility of {chosen_validator}.")] RewardingValidatorMismatch { current_validator: Addr, diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/interval.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/interval.rs index 2259af4c7a9..39cd180bf8f 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/interval.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/interval.rs @@ -358,7 +358,7 @@ impl Interval { self.total_elapsed_epochs } - pub const fn current_epoch_absolute_id(&self) -> u32 { + pub const fn current_epoch_absolute_id(&self) -> EpochId { // since we count epochs starting from 0, if n epochs have elapsed, the current one has absolute id of n self.total_elapsed_epochs } diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/key_rotation.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/key_rotation.rs new file mode 100644 index 00000000000..3c32b99e2f6 --- /dev/null +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/key_rotation.rs @@ -0,0 +1,155 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use crate::EpochId; +use cosmwasm_schema::cw_serde; + +pub type KeyRotationId = u32; + +#[cw_serde] +#[derive(Copy)] +#[cfg_attr(feature 
= "utoipa", derive(utoipa::ToSchema))] +pub struct KeyRotationState { + /// Defines how long each key rotation is valid for (in terms of epochs) + pub validity_epochs: u32, + + /// Records the initial epoch_id when the key rotation has been introduced (0 for fresh contracts). + /// It is used for determining when rotation is meant to advance. + #[cfg_attr(feature = "utoipa", schema(value_type = u32))] + pub initial_epoch_id: EpochId, +} + +impl KeyRotationState { + pub fn key_rotation_id(&self, current_epoch_id: EpochId) -> KeyRotationId { + let diff = current_epoch_id.saturating_sub(self.initial_epoch_id); + diff / self.validity_epochs + } + + pub fn next_rotation_starting_epoch_id(&self, current_epoch_id: EpochId) -> EpochId { + self.current_rotation_starting_epoch_id(current_epoch_id) + self.validity_epochs + } + + pub fn current_rotation_starting_epoch_id(&self, current_epoch_id: EpochId) -> EpochId { + let current_rotation_id = self.key_rotation_id(current_epoch_id); + + self.initial_epoch_id + self.validity_epochs * current_rotation_id + } +} + +#[cw_serde] +pub struct KeyRotationIdResponse { + pub rotation_id: KeyRotationId, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn key_rotation_id() { + let state = KeyRotationState { + validity_epochs: 24, + initial_epoch_id: 0, + }; + assert_eq!(0, state.key_rotation_id(0)); + assert_eq!(0, state.key_rotation_id(23)); + assert_eq!(1, state.key_rotation_id(24)); + assert_eq!(1, state.key_rotation_id(47)); + assert_eq!(2, state.key_rotation_id(48)); + + let state = KeyRotationState { + validity_epochs: 12, + initial_epoch_id: 0, + }; + assert_eq!(0, state.key_rotation_id(0)); + assert_eq!(0, state.key_rotation_id(11)); + assert_eq!(1, state.key_rotation_id(12)); + assert_eq!(1, state.key_rotation_id(23)); + assert_eq!(2, state.key_rotation_id(24)); + + let state = KeyRotationState { + validity_epochs: 24, + initial_epoch_id: 10000, + }; + assert_eq!(0, state.key_rotation_id(123)); + assert_eq!(0, 
state.key_rotation_id(10000)); + assert_eq!(0, state.key_rotation_id(10001)); + assert_eq!(0, state.key_rotation_id(10023)); + assert_eq!(1, state.key_rotation_id(10024)); + assert_eq!(1, state.key_rotation_id(10047)); + assert_eq!(2, state.key_rotation_id(10048)); + assert_eq!(2, state.key_rotation_id(10060)); + } + + #[test] + fn next_rotation_starting_epoch_id() { + let state = KeyRotationState { + validity_epochs: 24, + initial_epoch_id: 0, + }; + assert_eq!(24, state.next_rotation_starting_epoch_id(0)); + assert_eq!(24, state.next_rotation_starting_epoch_id(23)); + assert_eq!(48, state.next_rotation_starting_epoch_id(24)); + assert_eq!(48, state.next_rotation_starting_epoch_id(47)); + assert_eq!(72, state.next_rotation_starting_epoch_id(48)); + + let state = KeyRotationState { + validity_epochs: 12, + initial_epoch_id: 0, + }; + assert_eq!(12, state.next_rotation_starting_epoch_id(0)); + assert_eq!(12, state.next_rotation_starting_epoch_id(11)); + assert_eq!(24, state.next_rotation_starting_epoch_id(12)); + assert_eq!(24, state.next_rotation_starting_epoch_id(23)); + assert_eq!(36, state.next_rotation_starting_epoch_id(24)); + + let state = KeyRotationState { + validity_epochs: 24, + initial_epoch_id: 10000, + }; + assert_eq!(10024, state.next_rotation_starting_epoch_id(123)); + assert_eq!(10024, state.next_rotation_starting_epoch_id(10000)); + assert_eq!(10024, state.next_rotation_starting_epoch_id(10001)); + assert_eq!(10024, state.next_rotation_starting_epoch_id(10023)); + assert_eq!(10048, state.next_rotation_starting_epoch_id(10024)); + assert_eq!(10048, state.next_rotation_starting_epoch_id(10047)); + assert_eq!(10072, state.next_rotation_starting_epoch_id(10048)); + assert_eq!(10072, state.next_rotation_starting_epoch_id(10060)); + } + + #[test] + fn current_rotation_starting_epoch_id() { + let state = KeyRotationState { + validity_epochs: 24, + initial_epoch_id: 0, + }; + assert_eq!(0, state.current_rotation_starting_epoch_id(0)); + assert_eq!(0, 
state.current_rotation_starting_epoch_id(23)); + assert_eq!(24, state.current_rotation_starting_epoch_id(24)); + assert_eq!(24, state.current_rotation_starting_epoch_id(47)); + assert_eq!(48, state.current_rotation_starting_epoch_id(48)); + + let state = KeyRotationState { + validity_epochs: 12, + initial_epoch_id: 0, + }; + assert_eq!(0, state.current_rotation_starting_epoch_id(0)); + assert_eq!(0, state.current_rotation_starting_epoch_id(11)); + assert_eq!(12, state.current_rotation_starting_epoch_id(12)); + assert_eq!(12, state.current_rotation_starting_epoch_id(23)); + assert_eq!(24, state.current_rotation_starting_epoch_id(24)); + + let state = KeyRotationState { + validity_epochs: 24, + initial_epoch_id: 10000, + }; + assert_eq!(10000, state.current_rotation_starting_epoch_id(123)); + assert_eq!(10000, state.current_rotation_starting_epoch_id(10000)); + assert_eq!(10000, state.current_rotation_starting_epoch_id(10001)); + assert_eq!(10000, state.current_rotation_starting_epoch_id(10023)); + assert_eq!(10024, state.current_rotation_starting_epoch_id(10024)); + assert_eq!(10024, state.current_rotation_starting_epoch_id(10047)); + assert_eq!(10048, state.current_rotation_starting_epoch_id(10048)); + assert_eq!(10048, state.current_rotation_starting_epoch_id(10060)); + } +} diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/lib.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/lib.rs index 4f9d7a088fb..eb1d533270c 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/lib.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/lib.rs @@ -1,10 +1,6 @@ // Copyright 2021-2023 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 -#![warn(clippy::expect_used)] -#![warn(clippy::unwrap_used)] -#![warn(clippy::todo)] - mod config_score; pub mod constants; pub mod delegation; @@ -13,6 +9,7 @@ pub mod events; pub mod gateway; pub mod helpers; pub mod interval; +pub mod key_rotation; pub mod mixnode; pub mod msg; pub mod nym_node; 
@@ -37,6 +34,7 @@ pub use gateway::{ pub use interval::{ CurrentIntervalResponse, EpochId, EpochState, EpochStatus, Interval, IntervalId, }; +pub use key_rotation::*; pub use mixnode::{ LegacyMixLayer, MixNode, MixNodeBond, MixNodeConfigUpdate, MixNodeDetails, MixOwnershipResponse, MixnodeDetailsByIdentityResponse, MixnodeDetailsResponse, NodeCostParams, diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs index 763082a1f17..64b69fe737e 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs @@ -170,6 +170,11 @@ impl NodeRewarding { } } + // we panic here as opposed to returning an error as this is undefined behaviour, + // because the pledge amount has decreased (i.e. slashing has occurred) which + // should not be possible under any situation. at this point we don't know how many other things + // might have failed so we have to bail + #[allow(clippy::panic)] pub fn pending_detailed_operator_reward(&self, original_pledge: &Coin) -> StdResult { let initial_dec = original_pledge.amount.into_base_decimal()?; if initial_dec > self.operator { @@ -189,6 +194,11 @@ impl NodeRewarding { Ok(truncate_reward(delegator_reward, &delegation.amount.denom)) } + // we panic here as opposed to returning an error as this is undefined behaviour, + // because the pledge amount has decreased (i.e. slashing has occurred) which + // should not be possible under any situation. 
at this point we don't know how many other things + // might have failed so we have to bail + #[allow(clippy::panic)] pub fn withdraw_operator_reward( &mut self, original_pledge: &Coin, diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/msg.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/msg.rs index 1f32e71441b..0fedbd84e9a 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/msg.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/msg.rs @@ -35,6 +35,7 @@ use crate::{ PreassignedGatewayIdsResponse, }, interval::{CurrentIntervalResponse, EpochStatus}, + key_rotation::{KeyRotationIdResponse, KeyRotationState}, mixnode::{ MixOwnershipResponse, MixStakeSaturationResponse, MixnodeDetailsByIdentityResponse, MixnodeDetailsResponse, MixnodeRewardingDetailsResponse, PagedMixnodeBondsResponse, @@ -81,6 +82,18 @@ pub struct InstantiateMsg { #[serde(default)] pub interval_operating_cost: OperatingCostRange, + + #[serde(default)] + pub key_validity_in_epochs: Option, +} + +impl InstantiateMsg { + // needs to give us enough time to pre-announce key for following epoch + // and have an overlap with the preceding epoch + pub const MIN_KEY_ROTATION_VALIDITY: u32 = 3; + pub fn key_validity_in_epochs(&self) -> u32 { + self.key_validity_in_epochs.unwrap_or(24) + } } #[cw_serde] @@ -857,6 +870,15 @@ pub enum QueryMsg { /// Cosmos address used for the query of the signing nonce. address: String, }, + + // sphinx key rotation-related + #[cfg_attr(feature = "schema", returns(KeyRotationState))] + /// Gets the current state config of the key rotation (i.e. 
starting epoch id and validity duration) + GetKeyRotationState {}, + + /// Gets the current key rotation id + #[cfg_attr(feature = "schema", returns(KeyRotationIdResponse))] + GetKeyRotationId {}, } #[cw_serde] diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/rewarding/simulator/mod.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/rewarding/simulator/mod.rs index 9ad2a3d02a0..25f588b4768 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/rewarding/simulator/mod.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/rewarding/simulator/mod.rs @@ -151,6 +151,9 @@ impl Simulator { } } + // this code is not meant to be used in production systems, only in tests + // so a panic due to inconsistent arguments is fine + #[allow(clippy::panic)] pub fn simulate_epoch( &mut self, node_params: &BTreeMap, diff --git a/common/gateway-requests/src/lib.rs b/common/gateway-requests/src/lib.rs index 0d300bc4a42..bdb9a30a0f1 100644 --- a/common/gateway-requests/src/lib.rs +++ b/common/gateway-requests/src/lib.rs @@ -19,7 +19,7 @@ pub use shared_key::{ SharedGatewayKey, SharedKeyConversionError, SharedKeyUsageError, SharedSymmetricKey, }; -pub const CURRENT_PROTOCOL_VERSION: u8 = AUTHENTICATE_V2_PROTOCOL_VERSION; +pub const CURRENT_PROTOCOL_VERSION: u8 = EMBEDDED_KEY_ROTATION_INFO_VERSION; /// Defines the current version of the communication protocol between gateway and clients. /// It has to be incremented for any breaking change. 
@@ -28,10 +28,12 @@ pub const CURRENT_PROTOCOL_VERSION: u8 = AUTHENTICATE_V2_PROTOCOL_VERSION; // 2 - changes to client credentials structure // 3 - change to AES-GCM-SIV and non-zero IVs // 4 - introduction of v2 authentication protocol to prevent reply attacks +// 5 - add key rotation information to the serialised mix packet pub const INITIAL_PROTOCOL_VERSION: u8 = 1; pub const CREDENTIAL_UPDATE_V2_PROTOCOL_VERSION: u8 = 2; pub const AES_GCM_SIV_PROTOCOL_VERSION: u8 = 3; pub const AUTHENTICATE_V2_PROTOCOL_VERSION: u8 = 4; +pub const EMBEDDED_KEY_ROTATION_INFO_VERSION: u8 = 5; // TODO: could using `Mac` trait here for OutputSize backfire? // Should hmac itself be exposed, imported and used instead? @@ -40,6 +42,7 @@ pub type LegacyGatewayMacSize = bool; fn supports_authenticate_v2(&self) -> bool; + fn supports_key_rotation_packet(&self) -> bool; } impl GatewayProtocolVersionExt for Option { @@ -52,4 +55,9 @@ impl GatewayProtocolVersionExt for Option { let Some(protocol) = *self else { return false }; protocol >= AUTHENTICATE_V2_PROTOCOL_VERSION } + + fn supports_key_rotation_packet(&self) -> bool { + let Some(protocol) = *self else { return false }; + protocol >= EMBEDDED_KEY_ROTATION_INFO_VERSION + } } diff --git a/common/gateway-requests/src/types/binary_request.rs b/common/gateway-requests/src/types/binary_request.rs index 17eab6c9cb2..b52ee819cd7 100644 --- a/common/gateway-requests/src/types/binary_request.rs +++ b/common/gateway-requests/src/types/binary_request.rs @@ -11,6 +11,9 @@ use tungstenite::Message; #[non_exhaustive] pub enum BinaryRequest { ForwardSphinx { packet: MixPacket }, + + // identical to `ForwardSphinx`, but also contains information about sphinx key rotation used + ForwardSphinxV2 { packet: MixPacket }, } #[repr(u8)] @@ -18,6 +21,9 @@ pub enum BinaryRequest { #[non_exhaustive] pub enum BinaryRequestKind { ForwardSphinx = 1, + + // identical to `ForwardSphinx`, but also contains information about sphinx key rotation used + ForwardSphinxV2 
= 2, } // Right now the only valid `BinaryRequest` is a request to forward a sphinx packet. @@ -29,6 +35,7 @@ impl BinaryRequest { pub fn kind(&self) -> BinaryRequestKind { match self { BinaryRequest::ForwardSphinx { .. } => BinaryRequestKind::ForwardSphinx, + BinaryRequest::ForwardSphinxV2 { .. } => BinaryRequestKind::ForwardSphinxV2, } } @@ -38,9 +45,13 @@ impl BinaryRequest { ) -> Result { match kind { BinaryRequestKind::ForwardSphinx => { - let packet = MixPacket::try_from_bytes(plaintext)?; + let packet = MixPacket::try_from_v1_bytes(plaintext)?; Ok(BinaryRequest::ForwardSphinx { packet }) } + BinaryRequestKind::ForwardSphinxV2 => { + let packet = MixPacket::try_from_v2_bytes(plaintext)?; + Ok(BinaryRequest::ForwardSphinxV2 { packet }) + } } } @@ -58,7 +69,8 @@ impl BinaryRequest { let kind = self.kind(); let plaintext = match self { - BinaryRequest::ForwardSphinx { packet } => packet.into_bytes()?, + BinaryRequest::ForwardSphinx { packet } => packet.into_v1_bytes()?, + BinaryRequest::ForwardSphinxV2 { packet } => packet.into_v2_bytes()?, }; BinaryData::make_encrypted_blob(kind as u8, &plaintext, shared_key) @@ -70,7 +82,9 @@ impl BinaryRequest { ) -> Result { // all variants are currently encrypted let blob = match self { - BinaryRequest::ForwardSphinx { .. } => self.into_encrypted_tagged_bytes(shared_key)?, + BinaryRequest::ForwardSphinx { .. } | BinaryRequest::ForwardSphinxV2 { .. } => { + self.into_encrypted_tagged_bytes(shared_key)? 
+ } }; Ok(Message::Binary(blob)) diff --git a/common/http-api-client/src/lib.rs b/common/http-api-client/src/lib.rs index f1068bb562c..3ebda9143da 100644 --- a/common/http-api-client/src/lib.rs +++ b/common/http-api-client/src/lib.rs @@ -439,6 +439,17 @@ impl Client { self.base_url = new_url } + /// Create new instance of `Client` using the provided base url and existing client config + pub fn clone_with_new_url(&self, new_url: Url) -> Self { + Client { + base_url: new_url, + reqwest_client: self.reqwest_client.clone(), + + #[cfg(target_arch = "wasm32")] + request_timeout: self.request_timeout, + } + } + /// Get the currently configured host that this client uses when sending API requests. pub fn current_url(&self) -> &Url { &self.base_url diff --git a/common/http-api-common/src/response/bincode.rs b/common/http-api-common/src/response/bincode.rs index 4225924aec2..cea2ae10d34 100644 --- a/common/http-api-common/src/response/bincode.rs +++ b/common/http-api-common/src/response/bincode.rs @@ -30,6 +30,10 @@ impl Bincode { self.0.headers.insert(name, value.into()); self } + + pub(crate) fn map U>(self, op: F) -> Bincode { + Bincode(self.0.map(op)) + } } impl IntoResponse for Bincode diff --git a/common/http-api-common/src/response/json.rs b/common/http-api-common/src/response/json.rs index dd65e198c25..b2c904b7ee5 100644 --- a/common/http-api-common/src/response/json.rs +++ b/common/http-api-common/src/response/json.rs @@ -32,6 +32,10 @@ impl Json { self.0.headers.insert(name, value.into()); self } + + pub(crate) fn map U>(self, op: F) -> Json { + Json(self.0.map(op)) + } } impl IntoResponse for Json diff --git a/common/http-api-common/src/response/mod.rs b/common/http-api-common/src/response/mod.rs index 9b8d0eb7a0c..ee1a84a7852 100644 --- a/common/http-api-common/src/response/mod.rs +++ b/common/http-api-common/src/response/mod.rs @@ -14,11 +14,10 @@ pub mod bincode; pub mod json; pub mod yaml; +pub use bincode::Bincode; pub use json::Json; pub use yaml::Yaml; -pub 
use bincode::Bincode; - #[derive(Debug, Clone, Default)] pub(crate) struct ResponseWrapper { data: T, @@ -33,6 +32,13 @@ impl ResponseWrapper { } } + pub(crate) fn map U>(self, op: F) -> ResponseWrapper { + ResponseWrapper { + data: op(self.data), + headers: self.headers, + } + } + #[must_use] pub(crate) fn with_header( mut self, @@ -60,6 +66,14 @@ impl FormattedResponse { } } + pub fn map U>(self, op: F) -> FormattedResponse { + match self { + FormattedResponse::Json(inner) => FormattedResponse::Json(inner.map(op)), + FormattedResponse::Yaml(inner) => FormattedResponse::Yaml(inner.map(op)), + FormattedResponse::Bincode(inner) => FormattedResponse::Bincode(inner.map(op)), + } + } + #[must_use] pub fn with_header( self, diff --git a/common/http-api-common/src/response/yaml.rs b/common/http-api-common/src/response/yaml.rs index d0beaaeca82..d9bc163c1ad 100644 --- a/common/http-api-common/src/response/yaml.rs +++ b/common/http-api-common/src/response/yaml.rs @@ -30,6 +30,10 @@ impl Yaml { self.0.headers.insert(name, value.into()); self } + + pub(crate) fn map U>(self, op: F) -> Yaml { + Yaml(self.0.map(op)) + } } impl IntoResponse for Yaml diff --git a/common/nymsphinx/anonymous-replies/Cargo.toml b/common/nymsphinx/anonymous-replies/Cargo.toml index 3d4c8aadc79..ef9c74b73da 100644 --- a/common/nymsphinx/anonymous-replies/Cargo.toml +++ b/common/nymsphinx/anonymous-replies/Cargo.toml @@ -10,7 +10,6 @@ repository = { workspace = true } [dependencies] rand = { workspace = true } bs58 = { workspace = true } -serde = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } @@ -22,7 +21,7 @@ nym-sphinx-types = { path = "../types" } nym-topology = { path = "../../topology" } [target."cfg(target_arch = \"wasm32\")".dependencies.wasm-bindgen] -version = "0.2.95" +workspace = true [dev-dependencies] rand_chacha = { workspace = true } diff --git a/common/nymsphinx/anonymous-replies/src/lib.rs b/common/nymsphinx/anonymous-replies/src/lib.rs index 
3e48d0ea1b3..2c47a23a760 100644 --- a/common/nymsphinx/anonymous-replies/src/lib.rs +++ b/common/nymsphinx/anonymous-replies/src/lib.rs @@ -6,4 +6,4 @@ pub mod reply_surb; pub mod requests; pub use encryption_key::{SurbEncryptionKey, SurbEncryptionKeySize}; -pub use reply_surb::{ReplySurb, ReplySurbError}; +pub use reply_surb::{ReplySurb, ReplySurbError, ReplySurbWithKeyRotation}; diff --git a/common/nymsphinx/anonymous-replies/src/reply_surb.rs b/common/nymsphinx/anonymous-replies/src/reply_surb.rs index 821c0c022bd..7926c976edd 100644 --- a/common/nymsphinx/anonymous-replies/src/reply_surb.rs +++ b/common/nymsphinx/anonymous-replies/src/reply_surb.rs @@ -8,16 +8,13 @@ use nym_sphinx_addressing::nodes::{ NymNodeRoutingAddress, NymNodeRoutingAddressError, MAX_NODE_ADDRESS_UNPADDED_LEN, }; use nym_sphinx_params::packet_sizes::PacketSize; -use nym_sphinx_params::{PacketType, ReplySurbKeyDigestAlgorithm}; +use nym_sphinx_params::{PacketType, ReplySurbKeyDigestAlgorithm, SphinxKeyRotation}; use nym_sphinx_types::{ NymPacket, SURBMaterial, SphinxError, HEADER_SIZE, NODE_ADDRESS_LENGTH, SURB, X25519_WITH_EXPLICIT_PAYLOAD_KEYS_VERSION, }; use nym_topology::{NymRouteProvider, NymTopologyError}; use rand::{CryptoRng, RngCore}; -use serde::de::{Error as SerdeError, Visitor}; -use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use std::fmt::{self, Formatter}; use std::time::Duration; use thiserror::Error; @@ -48,44 +45,6 @@ pub struct ReplySurb { pub(crate) encryption_key: SurbEncryptionKey, } -// Serialize + Deserialize is not really used anymore (it was for a CBOR experiment) -// however, if we decided we needed it again, it's already here -impl Serialize for ReplySurb { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_bytes(&self.to_bytes()) - } -} - -impl<'de> Deserialize<'de> for ReplySurb { - fn deserialize(deserializer: D) -> Result>::Error> - where - D: Deserializer<'de>, - { - struct ReplySurbVisitor; - - 
impl Visitor<'_> for ReplySurbVisitor { - type Value = ReplySurb; - - fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { - write!(formatter, "A replySURB must contain a valid symmetric encryption key and a correctly formed sphinx header") - } - - fn visit_bytes(self, bytes: &[u8]) -> Result - where - E: SerdeError, - { - ReplySurb::from_bytes(bytes) - .map_err(|_| SerdeError::invalid_length(bytes.len(), &self)) - } - } - - deserializer.deserialize_bytes(ReplySurbVisitor) - } -} - impl ReplySurb { /// base overhead of a reply surb that exists regardless of type or number of key materials. pub(crate) const BASE_OVERHEAD: usize = @@ -123,6 +82,7 @@ impl ReplySurb { Ok(ReplySurb { surb: surb_material.construct_SURB().unwrap(), encryption_key: SurbEncryptionKey::new(rng), + // used_key_rotation: SphinxKeyRotation::from(topology.current_key_rotation()), }) } @@ -198,8 +158,75 @@ impl ReplySurb { .use_surb(message_bytes, packet_size.payload_size()) .expect("this error indicates inconsistent message length checking - it shouldn't have happened!"); - let first_hop_address = NymNodeRoutingAddress::try_from(first_hop).unwrap(); + let first_hop_address = NymNodeRoutingAddress::try_from(first_hop)?; Ok((NymPacket::Sphinx(packet), first_hop_address)) } + + pub fn to_legacy(self) -> ReplySurbWithKeyRotation { + self.with_key_rotation(SphinxKeyRotation::Unknown) + } + + pub fn with_key_rotation(self, key_rotation: SphinxKeyRotation) -> ReplySurbWithKeyRotation { + ReplySurbWithKeyRotation { + inner: self, + key_rotation, + } + } +} + +#[derive(Debug)] +pub struct ReplySurbWithKeyRotation { + pub(crate) inner: ReplySurb, + pub(crate) key_rotation: SphinxKeyRotation, +} + +impl ReplySurbWithKeyRotation { + pub fn encryption_key(&self) -> &SurbEncryptionKey { + self.inner.encryption_key() + } + + pub fn inner_reply_surb(&self) -> &ReplySurb { + &self.inner + } + + pub fn key_rotation(&self) -> SphinxKeyRotation { + self.key_rotation + } + + pub fn apply_surb>( + 
self, + message: M, + packet_size: PacketSize, + _packet_type: PacketType, + ) -> Result { + let (packet, first_hop_address) = + self.inner.apply_surb(message, packet_size, _packet_type)?; + + Ok(AppliedReplySurb { + packet, + first_hop_address, + key_rotation: self.key_rotation, + }) + } +} + +pub struct AppliedReplySurb { + pub(crate) packet: NymPacket, + pub(crate) first_hop_address: NymNodeRoutingAddress, + pub(crate) key_rotation: SphinxKeyRotation, +} + +impl AppliedReplySurb { + pub fn first_hop_address(&self) -> NymNodeRoutingAddress { + self.first_hop_address + } + + pub fn key_rotation(&self) -> SphinxKeyRotation { + self.key_rotation + } + + pub fn into_packet(self) -> NymPacket { + self.packet + } } diff --git a/common/nymsphinx/anonymous-replies/src/requests/mod.rs b/common/nymsphinx/anonymous-replies/src/requests/mod.rs index 2a952363861..8dbe741b91b 100644 --- a/common/nymsphinx/anonymous-replies/src/requests/mod.rs +++ b/common/nymsphinx/anonymous-replies/src/requests/mod.rs @@ -1,16 +1,16 @@ // Copyright 2022 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 -use crate::{ReplySurb, ReplySurbError}; +use crate::requests::v1::{AdditionalSurbsV1, DataV1, HeartbeatV1}; +use crate::requests::v2::{AdditionalSurbsV2, DataV2, HeartbeatV2}; +use crate::{ReplySurbError, ReplySurbWithKeyRotation}; use nym_sphinx_addressing::clients::{Recipient, RecipientFormattingError}; +use nym_sphinx_params::key_rotation::InvalidSphinxKeyRotation; use rand::{CryptoRng, RngCore}; use std::fmt::{Display, Formatter}; use std::mem; use thiserror::Error; -use crate::requests::v1::{AdditionalSurbsV1, DataV1, HeartbeatV1}; -use crate::requests::v2::{AdditionalSurbsV2, DataV2, HeartbeatV2}; - #[cfg(target_arch = "wasm32")] use wasm_bindgen::prelude::*; @@ -84,7 +84,7 @@ impl AnonymousSenderTag { #[derive(Debug, Error)] pub enum InvalidReplyRequestError { - #[error("Did not provide sufficient number of bytes to deserialize a valid request")] + #[error("Did not provide 
sufficient number of bytes to deserialise a valid request")] RequestTooShortToDeserialize, #[error("{received} is not a valid content tag for a repliable message")] @@ -93,10 +93,13 @@ pub enum InvalidReplyRequestError { #[error("{received} is not a valid content tag for a reply message")] InvalidReplyContentTag { received: u8 }, - #[error("failed to deserialize recipient information - {0}")] + #[error("failed to deserialise sphinx key rotation details: {0}")] + MalformedSphinxKeyRotation(#[from] InvalidSphinxKeyRotation), + + #[error("failed to deserialise recipient information: {0}")] MalformedRecipient(#[from] RecipientFormattingError), - #[error("failed to deserialize replySURB - {0}")] + #[error("failed to deserialise replySURB: {0}")] MalformedReplySurb(#[from] ReplySurbError), } @@ -136,7 +139,7 @@ impl RepliableMessage { use_legacy_surb_format: bool, data: Vec, sender_tag: AnonymousSenderTag, - reply_surbs: Vec, + reply_surbs: Vec, ) -> Self { let content = if use_legacy_surb_format { RepliableMessageContent::Data(DataV1 { @@ -159,7 +162,7 @@ impl RepliableMessage { pub fn new_additional_surbs( use_legacy_surb_format: bool, sender_tag: AnonymousSenderTag, - reply_surbs: Vec, + reply_surbs: Vec, ) -> Self { let content = if use_legacy_surb_format { RepliableMessageContent::AdditionalSurbs(AdditionalSurbsV1 { reply_surbs }) @@ -484,9 +487,10 @@ mod tests { use crate::requests::v1::{AdditionalSurbsV1, DataV1, HeartbeatV1}; use crate::requests::v2::{AdditionalSurbsV2, DataV2, HeartbeatV2}; use crate::requests::{AnonymousSenderTag, RepliableMessageContent, ReplyMessageContent}; - use crate::{ReplySurb, SurbEncryptionKey}; + use crate::{ReplySurb, ReplySurbWithKeyRotation, SurbEncryptionKey}; use nym_crypto::asymmetric::{ed25519, x25519}; use nym_sphinx_addressing::clients::Recipient; + use nym_sphinx_params::SphinxKeyRotation; use nym_sphinx_types::{ Delay, Destination, DestinationAddressBytes, Node, NodeAddressBytes, PrivateKey, SURBMaterial, 
NODE_ADDRESS_LENGTH, X25519_WITH_EXPLICIT_PAYLOAD_KEYS_VERSION, @@ -571,10 +575,12 @@ mod tests { n: usize, legacy: bool, hops: u8, - ) -> Vec { + ) -> Vec { let mut surbs = Vec::with_capacity(n); for _ in 0..n { - surbs.push(reply_surb(rng, legacy, hops)) + surbs.push( + reply_surb(rng, legacy, hops).with_key_rotation(SphinxKeyRotation::Unknown), + ) } surbs } diff --git a/common/nymsphinx/anonymous-replies/src/requests/v1.rs b/common/nymsphinx/anonymous-replies/src/requests/v1.rs index fec25025ac4..03f35448697 100644 --- a/common/nymsphinx/anonymous-replies/src/requests/v1.rs +++ b/common/nymsphinx/anonymous-replies/src/requests/v1.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::requests::InvalidReplyRequestError; -use crate::ReplySurb; +use crate::{ReplySurb, ReplySurbWithKeyRotation}; use nym_sphinx_types::PAYLOAD_KEY_SIZE; use std::fmt::Display; use std::mem; @@ -14,10 +14,10 @@ const fn v1_reply_surb_serialised_len() -> usize { ReplySurb::BASE_OVERHEAD + 4 * PAYLOAD_KEY_SIZE } -fn v1_reply_surbs_serialised_len(surbs: &[ReplySurb]) -> usize { +fn v1_reply_surbs_serialised_len(surbs: &[ReplySurbWithKeyRotation]) -> usize { // sanity checks; this should probably be removed later on if let Some(reply_surb) = surbs.first() { - if reply_surb.surb.uses_key_seeds() { + if reply_surb.inner.surb.uses_key_seeds() { error!("using v1 surbs encoding with updated structure - the surbs will be unusable") } } @@ -30,7 +30,7 @@ fn v1_reply_surbs_serialised_len(surbs: &[ReplySurb]) -> usize { // NUM_SURBS (u32) || SURB_DATA fn recover_reply_surbs_v1( bytes: &[u8], -) -> Result<(Vec, usize), InvalidReplyRequestError> { +) -> Result<(Vec, usize), InvalidReplyRequestError> { let mut consumed = mem::size_of::(); if bytes.len() < consumed { return Err(InvalidReplyRequestError::RequestTooShortToDeserialize); @@ -45,7 +45,7 @@ fn recover_reply_surbs_v1( let mut reply_surbs = Vec::with_capacity(num_surbs as usize); for _ in 0..num_surbs as usize { let surb_bytes = 
&bytes[consumed..consumed + surb_size]; - let reply_surb = ReplySurb::from_bytes(surb_bytes)?; + let reply_surb = ReplySurb::from_bytes(surb_bytes)?.to_legacy(); reply_surbs.push(reply_surb); consumed += surb_size; @@ -55,19 +55,21 @@ fn recover_reply_surbs_v1( } // length (u32) prefixed reply surbs with legacy serialisation of 4 hops and full payload keys attached -fn reply_surbs_bytes_v1(reply_surbs: &[ReplySurb]) -> impl Iterator + use<'_> { +fn reply_surbs_bytes_v1( + reply_surbs: &[ReplySurbWithKeyRotation], +) -> impl Iterator + use<'_> { let num_surbs = reply_surbs.len() as u32; num_surbs .to_be_bytes() .into_iter() - .chain(reply_surbs.iter().flat_map(|s| s.to_bytes())) + .chain(reply_surbs.iter().flat_map(|s| s.inner.to_bytes())) } #[derive(Debug)] pub struct DataV1 { pub message: Vec, - pub reply_surbs: Vec, + pub reply_surbs: Vec, } impl Display for DataV1 { @@ -83,7 +85,7 @@ impl Display for DataV1 { #[derive(Debug)] pub struct AdditionalSurbsV1 { - pub reply_surbs: Vec, + pub reply_surbs: Vec, } impl Display for AdditionalSurbsV1 { @@ -98,7 +100,7 @@ impl Display for AdditionalSurbsV1 { #[derive(Debug)] pub struct HeartbeatV1 { - pub additional_reply_surbs: Vec, + pub additional_reply_surbs: Vec, } impl Display for HeartbeatV1 { diff --git a/common/nymsphinx/anonymous-replies/src/requests/v2.rs b/common/nymsphinx/anonymous-replies/src/requests/v2.rs index 6d624455d9c..6ed60203b1f 100644 --- a/common/nymsphinx/anonymous-replies/src/requests/v2.rs +++ b/common/nymsphinx/anonymous-replies/src/requests/v2.rs @@ -2,7 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use crate::requests::InvalidReplyRequestError; -use crate::ReplySurb; +use crate::{ReplySurb, ReplySurbWithKeyRotation}; +use nym_sphinx_params::SphinxKeyRotation; use nym_sphinx_types::constants::PAYLOAD_KEY_SEED_SIZE; use std::fmt::Display; use std::iter::once; @@ -13,21 +14,29 @@ const fn v2_reply_surb_serialised_len(num_hops: u8) -> usize { } // sphinx doesn't support more than 5 hops (so cast 
to u8 is safe) -// ASSUMPTION: all surbs are generated with the same parameters (if they're not, then the client is hurting itself) -fn reply_surbs_hops(reply_surbs: &[ReplySurb]) -> u8 { +// ASSUMPTION: all surbs are generated with the same parameters (if they're not, then the client is hurting itself), +// which includes the same number of hops and the same underlying sphinx key rotation +fn reply_surbs_hops(reply_surbs: &[ReplySurbWithKeyRotation]) -> u8 { reply_surbs .first() - .map(|reply_surb| reply_surb.surb.materials_count() as u8) + .map(|reply_surb| reply_surb.inner.surb.materials_count() as u8) .unwrap_or_default() } -fn v2_reply_surbs_serialised_len(surbs: &[ReplySurb]) -> usize { +fn key_rotation(reply_surbs: &[ReplySurbWithKeyRotation]) -> SphinxKeyRotation { + reply_surbs + .first() + .map(|reply_surb| reply_surb.key_rotation) + .unwrap_or_default() +} + +fn v2_reply_surbs_serialised_len(surbs: &[ReplySurbWithKeyRotation]) -> usize { let num_surbs = surbs.len(); let num_hops = reply_surbs_hops(surbs); // sanity checks; this should probably be removed later on if let Some(reply_surb) = surbs.first() { - if !reply_surb.surb.uses_key_seeds() { + if !reply_surb.inner.surb.uses_key_seeds() { error!("using v2 surbs encoding with legacy structure - the surbs will be unusable") } } @@ -35,14 +44,14 @@ fn v2_reply_surbs_serialised_len(surbs: &[ReplySurb]) -> usize { // when serialising surbs are always prepended with: // - u16-encoded count, // - u8-encoded number of hops - // - u8 reserved value + // - u8-encoded sphinx key rotation (or unused for 'old' variant) 4 + num_surbs * v2_reply_surb_serialised_len(num_hops) } -// NUM_SURBS (u16) || HOPS (u8) || RESERVED (u8) || SURB_DATA +// NUM_SURBS (u16) || HOPS (u8) || KEY ROTATION (u8) || SURB_DATA fn recover_reply_surbs_v2( bytes: &[u8], -) -> Result<(Vec, usize), InvalidReplyRequestError> { +) -> Result<(Vec, usize), InvalidReplyRequestError> { if bytes.len() < 4 { return 
Err(InvalidReplyRequestError::RequestTooShortToDeserialize); } @@ -50,7 +59,7 @@ fn recover_reply_surbs_v2( // we're not attaching more than 65k surbs... let num_surbs = u16::from_be_bytes([bytes[0], bytes[1]]); let num_hops = bytes[2]; - let _reserved = bytes[3]; + let key_rotation = SphinxKeyRotation::try_from(bytes[3])?; let mut consumed = 4; let surb_size = v2_reply_surb_serialised_len(num_hops); @@ -61,7 +70,7 @@ fn recover_reply_surbs_v2( let mut reply_surbs = Vec::with_capacity(num_surbs as usize); for _ in 0..num_surbs as usize { let surb_bytes = &bytes[consumed..consumed + surb_size]; - let reply_surb = ReplySurb::from_bytes(surb_bytes)?; + let reply_surb = ReplySurb::from_bytes(surb_bytes)?.with_key_rotation(key_rotation); reply_surbs.push(reply_surb); consumed += surb_size; @@ -70,23 +79,25 @@ fn recover_reply_surbs_v2( Ok((reply_surbs, consumed)) } -fn reply_surbs_bytes_v2(reply_surbs: &[ReplySurb]) -> impl Iterator + use<'_> { +fn reply_surbs_bytes_v2( + reply_surbs: &[ReplySurbWithKeyRotation], +) -> impl Iterator + use<'_> { let num_surbs = reply_surbs.len() as u16; let num_hops = reply_surbs_hops(reply_surbs); - let reserved = 0; + let key_rotation = key_rotation(reply_surbs) as u8; num_surbs .to_be_bytes() .into_iter() .chain(once(num_hops)) - .chain(once(reserved)) - .chain(reply_surbs.iter().flat_map(|surb| surb.to_bytes())) + .chain(once(key_rotation)) + .chain(reply_surbs.iter().flat_map(|surb| surb.inner.to_bytes())) } #[derive(Debug)] pub struct DataV2 { pub message: Vec, - pub reply_surbs: Vec, + pub reply_surbs: Vec, } impl Display for DataV2 { @@ -102,7 +113,7 @@ impl Display for DataV2 { #[derive(Debug)] pub struct AdditionalSurbsV2 { - pub reply_surbs: Vec, + pub reply_surbs: Vec, } impl Display for AdditionalSurbsV2 { @@ -117,7 +128,7 @@ impl Display for AdditionalSurbsV2 { #[derive(Debug)] pub struct HeartbeatV2 { - pub additional_reply_surbs: Vec, + pub additional_reply_surbs: Vec, } impl Display for HeartbeatV2 { diff --git 
a/common/nymsphinx/cover/src/lib.rs b/common/nymsphinx/cover/src/lib.rs index 55d71e6c3da..42be1db410f 100644 --- a/common/nymsphinx/cover/src/lib.rs +++ b/common/nymsphinx/cover/src/lib.rs @@ -10,7 +10,9 @@ use nym_sphinx_addressing::nodes::NymNodeRoutingAddress; use nym_sphinx_chunking::fragment::COVER_FRAG_ID; use nym_sphinx_forwarding::packet::MixPacket; use nym_sphinx_params::packet_sizes::PacketSize; -use nym_sphinx_params::{PacketEncryptionAlgorithm, PacketHkdfAlgorithm, PacketType}; +use nym_sphinx_params::{ + PacketEncryptionAlgorithm, PacketHkdfAlgorithm, PacketType, SphinxKeyRotation, +}; use nym_sphinx_types::NymPacket; use nym_topology::{NymRouteProvider, NymTopologyError}; use rand::{CryptoRng, RngCore}; @@ -125,6 +127,9 @@ where let delays = nym_sphinx_routing::generate_hop_delays(average_packet_delay, route.len()); let destination = full_address.as_sphinx_destination(); + let rotation_id = topology.current_key_rotation(); + let sphinx_key_rotation = SphinxKeyRotation::from(rotation_id); + let first_hop_address = NymNodeRoutingAddress::try_from(route.first().unwrap().address).unwrap(); @@ -146,7 +151,12 @@ where )?, }; - Ok(MixPacket::new(first_hop_address, packet, packet_type)) + Ok(MixPacket::new( + first_hop_address, + packet, + packet_type, + sphinx_key_rotation, + )) } /// Helper function used to determine if given message represents a loop cover message. 
diff --git a/common/nymsphinx/forwarding/Cargo.toml b/common/nymsphinx/forwarding/Cargo.toml index c3f3e0dc9f3..c8beb33b12f 100644 --- a/common/nymsphinx/forwarding/Cargo.toml +++ b/common/nymsphinx/forwarding/Cargo.toml @@ -11,5 +11,5 @@ repository = { workspace = true } nym-sphinx-addressing = { path = "../addressing" } nym-sphinx-params = { path = "../params" } nym-sphinx-types = { path = "../types", features = ["sphinx", "outfox"] } -nym-outfox = { path = "../../../nym-outfox" } +nym-sphinx-anonymous-replies = { path = "../anonymous-replies" } thiserror = { workspace = true } diff --git a/common/nymsphinx/forwarding/src/packet.rs b/common/nymsphinx/forwarding/src/packet.rs index 97b1419aa1c..9fb9f3a9155 100644 --- a/common/nymsphinx/forwarding/src/packet.rs +++ b/common/nymsphinx/forwarding/src/packet.rs @@ -2,24 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 use nym_sphinx_addressing::nodes::{NymNodeRoutingAddress, NymNodeRoutingAddressError}; -use nym_sphinx_params::{PacketSize, PacketType}; +use nym_sphinx_params::{PacketSize, PacketType, SphinxKeyRotation}; use nym_sphinx_types::{NymPacket, NymPacketError}; -use std::fmt::{self, Debug, Formatter}; +use nym_sphinx_anonymous_replies::reply_surb::AppliedReplySurb; +use nym_sphinx_params::key_rotation::InvalidSphinxKeyRotation; +use nym_sphinx_params::packet_sizes::InvalidPacketSize; +use nym_sphinx_params::packet_types::InvalidPacketType; +use std::net::SocketAddr; use thiserror::Error; #[derive(Debug, Error)] pub enum MixPacketFormattingError { #[error("too few bytes provided to recover from bytes")] TooFewBytesProvided, - #[error("provided packet mode is invalid")] - InvalidPacketType, - #[error("received request had invalid size - received {0}")] - InvalidPacketSize(usize), + + #[error("provided packet mode is invalid: {0}")] + InvalidPacketType(#[from] InvalidPacketType), + + #[error("received request had an invalid packet size: {0}")] + InvalidPacketSize(#[from] InvalidPacketSize), + + #[error("provided 
key rotation is invalid: {0}")] + InvalidKeyRotation(#[from] InvalidSphinxKeyRotation), + #[error("address field was incorrectly encoded")] InvalidAddress, + #[error("received sphinx packet was malformed")] MalformedSphinxPacket, + #[error("Packet: {0}")] Packet(#[from] NymPacketError), } @@ -30,20 +42,12 @@ impl From for MixPacketFormattingError { } } +#[derive(Debug)] pub struct MixPacket { next_hop: NymNodeRoutingAddress, packet: NymPacket, packet_type: PacketType, -} - -impl Debug for MixPacket { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!( - f, - "MixPacket to {:?} with packet_type {:?}. Packet {:?}", - self.next_hop, self.packet_type, self.packet - ) - } + key_rotation: SphinxKeyRotation, } impl MixPacket { @@ -51,11 +55,25 @@ impl MixPacket { next_hop: NymNodeRoutingAddress, packet: NymPacket, packet_type: PacketType, + key_rotation: SphinxKeyRotation, ) -> Self { MixPacket { next_hop, packet, packet_type, + key_rotation, + } + } + + pub fn from_applied_surb( + applied_reply_surb: AppliedReplySurb, + packet_type: PacketType, + ) -> Self { + MixPacket { + next_hop: applied_reply_surb.first_hop_address(), + key_rotation: applied_reply_surb.key_rotation(), + packet: applied_reply_surb.into_packet(), + packet_type, } } @@ -63,6 +81,10 @@ impl MixPacket { self.next_hop } + pub fn next_hop_address(&self) -> SocketAddr { + self.next_hop.into() + } + pub fn packet(&self) -> &NymPacket { &self.packet } @@ -71,41 +93,90 @@ impl MixPacket { self.packet } + pub fn key_rotation(&self) -> SphinxKeyRotation { + self.key_rotation + } + pub fn packet_type(&self) -> PacketType { self.packet_type } // the message is formatted as follows: // packet_type || FIRST_HOP || packet - pub fn try_from_bytes(b: &[u8]) -> Result { - let packet_type = match PacketType::try_from(b[0]) { - Ok(mode) => mode, - Err(_) => return Err(MixPacketFormattingError::InvalidPacketType), - }; + pub fn try_from_v1_bytes(b: &[u8]) -> Result { + // we need at least 1 byte to read 
packet type and another one to read type of the encoded first hop address + if b.len() < 2 { + return Err(MixPacketFormattingError::TooFewBytesProvided); + } + + let packet_type = PacketType::try_from(b[0])?; let next_hop = NymNodeRoutingAddress::try_from_bytes(&b[1..])?; let addr_offset = next_hop.bytes_min_len(); let packet_data = &b[addr_offset + 1..]; let packet_size = packet_data.len(); - if PacketSize::get_type(packet_size).is_err() { - Err(MixPacketFormattingError::InvalidPacketSize(packet_size)) - } else { - let packet = match packet_type { - PacketType::Outfox => NymPacket::outfox_from_bytes(packet_data)?, - _ => NymPacket::sphinx_from_bytes(packet_data)?, - }; - - Ok(MixPacket { - next_hop, - packet, - packet_type, - }) + + // make sure the received data length corresponds to a valid packet + let _ = PacketSize::get_type(packet_size)?; + + let packet = match packet_type { + PacketType::Mix => NymPacket::sphinx_from_bytes(packet_data)?, + PacketType::Outfox => NymPacket::outfox_from_bytes(packet_data)?, + }; + + Ok(MixPacket { + next_hop, + packet, + packet_type, + key_rotation: SphinxKeyRotation::Unknown, + }) + } + + pub fn into_v1_bytes(self) -> Result, MixPacketFormattingError> { + Ok(std::iter::once(self.packet_type as u8) + .chain(self.next_hop.as_bytes()) + .chain(self.packet.to_bytes()?) 
+ .collect()) + } + + // the message is formatted as follows: + // packet_type || KEY_ROTATION || FIRST_HOP || packet + pub fn try_from_v2_bytes(b: &[u8]) -> Result { + // we need at least 1 byte to read packet type, 1 byte to read key rotation + // and finally another one to read type of the encoded first hop address + if b.len() < 3 { + return Err(MixPacketFormattingError::TooFewBytesProvided); } + + let packet_type = PacketType::try_from(b[0])?; + let key_rotation = SphinxKeyRotation::try_from(b[1])?; + + let next_hop = NymNodeRoutingAddress::try_from_bytes(&b[2..])?; + let addr_offset = next_hop.bytes_min_len(); + + let packet_data = &b[addr_offset + 2..]; + let packet_size = packet_data.len(); + + // make sure the received data length corresponds to a valid packet + let _ = PacketSize::get_type(packet_size)?; + + let packet = match packet_type { + PacketType::Mix => NymPacket::sphinx_from_bytes(packet_data)?, + PacketType::Outfox => NymPacket::outfox_from_bytes(packet_data)?, + }; + + Ok(MixPacket { + next_hop, + packet, + packet_type, + key_rotation, + }) } - pub fn into_bytes(self) -> Result, MixPacketFormattingError> { + pub fn into_v2_bytes(self) -> Result, MixPacketFormattingError> { Ok(std::iter::once(self.packet_type as u8) + .chain(std::iter::once(self.key_rotation as u8)) .chain(self.next_hop.as_bytes()) .chain(self.packet.to_bytes()?) 
.collect()) diff --git a/common/nymsphinx/framing/src/codec.rs b/common/nymsphinx/framing/src/codec.rs index 85685581aa6..da148908e47 100644 --- a/common/nymsphinx/framing/src/codec.rs +++ b/common/nymsphinx/framing/src/codec.rs @@ -3,6 +3,7 @@ use crate::packet::{FramedNymPacket, Header}; use bytes::{Buf, BufMut, BytesMut}; +use nym_sphinx_params::key_rotation::InvalidSphinxKeyRotation; use nym_sphinx_params::packet_sizes::{InvalidPacketSize, PacketSize}; use nym_sphinx_params::packet_types::InvalidPacketType; use nym_sphinx_params::packet_version::{InvalidPacketVersion, PacketVersion}; @@ -23,6 +24,9 @@ pub enum NymCodecError { #[error("the packet version information was malformed: {0}")] InvalidPacketVersion(#[from] InvalidPacketVersion), + #[error("the sphinx key rotation information was malformed: {0}")] + InvalidSphinxKeyRotation(#[from] InvalidSphinxKeyRotation), + #[error("received unsupported packet version {received}. max supported is {max_supported}")] UnsupportedPacketVersion { received: PacketVersion, @@ -65,8 +69,8 @@ impl Decoder for NymCodec { fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { if src.is_empty() { // can't do anything if we have no bytes, but let's reserve enough for the most - // conservative case, i.e. receiving an ack packet - src.reserve(Header::SIZE + PacketSize::AckPacket.size()); + // conservative case, i.e. 
receiving a legacy ack packet + src.reserve(Header::INITIAL_SIZE + PacketSize::AckPacket.size()); return Ok(None); } @@ -77,17 +81,20 @@ impl Decoder for NymCodec { None => return Ok(None), // we have some data but not enough to get header back }; + let header_size = header.encoded_size(); let packet_size = header.packet_size.size(); - let frame_len = Header::SIZE + packet_size; - if src.len() < frame_len { + let frame_size = header_size + packet_size; + + if src.len() < frame_size { // we don't have enough bytes to read the rest of frame + // (we have already read the full header) src.reserve(packet_size); return Ok(None); } // advance buffer past the header - at this point we have enough bytes - src.advance(Header::SIZE); + src.advance(header_size); let packet_bytes = src.split_to(packet_size); let packet = if let Some(slice) = packet_bytes.get(..) { // here it could be debatable whether stream is corrupt or not, @@ -100,8 +107,7 @@ impl Decoder for NymCodec { return Ok(None); }; - // let packet = SphinxPacket::from_bytes(&sphinx_packet_bytes)?; - let nymsphinx_packet = FramedNymPacket { header, packet }; + let framed_packet = FramedNymPacket { header, packet }; // As per docs: // Before returning from the function, implementations should ensure that the buffer @@ -114,11 +120,11 @@ impl Decoder for NymCodec { // we also assume the next packet coming from the same client will use exactly the same versioning // as the current packet - let mut allocate_for_next_packet = Header::SIZE + PacketSize::AckPacket.size(); + let mut allocate_for_next_packet = header.encoded_size() + PacketSize::AckPacket.size(); if !src.is_empty() { match Header::decode(src) { Ok(Some(next_header)) => { - allocate_for_next_packet = Header::SIZE + next_header.packet_size.size(); + allocate_for_next_packet = next_header.frame_size(); } Ok(None) => { // we don't have enough information to know how much to reserve, fallback to the ack case @@ -126,22 +132,52 @@ impl Decoder for NymCodec { // 
the next frame will be malformed but let's leave handling the error to the next // call to 'decode', as presumably, the current sphinx packet is still valid - Err(_) => return Ok(Some(nymsphinx_packet)), + Err(_) => return Ok(Some(framed_packet)), }; } src.reserve(allocate_for_next_packet); - Ok(Some(nymsphinx_packet)) + Ok(Some(framed_packet)) } } #[cfg(test)] mod packet_encoding { use super::*; + use nym_sphinx_params::packet_version::{ + CURRENT_PACKET_VERSION, INITIAL_PACKET_VERSION_NUMBER, + }; + use nym_sphinx_params::PacketType; use nym_sphinx_types::{ Delay as SphinxDelay, Destination, DestinationAddressBytes, Node, NodeAddressBytes, - PrivateKey, DESTINATION_ADDRESS_LENGTH, IDENTIFIER_LENGTH, NODE_ADDRESS_LENGTH, + NymPacket, PrivateKey, DESTINATION_ADDRESS_LENGTH, IDENTIFIER_LENGTH, NODE_ADDRESS_LENGTH, }; + fn dummy_header() -> Header { + Header { + packet_version: CURRENT_PACKET_VERSION, + packet_size: Default::default(), + key_rotation: Default::default(), + packet_type: Default::default(), + } + } + + fn dummy_outfox() -> Header { + Header { + packet_type: PacketType::Outfox, + packet_size: PacketSize::OutfoxRegularPacket, + ..dummy_legacy_header() + } + } + + fn dummy_legacy_header() -> Header { + Header { + packet_version: PacketVersion::try_from(INITIAL_PACKET_VERSION_NUMBER).unwrap(), + packet_size: Default::default(), + key_rotation: Default::default(), + packet_type: Default::default(), + } + } + fn random_pubkey() -> nym_sphinx_types::PublicKey { let private_key = PrivateKey::random(); (&private_key).into() @@ -222,7 +258,7 @@ mod packet_encoding { #[test] fn whole_packet_can_be_decoded_from_a_valid_encoded_instance() { - let header = Default::default(); + let header = dummy_header(); let sphinx_packet = make_valid_sphinx_packet(Default::default()); let sphinx_bytes = sphinx_packet.to_bytes().unwrap(); @@ -241,7 +277,7 @@ mod packet_encoding { #[test] fn whole_outfox_can_be_decoded_from_a_valid_encoded_instance() { - let header = 
Header::outfox(); + let header = dummy_outfox(); let packet = make_valid_outfox_packet(PacketSize::OutfoxRegularPacket); let packet_bytes = packet.to_bytes().unwrap(); @@ -269,7 +305,7 @@ mod packet_encoding { assert!(NymCodec.decode(&mut empty_bytes).unwrap().is_none()); assert_eq!( empty_bytes.capacity(), - Header::SIZE + PacketSize::AckPacket.size() + Header::INITIAL_SIZE + PacketSize::AckPacket.size() ); } @@ -287,13 +323,14 @@ mod packet_encoding { let header = Header { packet_version: PacketVersion::new(), packet_size, - ..Default::default() + key_rotation: Default::default(), + packet_type: Default::default(), }; let mut bytes = BytesMut::new(); header.encode(&mut bytes); assert!(NymCodec.decode(&mut bytes).unwrap().is_none()); - assert_eq!(bytes.capacity(), Header::SIZE + packet_size.size()) + assert_eq!(bytes.capacity(), Header::V8_SIZE + packet_size.size()) } } @@ -301,7 +338,7 @@ mod packet_encoding { fn for_full_frame_with_versioned_header() { // if full frame is used exactly, there should be enough space for header + ack packet let packet = FramedNymPacket { - header: Header::default(), + header: dummy_header(), packet: make_valid_sphinx_packet(Default::default()), }; @@ -310,7 +347,7 @@ mod packet_encoding { assert!(NymCodec.decode(&mut bytes).unwrap().is_some()); assert_eq!( bytes.capacity(), - Header::SIZE + PacketSize::AckPacket.size() + Header::V8_SIZE + PacketSize::AckPacket.size() ); } @@ -327,7 +364,7 @@ mod packet_encoding { for packet_size in packet_sizes { let first_packet = FramedNymPacket { - header: Header::default(), + header: dummy_header(), packet: make_valid_sphinx_packet(Default::default()), }; @@ -346,12 +383,12 @@ mod packet_encoding { #[test] fn can_decode_two_packets_immediately() { let packet1 = FramedNymPacket { - header: Header::default(), + header: dummy_header(), packet: make_valid_sphinx_packet(Default::default()), }; let packet2 = FramedNymPacket { - header: Header::default(), + header: dummy_header(), packet: 
make_valid_sphinx_packet(Default::default()), }; @@ -368,12 +405,12 @@ mod packet_encoding { #[test] fn can_decode_two_packets_in_separate_calls() { let packet1 = FramedNymPacket { - header: Header::default(), + header: dummy_header(), packet: make_valid_sphinx_packet(Default::default()), }; let packet2 = FramedNymPacket { - header: Header::default(), + header: dummy_header(), packet: make_valid_sphinx_packet(Default::default()), }; diff --git a/common/nymsphinx/framing/src/packet.rs b/common/nymsphinx/framing/src/packet.rs index 184444f807d..d246e27ffb5 100644 --- a/common/nymsphinx/framing/src/packet.rs +++ b/common/nymsphinx/framing/src/packet.rs @@ -3,6 +3,8 @@ use crate::codec::NymCodecError; use bytes::{BufMut, BytesMut}; +use nym_sphinx_forwarding::packet::MixPacket; +use nym_sphinx_params::key_rotation::SphinxKeyRotation; use nym_sphinx_params::packet_sizes::PacketSize; use nym_sphinx_params::packet_version::{PacketVersion, CURRENT_PACKET_VERSION}; use nym_sphinx_params::PacketType; @@ -17,8 +19,20 @@ pub struct FramedNymPacket { pub(crate) packet: NymPacket, } +impl From for FramedNymPacket { + fn from(packet: MixPacket) -> Self { + let typ = packet.packet_type(); + let rot = packet.key_rotation(); + FramedNymPacket::new(packet.into_packet(), typ, rot) + } +} + impl FramedNymPacket { - pub fn new(packet: NymPacket, packet_type: PacketType) -> Self { + pub fn new( + packet: NymPacket, + packet_type: PacketType, + key_rotation: SphinxKeyRotation, + ) -> Self { // If this fails somebody is using the library in a super incorrect way, because they // already managed to somehow create a sphinx packet let packet_size = PacketSize::get_type(packet.len()).unwrap(); @@ -26,6 +40,7 @@ impl FramedNymPacket { let header = Header { packet_version: PacketVersion::new(), packet_size, + key_rotation, packet_type, }; @@ -52,6 +67,10 @@ impl FramedNymPacket { &self.packet } + pub fn key_rotation(&self) -> SphinxKeyRotation { + self.header.key_rotation + } + pub fn 
is_sphinx(&self) -> bool { self.packet.is_sphinx() } @@ -60,13 +79,16 @@ impl FramedNymPacket { // Contains any metadata that might be useful for sending between mix nodes. // TODO: in theory all those data could be put in a single `u8` by setting appropriate bits, // but would that really be worth it? -#[derive(Debug, Default, PartialEq, Eq, Copy, Clone)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct Header { /// Represents the wire format version used to construct this packet. - pub(crate) packet_version: PacketVersion, + pub packet_version: PacketVersion, /// Represents type and consequently size of the included SphinxPacket. - pub(crate) packet_size: PacketSize, + pub packet_size: PacketSize, + + /// Represents information regarding which key rotation has been used for constructing this packet. + pub key_rotation: SphinxKeyRotation, /// Represents whether this packet is sent in a `vpn_mode` meaning it should not get delayed /// and shared keys might get reused. Mixnodes are capable of inferring this mode from the @@ -77,35 +99,48 @@ pub struct Header { /// (note: this will be behind some encryption, either something implemented by us or some SSL action) // Note: currently packet_type is deprecated but is still left as a concept behind to not break // compatibility with existing network - pub(crate) packet_type: PacketType, + pub packet_type: PacketType, } impl Header { - pub(crate) const SIZE: usize = 3; - - pub fn outfox() -> Header { - Header { - packet_version: PacketVersion::default(), - packet_size: PacketSize::OutfoxRegularPacket, - packet_type: PacketType::Outfox, - } - } + pub(crate) const INITIAL_SIZE: usize = 3; + pub(crate) const V8_SIZE: usize = 4; pub(crate) fn encode(&self, dst: &mut BytesMut) { - dst.reserve(Self::SIZE); + let len = self.encoded_size(); + + if dst.len() < len { + dst.reserve(len); + } dst.put_u8(self.packet_version.as_u8()); dst.put_u8(self.packet_size as u8); dst.put_u8(self.packet_type as u8); + if 
!self.packet_version.is_initial() { + dst.put_u8(self.key_rotation as u8) + } + // reserve bytes for the actual packet dst.reserve(self.packet_size.size()); } + pub(crate) fn frame_size(&self) -> usize { + self.encoded_size() + self.packet_size.size() + } + + pub(crate) fn encoded_size(&self) -> usize { + if self.packet_version.is_initial() { + Self::INITIAL_SIZE + } else { + Self::V8_SIZE + } + } + pub(crate) fn decode(src: &mut BytesMut) -> Result, NymCodecError> { - if src.len() < Self::SIZE { + if src.len() < Self::INITIAL_SIZE { // can't do anything if we don't have enough bytes - but reserve enough for the next call - src.reserve(Self::SIZE); + src.reserve(Self::INITIAL_SIZE); return Ok(None); } @@ -119,10 +154,23 @@ impl Header { }); } + // we need to be able to decode the full header + if !packet_version.is_initial() && src.len() < Self::V8_SIZE { + src.reserve(1); + return Ok(None); + } + + let key_rotation = if packet_version.is_initial() { + SphinxKeyRotation::Unknown + } else { + SphinxKeyRotation::try_from(src[3])? 
+ }; + Ok(Some(Header { packet_version, packet_size: PacketSize::try_from(src[1])?, packet_type: PacketType::try_from(src[2])?, + key_rotation, })) } } @@ -130,10 +178,20 @@ impl Header { #[cfg(test)] mod header_encoding { use super::*; + use nym_sphinx_params::packet_version::INITIAL_PACKET_VERSION_NUMBER; + + fn dummy_header() -> Header { + Header { + packet_version: CURRENT_PACKET_VERSION, + packet_size: Default::default(), + key_rotation: Default::default(), + packet_type: Default::default(), + } + } #[test] fn header_can_be_decoded_from_a_valid_encoded_instance() { - let header = Header::default(); + let header = dummy_header(); let mut bytes = BytesMut::new(); header.encode(&mut bytes); let decoded = Header::decode(&mut bytes).unwrap().unwrap(); @@ -153,6 +211,7 @@ mod header_encoding { PacketVersion::new().as_u8(), unknown_packet_size, PacketType::default() as u8, + SphinxKeyRotation::EvenRotation as u8, ] .as_ref(), ); @@ -167,7 +226,9 @@ mod header_encoding { let mut bytes = BytesMut::from( [ - PacketVersion::new().as_u8(), + PacketVersion::try_from(INITIAL_PACKET_VERSION_NUMBER) + .unwrap() + .as_u8(), PacketSize::default() as u8, unknown_packet_type, ] @@ -181,12 +242,12 @@ mod header_encoding { let mut empty_bytes = BytesMut::new(); let decode_attempt_1 = Header::decode(&mut empty_bytes).unwrap(); assert!(decode_attempt_1.is_none()); - assert!(empty_bytes.capacity() > Header::SIZE); + assert!(empty_bytes.capacity() > Header::V8_SIZE); let mut empty_bytes = BytesMut::with_capacity(1); let decode_attempt_2 = Header::decode(&mut empty_bytes).unwrap(); assert!(decode_attempt_2.is_none()); - assert!(empty_bytes.capacity() > Header::SIZE); + assert!(empty_bytes.capacity() > Header::V8_SIZE); } #[test] @@ -202,7 +263,7 @@ mod header_encoding { let header = Header { packet_version: PacketVersion::new(), packet_size, - ..Default::default() + ..dummy_header() }; let mut bytes = BytesMut::new(); header.encode(&mut bytes); @@ -217,6 +278,7 @@ mod header_encoding { 
let unchecked_header = Header { packet_version: future_version, packet_size: PacketSize::RegularPacket, + key_rotation: SphinxKeyRotation::EvenRotation, packet_type: PacketType::Mix, }; let mut bytes = BytesMut::new(); diff --git a/common/nymsphinx/framing/src/processing.rs b/common/nymsphinx/framing/src/processing.rs index 1dc9ebf0669..3703f36d847 100644 --- a/common/nymsphinx/framing/src/processing.rs +++ b/common/nymsphinx/framing/src/processing.rs @@ -5,7 +5,7 @@ use crate::packet::FramedNymPacket; use nym_sphinx_acknowledgements::surb_ack::{SurbAck, SurbAckRecoveryError}; use nym_sphinx_addressing::nodes::{NymNodeRoutingAddress, NymNodeRoutingAddressError}; use nym_sphinx_forwarding::packet::MixPacket; -use nym_sphinx_params::{PacketSize, PacketType}; +use nym_sphinx_params::{PacketSize, PacketType, SphinxKeyRotation}; use nym_sphinx_types::header::shared_secret::ExpandedSharedSecret; use nym_sphinx_types::{ Delay as SphinxDelay, DestinationAddressBytes, NodeAddressBytes, NymPacket, NymPacketError, @@ -103,10 +103,18 @@ pub enum PacketProcessingError { #[error("attempted to partially process an outfox packet")] PartialOutfoxProcessing, + #[error("the key needed for unwrapping this packet has already expired")] + ExpiredKey, + #[error("this packet has already been processed before")] PacketReplay, } +pub struct PartialyUnwrappedPacketWithKeyRotation { + pub packet: PartiallyUnwrappedPacket, + pub used_key_rotation: u32, +} + pub struct PartiallyUnwrappedPacket { received_data: FramedNymPacket, partial_result: PartialMixProcessingResult, @@ -119,16 +127,19 @@ impl PartiallyUnwrappedPacket { pub fn new( received_data: FramedNymPacket, sphinx_key: &PrivateKey, - ) -> Result { + ) -> Result { let partial_result = match received_data.packet() { NymPacket::Sphinx(packet) => { let expanded_shared_secret = packet.header.compute_expanded_shared_secret(sphinx_key); // don't continue if the header is malformed - packet + if let Err(err) = packet .header - 
.ensure_header_integrity(&expanded_shared_secret)?; + .ensure_header_integrity(&expanded_shared_secret) + { + return Err((received_data, err.into())); + } PartialMixProcessingResult::Sphinx { expanded_shared_secret, @@ -147,6 +158,7 @@ impl PartiallyUnwrappedPacket { let packet_size = self.received_data.packet_size(); let packet_type = self.received_data.packet_type(); + let key_rotation = self.received_data.header.key_rotation; let packet = self.received_data.into_inner(); // currently partial unwrapping is only implemented for sphinx packets. @@ -161,12 +173,22 @@ impl PartiallyUnwrappedPacket { return Err(PacketProcessingError::PartialOutfoxProcessing); }; let processed_packet = packet.process_with_expanded_secret(&expanded_shared_secret)?; - wrap_processed_sphinx_packet(processed_packet, packet_size, packet_type) + wrap_processed_sphinx_packet(processed_packet, packet_size, packet_type, key_rotation) } pub fn replay_tag(&self) -> Option<&[u8; REPLAY_TAG_SIZE]> { self.partial_result.replay_tag() } + + pub fn with_key_rotation( + self, + used_key_rotation: u32, + ) -> PartialyUnwrappedPacketWithKeyRotation { + PartialyUnwrappedPacketWithKeyRotation { + packet: self, + used_key_rotation, + } + } } impl From<(FramedNymPacket, PartialMixProcessingResult)> for PartiallyUnwrappedPacket { @@ -186,13 +208,14 @@ pub fn process_framed_packet( ) -> Result { let packet_size = received.packet_size(); let packet_type = received.packet_type(); + let key_rotation = received.key_rotation(); // unwrap the sphinx packet let processed_packet = perform_framed_unwrapping(received, sphinx_key)?; // for forward packets, extract next hop and set delay (but do NOT delay here) // for final packets, extract SURBAck - perform_final_processing(processed_packet, packet_size, packet_type) + perform_final_processing(processed_packet, packet_size, packet_type, key_rotation) } fn perform_framed_unwrapping( @@ -217,6 +240,7 @@ fn wrap_processed_sphinx_packet( packet: 
nym_sphinx_types::ProcessedPacket, packet_size: PacketSize, packet_type: PacketType, + key_rotation: SphinxKeyRotation, ) -> Result { let processing_data = match packet.data { ProcessedPacketData::ForwardHop { @@ -228,6 +252,7 @@ fn wrap_processed_sphinx_packet( next_hop_address, delay, packet_type, + key_rotation, ), // right now there's no use for the surb_id included in the header - probably it should get removed from the // sphinx all together? @@ -240,6 +265,7 @@ fn wrap_processed_sphinx_packet( payload.recover_plaintext()?, packet_size, packet_type, + key_rotation, ), }?; @@ -253,6 +279,7 @@ fn wrap_processed_outfox_packet( packet: OutfoxProcessedPacket, packet_size: PacketSize, packet_type: PacketType, + key_rotation: SphinxKeyRotation, ) -> Result { let next_address = *packet.next_address(); let packet = packet.into_packet(); @@ -262,6 +289,7 @@ fn wrap_processed_outfox_packet( packet.recover_plaintext()?.to_vec(), packet_size, packet_type, + key_rotation, )?; Ok(MixProcessingResult { packet_version: MixPacketVersion::Outfox, @@ -272,6 +300,7 @@ fn wrap_processed_outfox_packet( NymNodeRoutingAddress::try_from_bytes(&next_address)?, NymPacket::Outfox(packet), PacketType::Outfox, + SphinxKeyRotation::Unknown, ); Ok(MixProcessingResult { packet_version: MixPacketVersion::Outfox, @@ -287,13 +316,14 @@ fn perform_final_processing( packet: NymProcessedPacket, packet_size: PacketSize, packet_type: PacketType, + key_rotation: SphinxKeyRotation, ) -> Result { match packet { NymProcessedPacket::Sphinx(packet) => { - wrap_processed_sphinx_packet(packet, packet_size, packet_type) + wrap_processed_sphinx_packet(packet, packet_size, packet_type, key_rotation) } NymProcessedPacket::Outfox(packet) => { - wrap_processed_outfox_packet(packet, packet_size, packet_type) + wrap_processed_outfox_packet(packet, packet_size, packet_type, key_rotation) } } } @@ -303,8 +333,10 @@ fn process_final_hop( payload: Vec, packet_size: PacketSize, packet_type: PacketType, + key_rotation: 
SphinxKeyRotation, ) -> Result { - let (forward_ack, message) = split_into_ack_and_message(payload, packet_size, packet_type)?; + let (forward_ack, message) = + split_into_ack_and_message(payload, packet_size, packet_type, key_rotation)?; Ok(MixProcessingResultData::FinalHop { final_hop_data: ProcessedFinalHop { @@ -319,6 +351,7 @@ fn split_into_ack_and_message( data: Vec, packet_size: PacketSize, packet_type: PacketType, + key_rotation: SphinxKeyRotation, ) -> Result<(Option, Vec), PacketProcessingError> { match packet_size { PacketSize::AckPacket | PacketSize::OutfoxAckPacket => { @@ -340,7 +373,7 @@ fn split_into_ack_and_message( return Err(err.into()); } }; - let forward_ack = MixPacket::new(ack_first_hop, ack_packet, packet_type); + let forward_ack = MixPacket::new(ack_first_hop, ack_packet, packet_type, key_rotation); Ok((Some(forward_ack), message)) } } @@ -368,10 +401,11 @@ fn process_forward_hop( forward_address: NodeAddressBytes, delay: SphinxDelay, packet_type: PacketType, + key_rotation: SphinxKeyRotation, ) -> Result { let next_hop_address = NymNodeRoutingAddress::try_from(forward_address)?; - let packet = MixPacket::new(next_hop_address, packet, packet_type); + let packet = MixPacket::new(next_hop_address, packet, packet_type, key_rotation); Ok(MixProcessingResultData::ForwardHop { packet, delay: Some(delay), @@ -422,9 +456,13 @@ mod tests { #[tokio::test] async fn splitting_into_ack_and_message_returns_whole_data_for_ack() { let data = vec![42u8; SurbAck::len(Some(PacketType::Mix)) + 10]; - let (ack, message) = - split_into_ack_and_message(data.clone(), PacketSize::AckPacket, PacketType::Mix) - .unwrap(); + let (ack, message) = split_into_ack_and_message( + data.clone(), + PacketSize::AckPacket, + PacketType::Mix, + SphinxKeyRotation::EvenRotation, + ) + .unwrap(); assert!(ack.is_none()); assert_eq!(data, message) } @@ -436,6 +474,7 @@ mod tests { data.clone(), PacketSize::OutfoxAckPacket, PacketType::Outfox, + SphinxKeyRotation::EvenRotation, ) 
.unwrap(); assert!(ack.is_none()); diff --git a/common/nymsphinx/params/src/key_rotation.rs b/common/nymsphinx/params/src/key_rotation.rs new file mode 100644 index 00000000000..7c6e6179af1 --- /dev/null +++ b/common/nymsphinx/params/src/key_rotation.rs @@ -0,0 +1,50 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use thiserror::Error; + +#[derive(Default, Clone, Copy, Debug, PartialEq, Eq)] +#[repr(u8)] +pub enum SphinxKeyRotation { + // for legacy packets, where there's no explicit information which key has been used + #[default] + Unknown = 0, + + OddRotation = 1, + + EvenRotation = 2, +} + +#[derive(Debug, Error)] +#[error("{received} is not a valid encoding of a sphinx key rotation")] +pub struct InvalidSphinxKeyRotation { + received: u8, +} + +// convert from particular rotation id into SphinxKeyRotation variant +impl From for SphinxKeyRotation { + fn from(value: u32) -> Self { + if value == 0 || value == u32::MAX { + SphinxKeyRotation::Unknown + } else if value % 2 == 0 { + SphinxKeyRotation::EvenRotation + } else { + SphinxKeyRotation::OddRotation + } + } +} + +// convert from an encoded SphinxKeyRotation into particular variant +// if value is actually provided, it MUST be one of the two. 
otherwise it is invalid +impl TryFrom for SphinxKeyRotation { + type Error = InvalidSphinxKeyRotation; + + fn try_from(value: u8) -> Result { + match value { + _ if value == (Self::Unknown as u8) => Ok(Self::Unknown), + _ if value == (Self::OddRotation as u8) => Ok(Self::OddRotation), + _ if value == (Self::EvenRotation as u8) => Ok(Self::EvenRotation), + received => Err(InvalidSphinxKeyRotation { received }), + } + } +} diff --git a/common/nymsphinx/params/src/lib.rs b/common/nymsphinx/params/src/lib.rs index 69c8368e994..9046425a402 100644 --- a/common/nymsphinx/params/src/lib.rs +++ b/common/nymsphinx/params/src/lib.rs @@ -9,9 +9,12 @@ use nym_crypto::Aes256GcmSiv; type Aes128Ctr = ctr::Ctr64BE; // Re-export for ease of use +pub use key_rotation::SphinxKeyRotation; pub use packet_sizes::PacketSize; pub use packet_types::PacketType; +pub use packet_version::PacketVersion; +pub mod key_rotation; pub mod packet_sizes; pub mod packet_types; pub mod packet_version; diff --git a/common/nymsphinx/params/src/packet_types.rs b/common/nymsphinx/params/src/packet_types.rs index f3ad3d54b9f..c597d9fda80 100644 --- a/common/nymsphinx/params/src/packet_types.rs +++ b/common/nymsphinx/params/src/packet_types.rs @@ -11,7 +11,7 @@ use std::fmt; use thiserror::Error; #[derive(Error, Debug)] -#[error("{received} is not a valid packet mode tag")] +#[error("{received} is not a valid packet type tag")] pub struct InvalidPacketType { received: u8, } diff --git a/common/nymsphinx/params/src/packet_version.rs b/common/nymsphinx/params/src/packet_version.rs index d0d3f89fc62..02492ea77a9 100644 --- a/common/nymsphinx/params/src/packet_version.rs +++ b/common/nymsphinx/params/src/packet_version.rs @@ -10,13 +10,15 @@ use thiserror::Error; // - packet_version (starting with v1.1.0) // - packet_size indicator // - packet_type +// - sphinx key rotation (starting with v1.12.0/v1.13.0 - either Cheddar or Dolcelatte release) + // it also just so happens that the only valid values for packet_size
indicator include values 1-6 // therefore if we receive byte `7` (or larger than that) we'll know we received a versioned packet, // otherwise we should treat it as legacy /// Increment it whenever we perform any breaking change in the wire format! pub const INITIAL_PACKET_VERSION_NUMBER: u8 = 7; - -pub const CURRENT_PACKET_VERSION_NUMBER: u8 = INITIAL_PACKET_VERSION_NUMBER; +pub const KEY_ROTATION_VERSION_NUMBER: u8 = 8; +pub const CURRENT_PACKET_VERSION_NUMBER: u8 = KEY_ROTATION_VERSION_NUMBER; pub const CURRENT_PACKET_VERSION: PacketVersion = PacketVersion::unchecked(CURRENT_PACKET_VERSION_NUMBER); @@ -38,6 +40,10 @@ impl PacketVersion { PacketVersion(CURRENT_PACKET_VERSION_NUMBER) } + pub fn is_initial(&self) -> bool { + self.0 == INITIAL_PACKET_VERSION_NUMBER + } + const fn unchecked(version: u8) -> PacketVersion { PacketVersion(version) } diff --git a/common/nymsphinx/src/preparer/mod.rs b/common/nymsphinx/src/preparer/mod.rs index e0c35200a85..9466ebe716e 100644 --- a/common/nymsphinx/src/preparer/mod.rs +++ b/common/nymsphinx/src/preparer/mod.rs @@ -13,13 +13,14 @@ use nym_sphinx_anonymous_replies::reply_surb::ReplySurb; use nym_sphinx_chunking::fragment::{Fragment, FragmentIdentifier}; use nym_sphinx_forwarding::packet::MixPacket; use nym_sphinx_params::packet_sizes::PacketSize; -use nym_sphinx_params::{PacketType, ReplySurbKeyDigestAlgorithm}; +use nym_sphinx_params::{PacketType, ReplySurbKeyDigestAlgorithm, SphinxKeyRotation}; use nym_sphinx_types::{Delay, NymPacket}; use nym_topology::{NymRouteProvider, NymTopologyError}; use rand::{CryptoRng, Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; use tracing::*; +use nym_sphinx_anonymous_replies::ReplySurbWithKeyRotation; use nym_sphinx_chunking::monitoring; use std::time::Duration; @@ -103,7 +104,7 @@ pub trait FragmentPreparer { fragment: Fragment, topology: &NymRouteProvider, ack_key: &AckKey, - reply_surb: ReplySurb, + reply_surb: ReplySurbWithKeyRotation, packet_sender: &Recipient, packet_type: 
PacketType, ) -> Result { @@ -148,7 +149,7 @@ pub trait FragmentPreparer { // the unwrap here is fine as the failures can only originate from attempting to use invalid payload lengths // and we just very carefully constructed a (presumably) valid one - let (sphinx_packet, first_hop_address) = reply_surb + let applied_surb = reply_surb .apply_surb(packet_payload, packet_size, packet_type) .unwrap(); @@ -157,7 +158,7 @@ pub trait FragmentPreparer { // well as the total delay of the ack packet. // we don't know the delays inside the reply surbs so we use best-effort estimation from our poisson distribution total_delay: expected_forward_delay + ack_delay, - mix_packet: MixPacket::new(first_hop_address, sphinx_packet, packet_type), + mix_packet: MixPacket::from_applied_surb(applied_surb, packet_type), fragment_identifier, }) } @@ -211,6 +212,9 @@ pub trait FragmentPreparer { let packet_size = PacketSize::get_type_from_plaintext(expected_plaintext, packet_type) .expect("the message has been incorrectly fragmented"); + let rotation_id = topology.current_key_rotation(); + let sphinx_key_rotation = SphinxKeyRotation::from(rotation_id); + let fragment_identifier = fragment.fragment_identifier(); // create an ack @@ -279,7 +283,7 @@ pub trait FragmentPreparer { // well as the total delay of the ack packet. 
// note that the last hop of the packet is a gateway that does not do any delays total_delay: delays.iter().take(delays.len() - 1).sum::() + ack_delay, - mix_packet: MixPacket::new(first_hop_address, packet, packet_type), + mix_packet: MixPacket::new(first_hop_address, packet, packet_type, sphinx_key_rotation), fragment_identifier, }) } @@ -371,10 +375,12 @@ where use_legacy_reply_surb_format: bool, amount: usize, topology: &NymRouteProvider, - ) -> Result, NymTopologyError> { + ) -> Result, NymTopologyError> { let mut reply_surbs = Vec::with_capacity(amount); let disabled_mix_hops = self.mix_hops_disabled(); + let key_rotation = SphinxKeyRotation::from(topology.current_key_rotation()); + for _ in 0..amount { let reply_surb = ReplySurb::construct( &mut self.rng, @@ -383,7 +389,8 @@ where use_legacy_reply_surb_format, topology, disabled_mix_hops, // TODO: support SURBs with no mix hops after changes to surb format / construction - )?; + )? + .with_key_rotation(key_rotation); reply_surbs.push(reply_surb) } @@ -395,7 +402,7 @@ where fragment: Fragment, topology: &NymRouteProvider, ack_key: &AckKey, - reply_surb: ReplySurb, + reply_surb: ReplySurbWithKeyRotation, packet_type: PacketType, ) -> Result { let sender = self.sender_address; diff --git a/common/nymsphinx/types/src/lib.rs b/common/nymsphinx/types/src/lib.rs index cd620e32b39..5ecd5c3fb13 100644 --- a/common/nymsphinx/types/src/lib.rs +++ b/common/nymsphinx/types/src/lib.rs @@ -50,6 +50,7 @@ pub enum NymPacketError { FromSlice(#[from] TryFromSliceError), } +// TODO: wrap that guy and add extra metadata to indicate key rotation? 
#[allow(clippy::large_enum_variant)] pub enum NymPacket { #[cfg(feature = "sphinx")] diff --git a/common/pemstore/Cargo.toml b/common/pemstore/Cargo.toml index f8d13391dde..9c93dc122d6 100644 --- a/common/pemstore/Cargo.toml +++ b/common/pemstore/Cargo.toml @@ -9,4 +9,5 @@ repository = { workspace = true } [dependencies] pem = { workspace = true } -tracing = { workspace = true } \ No newline at end of file +tracing = { workspace = true } +zeroize = { workspace = true } \ No newline at end of file diff --git a/common/pemstore/src/lib.rs b/common/pemstore/src/lib.rs index 9f6d3e283b5..0829392dfb7 100644 --- a/common/pemstore/src/lib.rs +++ b/common/pemstore/src/lib.rs @@ -5,12 +5,35 @@ use crate::traits::{PemStorableKey, PemStorableKeyPair}; use pem::Pem; use std::fs::File; use std::io::{self, Read, Write}; +use std::ops::Deref; use std::path::{Path, PathBuf}; use tracing::debug; +use zeroize::{Zeroize, Zeroizing}; pub mod traits; -#[derive(Debug, Default)] +struct ZeroizingPem(Pem); + +impl Zeroize for ZeroizingPem { + fn zeroize(&mut self) { + self.0.tag.zeroize(); + self.0.contents.zeroize(); + } +} +impl Drop for ZeroizingPem { + fn drop(&mut self) { + self.zeroize(); + } +} + +impl Deref for ZeroizingPem { + type Target = Pem; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[derive(Debug, Clone, Default)] pub struct KeyPairPath { pub private_key_path: PathBuf, pub public_key_path: PathBuf, @@ -58,7 +81,7 @@ where io::ErrorKind::Other, format!( "unexpected key pem tag. 
Got '{}', expected: '{}'", - key_pem.tag, + key_pem.0.tag, T::pem_type() ), )); @@ -80,25 +103,33 @@ where write_pem_file(path, key.to_bytes(), T::pem_type()) } -fn read_pem_file>(filepath: P) -> io::Result { +fn read_pem_file>(filepath: P) -> io::Result { let mut pem_bytes = File::open(filepath)?; - let mut buf = Vec::new(); + let mut buf = Zeroizing::new(Vec::new()); pem_bytes.read_to_end(&mut buf)?; - pem::parse(&buf).map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + pem::parse(&buf) + .map(ZeroizingPem) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) } -fn write_pem_file>(filepath: P, data: Vec, tag: &str) -> io::Result<()> { +fn write_pem_file>(filepath: P, mut data: Vec, tag: &str) -> io::Result<()> { // ensure the whole directory structure exists if let Some(parent_dir) = filepath.as_ref().parent() { - std::fs::create_dir_all(parent_dir)?; + if let Err(err) = std::fs::create_dir_all(parent_dir) { + // in case of a failure, make sure to zeroize the data before returning + // (we can't wrap it in `Zeroize` due to `Pem` requirements) + data.zeroize(); + return Err(err); + } } - let pem = Pem { - tag: tag.to_string(), - contents: data, - }; - let key = pem::encode(&pem); let mut file = File::create(filepath.as_ref())?; + + let pem = ZeroizingPem(Pem { + tag: tag.to_string(), + contents: data, + }); + let key = Zeroizing::new(pem::encode(&pem)); file.write_all(key.as_bytes())?; // note: this is only supported on unix (on different systems, like Windows, it will just diff --git a/common/task/src/cancellation.rs b/common/task/src/cancellation.rs index 8639a272f12..9e120b4ec55 100644 --- a/common/task/src/cancellation.rs +++ b/common/task/src/cancellation.rs @@ -70,6 +70,10 @@ impl ShutdownToken { } } + pub fn ephemeral() -> Self { + ShutdownToken::new("ephemeral-token") + } + // Creates a ShutdownToken which will get cancelled whenever the current token gets cancelled. 
// Unlike a cloned/forked ShutdownToken, cancelling a child token does not cancel the parent token. #[must_use] diff --git a/common/topology/src/lib.rs b/common/topology/src/lib.rs index 1dc0fa695b1..53886ed5fdb 100644 --- a/common/topology/src/lib.rs +++ b/common/topology/src/lib.rs @@ -3,6 +3,8 @@ use ::serde::{Deserialize, Serialize}; use nym_api_requests::nym_nodes::SkimmedNode; +use nym_crypto::asymmetric::ed25519; +use nym_mixnet_contract_common::EpochId; use nym_sphinx_addressing::nodes::NodeIdentity; use nym_sphinx_types::Node as SphinxNode; use rand::prelude::IteratorRandom; @@ -91,8 +93,39 @@ mod deprecated_network_address_impls { pub type MixLayer = u8; +#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +pub struct NymTopologyMetadata { + key_rotation_id: u32, + // we have to keep track of key rotation id anyway, so we might as well also include the epoch id + // to keep track of the data staleness + absolute_epoch_id: EpochId, +} + +impl NymTopologyMetadata { + pub fn new(key_rotation_id: u32, absolute_epoch_id: EpochId) -> Self { + NymTopologyMetadata { + key_rotation_id, + absolute_epoch_id, + } + } +} + +impl Default for NymTopologyMetadata { + fn default() -> Self { + // that's not ideal, but we don't want to break backwards compatibility : / + NymTopologyMetadata { + key_rotation_id: u32::MAX, + absolute_epoch_id: 0, + } + } +} + #[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct NymTopology { + // while this is not ideal, use empty values as default to not break backwards compatibility + #[serde(default)] + metadata: NymTopologyMetadata, + // for the purposes of future VRF, everyone will need the same view of the network, regardless of performance filtering // so we use the same 'master' rewarded set information for that // @@ -128,6 +161,14 @@ impl NymRouteProvider { } } + pub fn current_key_rotation(&self) -> u32 { + self.topology.metadata.key_rotation_id + } + + pub fn absolute_epoch_id(&self) -> EpochId { + 
self.topology.metadata.absolute_epoch_id + } + pub fn new_empty(ignore_egress_epoch_roles: bool) -> NymRouteProvider { let this: Self = NymTopology::default().into(); this.with_ignore_egress_epoch_roles(ignore_egress_epoch_roles) @@ -201,18 +242,22 @@ impl NymRouteProvider { } impl NymTopology { + #[deprecated] pub fn new_empty(rewarded_set: impl Into) -> Self { NymTopology { + metadata: NymTopologyMetadata::default(), rewarded_set: rewarded_set.into(), node_details: Default::default(), } } pub fn new( + metadata: NymTopologyMetadata, rewarded_set: impl Into, node_details: Vec, ) -> Self { NymTopology { + metadata, rewarded_set: rewarded_set.into(), node_details: node_details.into_iter().map(|n| (n.node_id, n)).collect(), } @@ -228,6 +273,11 @@ impl NymTopology { self.add_additional_nodes(nodes.iter()) } + pub fn with_skimmed_nodes(mut self, nodes: &[SkimmedNode]) -> Self { + self.add_skimmed_nodes(nodes); + self + } + pub fn add_routing_nodes>( &mut self, nodes: impl IntoIterator, @@ -278,6 +328,12 @@ impl NymTopology { self.node_details.contains_key(&node_id) } + pub fn has_node(&self, identity: ed25519::PublicKey) -> bool { + self.node_details + .values() + .any(|node_details| node_details.identity_key == identity) + } + pub fn insert_node_details(&mut self, node_details: RoutingNode) { self.node_details.insert(node_details.node_id, node_details); } diff --git a/common/topology/src/wasm_helpers.rs b/common/topology/src/wasm_helpers.rs index ecb451e8beb..5ebce454550 100644 --- a/common/topology/src/wasm_helpers.rs +++ b/common/topology/src/wasm_helpers.rs @@ -5,7 +5,7 @@ #![allow(clippy::empty_docs)] use crate::node::{EntryDetails, RoutingNode, RoutingNodeError, SupportedRoles}; -use crate::{CachedEpochRewardedSet, NymTopology}; +use crate::{CachedEpochRewardedSet, NymTopology, NymTopologyMetadata}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::net::SocketAddr; @@ -38,6 +38,8 @@ impl From for JsValue { #[serde(rename_all = 
"camelCase")] #[serde(deny_unknown_fields)] pub struct WasmFriendlyNymTopology { + pub metadata: NymTopologyMetadata, + pub rewarded_set: CachedEpochRewardedSet, pub node_details: HashMap, @@ -53,13 +55,18 @@ impl TryFrom for NymTopology { .map(|details| details.try_into()) .collect::>()?; - Ok(NymTopology::new(value.rewarded_set, node_details)) + Ok(NymTopology::new( + value.metadata, + value.rewarded_set, + node_details, + )) } } impl From for WasmFriendlyNymTopology { fn from(value: NymTopology) -> Self { WasmFriendlyNymTopology { + metadata: value.metadata, rewarded_set: value.rewarded_set, node_details: value .node_details diff --git a/common/types/src/error.rs b/common/types/src/error.rs index 2a4595a495a..823ec8b375e 100644 --- a/common/types/src/error.rs +++ b/common/types/src/error.rs @@ -84,6 +84,8 @@ pub enum TypesError { NotADelegationEvent, #[error("Unknown network - {0}")] UnknownNetwork(String), + #[error("the response metadata has changed between pages")] + InconsistentPagedMetadata, } impl Serialize for TypesError { @@ -103,6 +105,9 @@ impl From for TypesError { ValidatorClientError::NyxdError(e) => e.into(), ValidatorClientError::NoAPIUrlAvailable => TypesError::NoNymApiUrlConfigured, ValidatorClientError::TendermintErrorRpc(err) => err.into(), + ValidatorClientError::InconsistentPagedMetadata => { + TypesError::InconsistentPagedMetadata + } } } } diff --git a/contracts/Cargo.lock b/contracts/Cargo.lock index f2463797745..dd138beb211 100644 --- a/contracts/Cargo.lock +++ b/contracts/Cargo.lock @@ -1193,7 +1193,6 @@ version = "1.5.1" dependencies = [ "anyhow", "bs58", - "cosmwasm-derive", "cosmwasm-schema", "cosmwasm-std", "cw-controllers", @@ -1208,8 +1207,6 @@ dependencies = [ "rand_chacha", "semver", "serde", - "thiserror 2.0.12", - "time", ] [[package]] @@ -1262,6 +1259,7 @@ version = "0.3.0" dependencies = [ "pem", "tracing", + "zeroize", ] [[package]] diff --git a/contracts/Cargo.toml b/contracts/Cargo.toml index ce2f7df73b0..401a56852ab 
100644 --- a/contracts/Cargo.toml +++ b/contracts/Cargo.toml @@ -52,3 +52,13 @@ sylvia = "1.3.3" schemars = "0.8.16" thiserror = "2.0.11" + +[workspace.lints.clippy] +unwrap_used = "deny" +expect_used = "deny" +todo = "deny" +dbg_macro = "deny" +exit = "deny" +panic = "deny" +unimplemented = "deny" +unreachable = "deny" \ No newline at end of file diff --git a/contracts/mixnet-vesting-integration-tests/src/support/fixtures.rs b/contracts/mixnet-vesting-integration-tests/src/support/fixtures.rs index 3b857d6dbae..2df1a87e351 100644 --- a/contracts/mixnet-vesting-integration-tests/src/support/fixtures.rs +++ b/contracts/mixnet-vesting-integration-tests/src/support/fixtures.rs @@ -34,5 +34,6 @@ pub fn default_mixnet_init_msg() -> nym_mixnet_contract_common::InstantiateMsg { version_score_params: Default::default(), profit_margin: Default::default(), interval_operating_cost: Default::default(), + key_validity_in_epochs: None, } } diff --git a/contracts/mixnet/Cargo.toml b/contracts/mixnet/Cargo.toml index 917ad82e88a..9fa22fe0dbb 100644 --- a/contracts/mixnet/Cargo.toml +++ b/contracts/mixnet/Cargo.toml @@ -33,7 +33,6 @@ nym-contracts-common = { path = "../../common/cosmwasm-smart-contracts/contracts cosmwasm-schema = { workspace = true, optional = true } cosmwasm-std = { workspace = true } -cosmwasm-derive = { workspace = true } cw-controllers = { workspace = true } cw2 = { workspace = true } cw-storage-plus = { workspace = true } @@ -41,8 +40,6 @@ cw-storage-plus = { workspace = true } bs58 = { workspace = true } serde = { workspace = true, default-features = false, features = ["derive"] } semver = { workspace = true } -thiserror = { workspace = true } -time = { version = "0.3", features = ["macros"] } [dev-dependencies] anyhow.workspace = true @@ -55,3 +52,6 @@ easy-addr = { path = "../../common/cosmwasm-smart-contracts/easy_addr" } default = [] contract-testing = ["mixnet-contract-common/contract-testing"] schema-gen = ["mixnet-contract-common/schema", 
"cosmwasm-schema"] + +[lints] +workspace = true \ No newline at end of file diff --git a/contracts/mixnet/schema/nym-mixnet-contract.json b/contracts/mixnet/schema/nym-mixnet-contract.json index 3b80f44e3a6..cf365639499 100644 --- a/contracts/mixnet/schema/nym-mixnet-contract.json +++ b/contracts/mixnet/schema/nym-mixnet-contract.json @@ -41,6 +41,15 @@ } ] }, + "key_validity_in_epochs": { + "default": null, + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, "profit_margin": { "default": { "maximum": "1", @@ -3440,6 +3449,34 @@ } }, "additionalProperties": false + }, + { + "description": "Gets the current state config of the key rotation (i.e. starting epoch id and validity duration)", + "type": "object", + "required": [ + "get_key_rotation_state" + ], + "properties": { + "get_key_rotation_state": { + "type": "object", + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Gets the current key rotation id", + "type": "object", + "required": [ + "get_key_rotation_id" + ], + "properties": { + "get_key_rotation_id": { + "type": "object", + "additionalProperties": false + } + }, + "additionalProperties": false } ], "definitions": { @@ -5116,6 +5153,46 @@ } } }, + "get_key_rotation_id": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "KeyRotationIdResponse", + "type": "object", + "required": [ + "rotation_id" + ], + "properties": { + "rotation_id": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, + "get_key_rotation_state": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "KeyRotationState", + "type": "object", + "required": [ + "initial_epoch_id", + "validity_epochs" + ], + "properties": { + "initial_epoch_id": { + "description": "Records the initial epoch_id when the key rotation has been introduced (0 for fresh contracts). 
It is used for determining when rotation is meant to advance.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "validity_epochs": { + "description": "Defines how long each key rotation is valid for (in terms of epochs)", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false + }, "get_mix_node_bonds": { "$schema": "http://json-schema.org/draft-07/schema#", "title": "PagedMixnodeBondsResponse", diff --git a/contracts/mixnet/schema/raw/instantiate.json b/contracts/mixnet/schema/raw/instantiate.json index 0eca5410beb..b17e1ae6bf5 100644 --- a/contracts/mixnet/schema/raw/instantiate.json +++ b/contracts/mixnet/schema/raw/instantiate.json @@ -37,6 +37,15 @@ } ] }, + "key_validity_in_epochs": { + "default": null, + "type": [ + "integer", + "null" + ], + "format": "uint32", + "minimum": 0.0 + }, "profit_margin": { "default": { "maximum": "1", diff --git a/contracts/mixnet/schema/raw/query.json b/contracts/mixnet/schema/raw/query.json index fdf631ebce2..6ec5fdf579e 100644 --- a/contracts/mixnet/schema/raw/query.json +++ b/contracts/mixnet/schema/raw/query.json @@ -1486,6 +1486,34 @@ } }, "additionalProperties": false + }, + { + "description": "Gets the current state config of the key rotation (i.e. 
starting epoch id and validity duration)", + "type": "object", + "required": [ + "get_key_rotation_state" + ], + "properties": { + "get_key_rotation_state": { + "type": "object", + "additionalProperties": false + } + }, + "additionalProperties": false + }, + { + "description": "Gets the current key rotation id", + "type": "object", + "required": [ + "get_key_rotation_id" + ], + "properties": { + "get_key_rotation_id": { + "type": "object", + "additionalProperties": false + } + }, + "additionalProperties": false } ], "definitions": { diff --git a/contracts/mixnet/schema/raw/response_to_get_key_rotation_id.json b/contracts/mixnet/schema/raw/response_to_get_key_rotation_id.json new file mode 100644 index 00000000000..f0bdf2fbd16 --- /dev/null +++ b/contracts/mixnet/schema/raw/response_to_get_key_rotation_id.json @@ -0,0 +1,16 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "KeyRotationIdResponse", + "type": "object", + "required": [ + "rotation_id" + ], + "properties": { + "rotation_id": { + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false +} diff --git a/contracts/mixnet/schema/raw/response_to_get_key_rotation_state.json b/contracts/mixnet/schema/raw/response_to_get_key_rotation_state.json new file mode 100644 index 00000000000..efd32685f86 --- /dev/null +++ b/contracts/mixnet/schema/raw/response_to_get_key_rotation_state.json @@ -0,0 +1,24 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "KeyRotationState", + "type": "object", + "required": [ + "initial_epoch_id", + "validity_epochs" + ], + "properties": { + "initial_epoch_id": { + "description": "Records the initial epoch_id when the key rotation has been introduced (0 for fresh contracts). 
It is used for determining when rotation is meant to advance.", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "validity_epochs": { + "description": "Defines how long each key rotation is valid for (in terms of epochs)", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + } + }, + "additionalProperties": false +} diff --git a/contracts/mixnet/src/constants.rs b/contracts/mixnet/src/constants.rs index 50cf7c2541f..2bcb2ba4ea7 100644 --- a/contracts/mixnet/src/constants.rs +++ b/contracts/mixnet/src/constants.rs @@ -78,6 +78,7 @@ pub const NYMNODE_ROLES_ASSIGNMENT_NAMESPACE: &str = "roles"; pub const NYMNODE_REWARDED_SET_METADATA_NAMESPACE: &str = "roles_metadata"; pub const NYMNODE_ACTIVE_ROLE_ASSIGNMENT_KEY: &str = "active_roles"; +pub const KEY_ROTATION_STATE_KEY: &str = "key_rot_state"; pub const NODE_ID_COUNTER_KEY: &str = "nic"; pub const PENDING_MIXNODE_CHANGES_NAMESPACE: &str = "pmc"; pub const MIXNODES_PK_NAMESPACE: &str = "mnn"; diff --git a/contracts/mixnet/src/contract.rs b/contracts/mixnet/src/contract.rs index db9e3f44f49..522017c7625 100644 --- a/contracts/mixnet/src/contract.rs +++ b/contracts/mixnet/src/contract.rs @@ -5,6 +5,7 @@ use crate::constants::INITIAL_PLEDGE_AMOUNT; use crate::interval::storage as interval_storage; use crate::mixnet_contract_settings::storage as mixnet_params_storage; use crate::nodes::storage as nymnodes_storage; +use crate::queued_migrations::introduce_key_rotation_id; use crate::rewards::storage::RewardingStorage; use cosmwasm_std::{ entry_point, to_json_binary, Addr, Coin, Deps, DepsMut, Env, MessageInfo, QueryResponse, @@ -82,6 +83,11 @@ pub fn instantiate( }); } + let key_rotation_validity = msg.key_validity_in_epochs(); + if key_rotation_validity < InstantiateMsg::MIN_KEY_ROTATION_VALIDITY { + return Err(MixnetContractError::TooShortRotationInterval); + } + let rewarding_validator_address = deps.api.addr_validate(&msg.rewarding_validator_address)?; let vesting_contract_address = 
deps.api.addr_validate(&msg.vesting_contract_address)?; let state = default_initial_state( @@ -109,7 +115,7 @@ pub fn instantiate( msg.current_nym_node_version, )?; RewardingStorage::new().initialise(deps.storage, reward_params)?; - nymnodes_storage::initialise_storage(deps.storage)?; + nymnodes_storage::initialise_storage(deps.storage, key_rotation_validity)?; cw2::set_contract_version(deps.storage, CONTRACT_NAME, CONTRACT_VERSION)?; set_build_information!(deps.storage)?; @@ -598,6 +604,14 @@ pub fn query( QueryMsg::GetSigningNonce { address } => to_json_binary( &crate::signing::queries::query_current_signing_nonce(deps, address)?, ), + + // sphinx key rotation-related + QueryMsg::GetKeyRotationState {} => { + to_json_binary(&crate::nodes::queries::query_key_rotation_state(deps)?) + } + QueryMsg::GetKeyRotationId {} => { + to_json_binary(&crate::nodes::queries::query_key_rotation_id(deps)?) + } }; Ok(query_res?) @@ -605,18 +619,18 @@ pub fn query( #[entry_point] pub fn migrate( - deps: DepsMut<'_>, + mut deps: DepsMut<'_>, _env: Env, msg: MigrateMsg, ) -> Result { set_build_information!(deps.storage)?; cw2::ensure_from_older_version(deps.storage, CONTRACT_NAME, CONTRACT_VERSION)?; - // let skip_state_updates = msg.unsafe_skip_state_updates.unwrap_or(false); - // - // if !skip_state_updates { - // - // } + let skip_state_updates = msg.unsafe_skip_state_updates.unwrap_or(false); + + if !skip_state_updates { + introduce_key_rotation_id(deps.branch())?; + } // due to circular dependency on contract addresses (i.e. 
mixnet contract requiring vesting contract address // and vesting contract requiring the mixnet contract address), if we ever want to deploy any new fresh @@ -681,6 +695,7 @@ mod tests { minimum: "1000".parse().unwrap(), maximum: "10000".parse().unwrap(), }, + key_validity_in_epochs: None, }; let sender = message_info(&deps.api.addr_make("sender"), &[]); diff --git a/contracts/mixnet/src/nodes/queries.rs b/contracts/mixnet/src/nodes/queries.rs index 6283d21d751..02bc5276fc2 100644 --- a/contracts/mixnet/src/nodes/queries.rs +++ b/contracts/mixnet/src/nodes/queries.rs @@ -6,6 +6,7 @@ use crate::constants::{ NYM_NODE_DETAILS_DEFAULT_RETRIEVAL_LIMIT, NYM_NODE_DETAILS_MAX_RETRIEVAL_LIMIT, UNBONDED_NYM_NODES_DEFAULT_RETRIEVAL_LIMIT, UNBONDED_NYM_NODES_MAX_RETRIEVAL_LIMIT, }; +use crate::interval::storage as interval_storage; use crate::nodes::helpers::{ attach_nym_node_details, get_node_details_by_id, get_node_details_by_identity, get_node_details_by_owner, @@ -21,7 +22,9 @@ use mixnet_contract_common::nym_node::{ PagedNymNodeDetailsResponse, PagedUnbondedNymNodesResponse, Role, RolesMetadataResponse, StakeSaturationResponse, UnbondedNodeResponse, }; -use mixnet_contract_common::{NodeId, NymNodeBond, NymNodeDetails}; +use mixnet_contract_common::{ + KeyRotationIdResponse, KeyRotationState, NodeId, NymNodeBond, NymNodeDetails, +}; use nym_contracts_common::IdentityKey; pub(crate) fn query_nymnode_bonds_paged( @@ -257,3 +260,14 @@ pub fn query_stake_saturation( uncapped_saturation: Some(node_rewarding.uncapped_bond_saturation(&rewarding_params)), }) } + +pub fn query_key_rotation_state(deps: Deps<'_>) -> StdResult { + storage::KEY_ROTATION_STATE.load(deps.storage) +} + +pub fn query_key_rotation_id(deps: Deps<'_>) -> StdResult { + let interval = interval_storage::current_interval(deps.storage)?; + let rotation_state = storage::KEY_ROTATION_STATE.load(deps.storage)?; + let rotation_id = rotation_state.key_rotation_id(interval.current_epoch_absolute_id()); + 
Ok(KeyRotationIdResponse { rotation_id }) +} diff --git a/contracts/mixnet/src/nodes/storage/helpers.rs b/contracts/mixnet/src/nodes/storage/helpers.rs index 169a0d42bc3..7c8ae9dc6e7 100644 --- a/contracts/mixnet/src/nodes/storage/helpers.rs +++ b/contracts/mixnet/src/nodes/storage/helpers.rs @@ -2,11 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 use crate::nodes::storage::rewarded_set::{ACTIVE_ROLES_BUCKET, ROLES, ROLES_METADATA}; -use crate::nodes::storage::{nym_nodes, NYMNODE_ID_COUNTER}; +use crate::nodes::storage::{nym_nodes, KEY_ROTATION_STATE, NYMNODE_ID_COUNTER}; use cosmwasm_std::{StdResult, Storage}; use mixnet_contract_common::error::MixnetContractError; use mixnet_contract_common::nym_node::{RewardedSetMetadata, Role}; -use mixnet_contract_common::{EpochId, NodeId, NymNodeBond, RoleAssignment}; +use mixnet_contract_common::{EpochId, KeyRotationState, NodeId, NymNodeBond, RoleAssignment}; use serde::{Deserialize, Serialize}; #[derive(Copy, Clone, Default, Debug, Serialize, Deserialize, Eq, PartialEq)] @@ -103,7 +103,10 @@ pub(crate) fn next_nymnode_id_counter(store: &mut dyn Storage) -> StdResult Result<(), MixnetContractError> { +pub(crate) fn initialise_storage( + storage: &mut dyn Storage, + key_rotation_validity: u32, +) -> Result<(), MixnetContractError> { let active_bucket = RoleStorageBucket::default(); let inactive_bucket = active_bucket.other(); @@ -124,6 +127,15 @@ pub(crate) fn initialise_storage(storage: &mut dyn Storage) -> Result<(), Mixnet ROLES_METADATA.save(storage, active_bucket as u8, &Default::default())?; ROLES_METADATA.save(storage, inactive_bucket as u8, &Default::default())?; + // since we're initialising fresh storage, the current epoch_id is 0 + KEY_ROTATION_STATE.save( + storage, + &KeyRotationState { + validity_epochs: key_rotation_validity, + initial_epoch_id: 0, + }, + )?; + Ok(()) } diff --git a/contracts/mixnet/src/nodes/storage/mod.rs b/contracts/mixnet/src/nodes/storage/mod.rs index 41e7aea0335..fd45ee613aa 100644 --- 
a/contracts/mixnet/src/nodes/storage/mod.rs +++ b/contracts/mixnet/src/nodes/storage/mod.rs @@ -2,23 +2,26 @@ // SPDX-License-Identifier: Apache-2.0 use crate::constants::{ - NODE_ID_COUNTER_KEY, NYMNODE_ACTIVE_ROLE_ASSIGNMENT_KEY, NYMNODE_IDENTITY_IDX_NAMESPACE, - NYMNODE_OWNER_IDX_NAMESPACE, NYMNODE_PK_NAMESPACE, NYMNODE_REWARDED_SET_METADATA_NAMESPACE, - NYMNODE_ROLES_ASSIGNMENT_NAMESPACE, PENDING_NYMNODE_CHANGES_NAMESPACE, - UNBONDED_NYMNODE_IDENTITY_IDX_NAMESPACE, UNBONDED_NYMNODE_OWNER_IDX_NAMESPACE, - UNBONDED_NYMNODE_PK_NAMESPACE, + KEY_ROTATION_STATE_KEY, NODE_ID_COUNTER_KEY, NYMNODE_ACTIVE_ROLE_ASSIGNMENT_KEY, + NYMNODE_IDENTITY_IDX_NAMESPACE, NYMNODE_OWNER_IDX_NAMESPACE, NYMNODE_PK_NAMESPACE, + NYMNODE_REWARDED_SET_METADATA_NAMESPACE, NYMNODE_ROLES_ASSIGNMENT_NAMESPACE, + PENDING_NYMNODE_CHANGES_NAMESPACE, UNBONDED_NYMNODE_IDENTITY_IDX_NAMESPACE, + UNBONDED_NYMNODE_OWNER_IDX_NAMESPACE, UNBONDED_NYMNODE_PK_NAMESPACE, }; use crate::nodes::storage::helpers::RoleStorageBucket; use cosmwasm_std::Addr; use cw_storage_plus::{Index, IndexList, IndexedMap, Item, Map, MultiIndex, UniqueIndex}; use mixnet_contract_common::nym_node::{NymNodeBond, RewardedSetMetadata, Role, UnbondedNymNode}; -use mixnet_contract_common::{NodeId, PendingNodeChanges}; +use mixnet_contract_common::{KeyRotationState, NodeId, PendingNodeChanges}; use nym_contracts_common::IdentityKey; pub(crate) mod helpers; pub(crate) use helpers::*; +/// Item recording the current state of the key rotation setup +pub const KEY_ROTATION_STATE: Item = Item::new(KEY_ROTATION_STATE_KEY); + // IMPORTANT NOTE: we're using the same storage key as we had for MIXNODE_ID_COUNTER, // so that we could start from the old values pub const NYMNODE_ID_COUNTER: Item = Item::new(NODE_ID_COUNTER_KEY); diff --git a/contracts/mixnet/src/queued_migrations.rs b/contracts/mixnet/src/queued_migrations.rs index a0273196bee..4f9690a87e4 100644 --- a/contracts/mixnet/src/queued_migrations.rs +++ 
b/contracts/mixnet/src/queued_migrations.rs @@ -1,2 +1,21 @@ -// Copyright 2022-2024 - Nym Technologies SA +// Copyright 2022-2025 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 + +use crate::interval::storage as interval_storage; +use crate::nodes::storage as nymnodes_storage; +use cosmwasm_std::DepsMut; +use mixnet_contract_common::error::MixnetContractError; +use mixnet_contract_common::KeyRotationState; + +pub fn introduce_key_rotation_id(deps: DepsMut) -> Result<(), MixnetContractError> { + let current_epoch_id = + interval_storage::current_interval(deps.storage)?.current_epoch_absolute_id(); + nymnodes_storage::KEY_ROTATION_STATE.save( + deps.storage, + &KeyRotationState { + validity_epochs: 24, + initial_epoch_id: current_epoch_id, + }, + )?; + Ok(()) +} diff --git a/contracts/mixnet/src/rewards/transactions.rs b/contracts/mixnet/src/rewards/transactions.rs index f07a9c64a13..7f0b18077ec 100644 --- a/contracts/mixnet/src/rewards/transactions.rs +++ b/contracts/mixnet/src/rewards/transactions.rs @@ -313,6 +313,8 @@ pub(crate) fn try_update_rewarding_params( } } +#[allow(clippy::panic)] +#[allow(clippy::unreachable)] #[cfg(test)] pub mod tests { use super::*; diff --git a/contracts/mixnet/src/support/tests/mod.rs b/contracts/mixnet/src/support/tests/mod.rs index 61974e0da3d..1418f0d4cf9 100644 --- a/contracts/mixnet/src/support/tests/mod.rs +++ b/contracts/mixnet/src/support/tests/mod.rs @@ -1,6 +1,11 @@ // Copyright 2021-2023 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 +// fine in test code +#![allow(clippy::panic)] +#![allow(clippy::unreachable)] +#![allow(clippy::unimplemented)] + #[cfg(test)] pub mod fixtures; pub(crate) mod legacy; @@ -1853,6 +1858,7 @@ pub mod test_helpers { version_score_params: Default::default(), profit_margin: Default::default(), interval_operating_cost: Default::default(), + key_validity_in_epochs: None, }; let env = mock_env(); let info = sender("creator"); diff --git 
a/contracts/mixnet/src/vesting_migration.rs b/contracts/mixnet/src/vesting_migration.rs index 152c4adb87f..683b7a61021 100644 --- a/contracts/mixnet/src/vesting_migration.rs +++ b/contracts/mixnet/src/vesting_migration.rs @@ -212,6 +212,7 @@ pub(crate) fn try_migrate_vested_delegation( )?)) } +#[allow(clippy::panic)] #[cfg(test)] mod tests { use super::*; diff --git a/gateway/src/node/client_handling/websocket/connection_handler/authenticated.rs b/gateway/src/node/client_handling/websocket/connection_handler/authenticated.rs index 5ccac5ac649..e3b560ec94e 100644 --- a/gateway/src/node/client_handling/websocket/connection_handler/authenticated.rs +++ b/gateway/src/node/client_handling/websocket/connection_handler/authenticated.rs @@ -314,7 +314,8 @@ impl AuthenticatedHandler { } Ok(request) => match request { // currently only a single type exists - BinaryRequest::ForwardSphinx { packet } => { + BinaryRequest::ForwardSphinx { packet } + | BinaryRequest::ForwardSphinxV2 { packet } => { self.handle_forward_sphinx(packet).await.into_ws_message() } _ => RequestHandlingError::UnknownBinaryRequest.into_error_message(), diff --git a/nym-api/enter_db.sh b/nym-api/enter_db.sh new file mode 100755 index 00000000000..bdf5056178e --- /dev/null +++ b/nym-api/enter_db.sh @@ -0,0 +1,2 @@ +#!/bin/sh +sqlite3 -init settings.sql /Users/jedrzej/workspace/nym/target/debug/build/nym-api-6c1428b6c3c63c2f/out/nym-api-example.sqlite \ No newline at end of file diff --git a/nym-api/nym-api-requests/Cargo.toml b/nym-api/nym-api-requests/Cargo.toml index e75f21b0bad..a8bf382ffcc 100644 --- a/nym-api/nym-api-requests/Cargo.toml +++ b/nym-api/nym-api-requests/Cargo.toml @@ -23,6 +23,7 @@ thiserror.workspace = true time = { workspace = true, features = ["serde", "parsing", "formatting"] } ts-rs = { workspace = true, optional = true } utoipa.workspace = true +tracing = { workspace = true } # for serde on secp256k1 signatures ecdsa = { workspace = true, features = ["serde"] } diff --git 
a/nym-api/nym-api-requests/src/models.rs b/nym-api/nym-api-requests/src/models.rs index a39b6f25c78..b30847d59b1 100644 --- a/nym-api/nym-api-requests/src/models.rs +++ b/nym-api/nym-api-requests/src/models.rs @@ -32,6 +32,7 @@ use schemars::gen::SchemaGenerator; use schemars::schema::{InstanceType, Schema, SchemaObject}; use schemars::JsonSchema; use serde::{Deserialize, Deserializer, Serialize}; +use std::cmp::Ordering; use std::collections::BTreeMap; use std::fmt::{Debug, Display, Formatter}; use std::net::IpAddr; @@ -39,8 +40,10 @@ use std::ops::{Deref, DerefMut}; use std::{fmt, time::Duration}; use thiserror::Error; use time::{Date, OffsetDateTime}; +use tracing::{error, warn}; use utoipa::{IntoParams, ToResponse, ToSchema}; +pub use nym_mixnet_contract_common::KeyRotationState; pub use nym_node_requests::api::v1::node::models::BinaryBuildInformationOwned; #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema)] @@ -72,7 +75,9 @@ impl Display for RequestError { } } -#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema, ToSchema)] +#[derive( + Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema, ToSchema, Default, +)] #[cfg_attr(feature = "generate-ts", derive(ts_rs::TS))] #[cfg_attr( feature = "generate-ts", @@ -86,6 +91,7 @@ pub enum MixnodeStatus { Active, // in both the active set and the rewarded set Standby, // only in the rewarded set Inactive, // in neither the rewarded set nor the active set, but is bonded + #[default] NotFound, // doesn't even exist in the bonded set } impl MixnodeStatus { @@ -860,11 +866,17 @@ pub struct HostKeys { #[schema(value_type = String)] pub ed25519: ed25519::PublicKey, + #[deprecated(note = "use the current_x25519_sphinx_key with explicit rotation information")] #[serde(with = "bs58_x25519_pubkey")] #[schemars(with = "String")] #[schema(value_type = String)] pub x25519: x25519::PublicKey, + pub current_x25519_sphinx_key: SphinxKey, + + #[serde(default)] + pub 
pre_announced_x25519_sphinx_key: Option, + #[serde(default)] #[serde(with = "option_bs58_x25519_pubkey")] #[schemars(with = "Option")] @@ -872,11 +884,32 @@ pub struct HostKeys { pub x25519_noise: Option, } +#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, ToSchema)] +pub struct SphinxKey { + pub rotation_id: u32, + + #[serde(with = "bs58_x25519_pubkey")] + #[schemars(with = "String")] + #[schema(value_type = String)] + pub public_key: x25519::PublicKey, +} + +impl From for SphinxKey { + fn from(value: nym_node_requests::api::v1::node::models::SphinxKey) -> Self { + SphinxKey { + rotation_id: value.rotation_id, + public_key: value.public_key, + } + } +} + impl From for HostKeys { fn from(value: nym_node_requests::api::v1::node::models::HostKeys) -> Self { HostKeys { ed25519: value.ed25519_identity, x25519: value.x25519_sphinx, + current_x25519_sphinx_key: value.primary_x25519_sphinx_key.into(), + pre_announced_x25519_sphinx_key: value.pre_announced_x25519_sphinx_key.map(Into::into), x25519_noise: value.x25519_noise, } } @@ -999,7 +1032,39 @@ impl NymNodeDescription { self.description.host_information.keys.ed25519 } - pub fn to_skimmed_node(&self, role: NodeRole, performance: Performance) -> SkimmedNode { + pub fn current_sphinx_key(&self, current_rotation_id: u32) -> x25519::PublicKey { + let keys = &self.description.host_information.keys; + + if keys.current_x25519_sphinx_key.rotation_id == u32::MAX { + // legacy case (i.e. 
node doesn't support rotation) + return keys.current_x25519_sphinx_key.public_key; + } + + if current_rotation_id == keys.current_x25519_sphinx_key.rotation_id { + // it's the 'current' key + return keys.current_x25519_sphinx_key.public_key; + } + + if let Some(pre_announced) = &keys.pre_announced_x25519_sphinx_key { + if pre_announced.rotation_id == current_rotation_id { + return pre_announced.public_key; + } + } + + warn!( + "unexpected key rotation {current_rotation_id} for node {}", + self.node_id + ); + // this should never be reached, but just in case, return the fallback option + keys.current_x25519_sphinx_key.public_key + } + + pub fn to_skimmed_node( + &self, + current_rotation_id: u32, + role: NodeRole, + performance: Performance, + ) -> SkimmedNode { let keys = &self.description.host_information.keys; let entry = if self.description.declared_role.entry { Some(self.entry_information()) @@ -1012,7 +1077,7 @@ impl NymNodeDescription { ed25519_identity_pubkey: keys.ed25519, ip_addresses: self.description.host_information.ip_address.clone(), mix_port: self.description.mix_port(), - x25519_sphinx_pubkey: keys.x25519, + x25519_sphinx_pubkey: self.current_sphinx_key(current_rotation_id), // we can't use the declared roles, we have to take whatever was provided in the contract. // why? say this node COULD operate as an exit, but it might be the case the contract decided // to assign it an ENTRY role only. we have to use that one instead. 
@@ -1381,6 +1446,110 @@ impl NodeRefreshBody { } } +#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, ToSchema)] +pub struct KeyRotationInfoResponse { + pub key_rotation_state: KeyRotationState, + + #[schema(value_type = u32)] + pub current_absolute_epoch_id: EpochId, + + #[serde(with = "time::serde::rfc3339")] + #[schemars(with = "String")] + #[schema(value_type = String)] + pub current_epoch_start: OffsetDateTime, + + pub epoch_duration: Duration, +} + +impl KeyRotationInfoResponse { + pub fn current_key_rotation_id(&self) -> u32 { + self.key_rotation_state + .key_rotation_id(self.current_absolute_epoch_id) + } + + pub fn next_rotation_starting_epoch_id(&self) -> EpochId { + self.key_rotation_state + .next_rotation_starting_epoch_id(self.current_absolute_epoch_id) + } + + pub fn current_rotation_starting_epoch_id(&self) -> EpochId { + self.key_rotation_state + .current_rotation_starting_epoch_id(self.current_absolute_epoch_id) + } + + fn current_epoch_progress(&self, now: OffsetDateTime) -> f32 { + let elapsed = (now - self.current_epoch_start).as_seconds_f32(); + elapsed / self.epoch_duration.as_secs_f32() + } + + pub fn is_epoch_stuck(&self) -> bool { + let now = OffsetDateTime::now_utc(); + let progress = self.current_epoch_progress(now); + if progress > 1. { + let into_next = 1. - progress; + // if epoch hasn't progressed for more than 20% of its duration, mark is as stuck + if into_next > 0.2 { + let diff_time = + Duration::from_secs_f32(into_next * self.epoch_duration.as_secs_f32()); + let expected_epoch_end = self.current_epoch_start + self.epoch_duration; + warn!("the current epoch is expected to have been over by {expected_epoch_end}. 
it's already {} overdue!", humantime_serde::re::humantime::format_duration(diff_time)); + return true; + } + } + + false + } + + // based on the current **TIME**, determine what's the expected current rotation id + pub fn expected_current_rotation_id(&self) -> u32 { + let now = OffsetDateTime::now_utc(); + let current_end = now + self.epoch_duration; + if now < current_end { + return self + .key_rotation_state + .key_rotation_id(self.current_absolute_epoch_id); + } + + let diff = now - current_end; + let passed_epochs = diff / self.epoch_duration; + let expected_current_epoch = self.current_absolute_epoch_id + passed_epochs.floor() as u32; + + self.key_rotation_state + .key_rotation_id(expected_current_epoch) + } + + pub fn until_next_rotation(&self) -> Option { + let current_epoch_progress = self.current_epoch_progress(OffsetDateTime::now_utc()); + if current_epoch_progress > 1. { + return None; + } + + let next_rotation_epoch = self.next_rotation_starting_epoch_id(); + let full_remaining = + (next_rotation_epoch - self.current_absolute_epoch_id).checked_add(1)?; + + let epochs_until_next_rotation = (1. 
- current_epoch_progress) + full_remaining as f32; + + Some(Duration::from_secs_f32( + epochs_until_next_rotation * self.epoch_duration.as_secs_f32(), + )) + } + + pub fn epoch_start_time(&self, absolute_epoch_id: EpochId) -> OffsetDateTime { + match absolute_epoch_id.cmp(&self.current_absolute_epoch_id) { + Ordering::Less => { + let diff = self.current_absolute_epoch_id - absolute_epoch_id; + self.current_epoch_start - diff * self.epoch_duration + } + Ordering::Equal => self.current_epoch_start, + Ordering::Greater => { + let diff = absolute_epoch_id - self.current_absolute_epoch_id; + self.current_epoch_start + diff * self.epoch_duration + } + } + } +} + #[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, ToSchema)] pub struct RewardedSetResponse { #[serde(default)] diff --git a/nym-api/nym-api-requests/src/nym_nodes.rs b/nym-api/nym-api-requests/src/nym_nodes.rs index 544e07a8617..8e47ab45131 100644 --- a/nym-api/nym-api-requests/src/nym_nodes.rs +++ b/nym-api/nym-api-requests/src/nym_nodes.rs @@ -8,14 +8,28 @@ use nym_crypto::asymmetric::x25519::serde_helpers::bs58_x25519_pubkey; use nym_crypto::asymmetric::{ed25519, x25519}; use nym_mixnet_contract_common::nym_node::Role; use nym_mixnet_contract_common::reward_params::Performance; -use nym_mixnet_contract_common::{Interval, NodeId}; +use nym_mixnet_contract_common::{EpochId, Interval, NodeId}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::net::IpAddr; use time::OffsetDateTime; use utoipa::ToSchema; -#[derive(Clone, Copy, Debug, Serialize, Deserialize, schemars::JsonSchema, utoipa::ToSchema)] +#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, utoipa::ToSchema)] +pub struct SkimmedNodesWithMetadata { + pub nodes: Vec, + pub metadata: NodesResponseMetadata, +} + +impl SkimmedNodesWithMetadata { + pub fn new(nodes: Vec, metadata: NodesResponseMetadata) -> Self { + SkimmedNodesWithMetadata { nodes, metadata } + } +} + +#[derive( + Clone, Copy, 
Debug, Serialize, Deserialize, schemars::JsonSchema, utoipa::ToSchema, PartialEq, +)] #[serde(rename_all = "kebab-case")] pub enum TopologyRequestStatus { NoUpdates, @@ -43,20 +57,56 @@ impl CachedNodesResponse { } } +#[derive( + Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, utoipa::ToSchema, PartialEq, +)] +pub struct NodesResponseMetadata { + pub status: Option, + #[schema(value_type = u32)] + pub absolute_epoch_id: EpochId, + pub rotation_id: u32, + pub refreshed_at: OffsetDateTimeJsonSchemaWrapper, +} + +impl NodesResponseMetadata { + pub fn refreshed_at(&self) -> OffsetDateTime { + self.refreshed_at.into() + } +} + #[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema)] -pub struct PaginatedCachedNodesResponse { +// can't add any new fields here, even with #[serde(default)] and whatnot, +// because it will break all clients using bincode : ( +pub struct PaginatedCachedNodesResponseV1 { pub status: Option, pub refreshed_at: OffsetDateTimeJsonSchemaWrapper, pub nodes: PaginatedResponse, } -impl PaginatedCachedNodesResponse { +#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema)] +pub struct PaginatedCachedNodesResponseV2 { + pub metadata: NodesResponseMetadata, + pub nodes: PaginatedResponse, +} + +impl From> for PaginatedCachedNodesResponseV1 { + fn from(res: PaginatedCachedNodesResponseV2) -> Self { + PaginatedCachedNodesResponseV1 { + status: res.metadata.status, + refreshed_at: res.metadata.refreshed_at, + nodes: res.nodes, + } + } +} + +impl PaginatedCachedNodesResponseV2 { pub fn new_full( + absolute_epoch_id: EpochId, + rotation_id: u32, refreshed_at: impl Into, nodes: Vec, ) -> Self { - PaginatedCachedNodesResponse { - refreshed_at: refreshed_at.into(), + PaginatedCachedNodesResponseV2 { nodes: PaginatedResponse { pagination: Pagination { total: nodes.len(), @@ -65,19 +115,22 @@ impl PaginatedCachedNodesResponse { }, data: nodes, }, - status: None, + metadata: NodesResponseMetadata { + refreshed_at: 
refreshed_at.into(), + status: None, + absolute_epoch_id, + rotation_id, + }, } } - pub fn fresh(mut self, interval: Option) -> Self { - let iv = interval.map(TopologyRequestStatus::Fresh); - self.status = iv; + pub fn fresh(mut self, interval: Interval) -> Self { + self.metadata.status = Some(TopologyRequestStatus::Fresh(interval)); self } - pub fn no_updates() -> Self { - PaginatedCachedNodesResponse { - refreshed_at: OffsetDateTime::now_utc().into(), + pub fn no_updates(absolute_epoch_id: EpochId, rotation_id: u32) -> Self { + PaginatedCachedNodesResponseV2 { nodes: PaginatedResponse { pagination: Pagination { total: 0, @@ -86,7 +139,12 @@ impl PaginatedCachedNodesResponse { }, data: Vec::new(), }, - status: Some(TopologyRequestStatus::NoUpdates), + metadata: NodesResponseMetadata { + refreshed_at: OffsetDateTime::now_utc().into(), + status: Some(TopologyRequestStatus::NoUpdates), + absolute_epoch_id, + rotation_id, + }, } } } diff --git a/nym-api/settings.sql b/nym-api/settings.sql new file mode 100644 index 00000000000..321fef528aa --- /dev/null +++ b/nym-api/settings.sql @@ -0,0 +1,2 @@ +.mode columns +.headers on \ No newline at end of file diff --git a/nym-api/src/ecash/tests/mod.rs b/nym-api/src/ecash/tests/mod.rs index 0fe52829439..faaac904843 100644 --- a/nym-api/src/ecash/tests/mod.rs +++ b/nym-api/src/ecash/tests/mod.rs @@ -7,17 +7,19 @@ use crate::ecash::error::{EcashError, Result}; use crate::ecash::keys::KeyPairWithEpoch; use crate::ecash::state::EcashState; use crate::network::models::NetworkDetails; -use crate::node_describe_cache::DescribedNodes; +use crate::node_describe_cache::cache::DescribedNodes; use crate::node_status_api::handlers::unstable; use crate::node_status_api::NodeStatusCache; use crate::nym_contract_cache::cache::NymContractCache; use crate::status::ApiStatusState; use crate::support::caching::cache::SharedCache; use crate::support::config; -use crate::support::http::state::{AppState, ChainStatusCache, ForcedRefresh}; +use 
crate::support::http::state::chain_status::ChainStatusCache; +use crate::support::http::state::force_refresh::ForcedRefresh; +use crate::support::http::state::AppState; use crate::support::nyxd::Client; use crate::support::storage::NymApiStorage; -use crate::unstable_routes::account::cache::AddressInfoCache; +use crate::unstable_routes::v1::account::cache::AddressInfoCache; use async_trait::async_trait; use axum::Router; use axum_test::http::StatusCode; @@ -58,8 +60,8 @@ use nym_ecash_contract_common::blacklist::{BlacklistedAccountResponse, Blacklist use nym_ecash_contract_common::deposit::{Deposit, DepositId, DepositResponse}; use nym_task::TaskClient; use nym_validator_client::nym_api::routes::{ - API_VERSION, ECASH_BLIND_SIGN, ECASH_ISSUED_TICKETBOOKS_CHALLENGE_COMMITMENT, - ECASH_ISSUED_TICKETBOOKS_FOR, ECASH_ROUTES, + ECASH_BLIND_SIGN, ECASH_ISSUED_TICKETBOOKS_CHALLENGE_COMMITMENT, ECASH_ISSUED_TICKETBOOKS_FOR, + ECASH_ROUTES, V1_API_VERSION, }; use nym_validator_client::nyxd::cosmwasm_client::logs::Log; use nym_validator_client::nyxd::cosmwasm_client::types::ExecuteResult; @@ -1421,7 +1423,9 @@ impl TestFixture { async fn issue_ticketbook(&self, req: BlindSignRequestBody) -> BlindedSignatureResponse { let response = self .axum - .post(&format!("/{API_VERSION}/{ECASH_ROUTES}/{ECASH_BLIND_SIGN}")) + .post(&format!( + "/{V1_API_VERSION}/{ECASH_ROUTES}/{ECASH_BLIND_SIGN}" + )) .json(&req) .await; @@ -1436,7 +1440,7 @@ impl TestFixture { let response = self .axum .get(&format!( - "/{API_VERSION}/{ECASH_ROUTES}/{ECASH_ISSUED_TICKETBOOKS_FOR}/{expiration_date}" + "/{V1_API_VERSION}/{ECASH_ROUTES}/{ECASH_ISSUED_TICKETBOOKS_FOR}/{expiration_date}" )) .await; @@ -1452,7 +1456,7 @@ impl TestFixture { let dummy_keypair = ed25519::KeyPair::new(&mut OsRng); self.axum .post(&format!( - "/{API_VERSION}/{ECASH_ROUTES}/{ECASH_ISSUED_TICKETBOOKS_CHALLENGE_COMMITMENT}" + "/{V1_API_VERSION}/{ECASH_ROUTES}/{ECASH_ISSUED_TICKETBOOKS_CHALLENGE_COMMITMENT}" )) .json( 
&IssuedTicketbooksChallengeCommitmentRequestBody { @@ -1519,7 +1523,9 @@ mod credential_tests { let response = test_fixture .axum - .post(&format!("/{API_VERSION}/{ECASH_ROUTES}/{ECASH_BLIND_SIGN}")) + .post(&format!( + "/{V1_API_VERSION}/{ECASH_ROUTES}/{ECASH_BLIND_SIGN}" + )) .json(&request_body) .await; @@ -1703,7 +1709,9 @@ mod credential_tests { let response = test .axum - .post(&format!("/{API_VERSION}/{ECASH_ROUTES}/{ECASH_BLIND_SIGN}")) + .post(&format!( + "/{V1_API_VERSION}/{ECASH_ROUTES}/{ECASH_BLIND_SIGN}" + )) .json(&request_body) .await; diff --git a/nym-api/src/epoch_operations/error.rs b/nym-api/src/epoch_operations/error.rs index a608817425c..14e74c3e90b 100644 --- a/nym-api/src/epoch_operations/error.rs +++ b/nym-api/src/epoch_operations/error.rs @@ -56,9 +56,6 @@ pub enum RewardingError { source: rand::distributions::WeightedError, }, - #[error("could not obtain the current interval rewarding parameters")] - RewardingParamsRetrievalFailure, - #[error("{0}")] GenericError(#[from] anyhow::Error), } diff --git a/nym-api/src/epoch_operations/mod.rs b/nym-api/src/epoch_operations/mod.rs index 0a1f8bb566b..dd2c64945e8 100644 --- a/nym-api/src/epoch_operations/mod.rs +++ b/nym-api/src/epoch_operations/mod.rs @@ -12,7 +12,7 @@ // 3. Eventually this whole procedure is going to get expanded to allow for distribution of rewarded set generation // and hence this might be a good place for it. 
-use crate::node_describe_cache::DescribedNodes; +use crate::node_describe_cache::cache::DescribedNodes; use crate::node_status_api::{NodeStatusCache, ONE_DAY}; use crate::nym_contract_cache::cache::NymContractCache; use crate::support::caching::cache::SharedCache; @@ -22,7 +22,6 @@ use error::RewardingError; pub(crate) use helpers::RewardedNodeWithParams; use nym_mixnet_contract_common::{CurrentIntervalResponse, Interval}; use nym_task::{TaskClient, TaskManager}; -use std::collections::HashSet; use std::time::Duration; use tokio::time::sleep; use tracing::{error, info, trace, warn}; @@ -165,58 +164,6 @@ impl EpochAdvancer { Ok(()) } - // this purposely does not deal with nym-nodes as they don't have a concept of a blacklist. - // instead clients are meant to be filtering out them themselves based on the provided scores. - async fn update_legacy_node_blacklist( - &mut self, - interval: &Interval, - ) -> Result<(), RewardingError> { - info!("Updating blacklists"); - - let mut mix_blacklist_add = HashSet::new(); - let mut mix_blacklist_remove = HashSet::new(); - let mut gate_blacklist_add = HashSet::new(); - let mut gate_blacklist_remove = HashSet::new(); - - let mixnodes = self - .storage - .get_all_avg_mix_reliability_in_last_24hr(interval.current_epoch_end_unix_timestamp()) - .await?; - let gateways = self - .storage - .get_all_avg_gateway_reliability_in_last_24hr( - interval.current_epoch_end_unix_timestamp(), - ) - .await?; - - // TODO: Make thresholds configurable - for mix in mixnodes { - if mix.value() <= 50.0 { - mix_blacklist_add.insert(mix.mix_id()); - } else { - mix_blacklist_remove.insert(mix.mix_id()); - } - } - - self.nym_contract_cache - .update_mixnodes_blacklist(mix_blacklist_add, mix_blacklist_remove) - .await; - - for gateway in gateways { - if gateway.value() <= 50.0 { - gate_blacklist_add.insert(gateway.node_id()); - } else { - gate_blacklist_remove.insert(gateway.node_id()); - } - } - - self.nym_contract_cache - 
.update_gateways_blacklist(gate_blacklist_add, gate_blacklist_remove) - .await; - - Ok(()) - } - async fn wait_until_epoch_end(&mut self, shutdown: &mut TaskClient) -> Option { const POLL_INTERVAL: Duration = Duration::from_secs(120); @@ -267,7 +214,9 @@ impl EpochAdvancer { pub(crate) async fn run(&mut self, mut shutdown: TaskClient) -> Result<(), RewardingError> { info!("waiting for initial contract cache values before we can start rewarding"); - self.nym_contract_cache.wait_for_initial_values().await; + self.nym_contract_cache + .naive_wait_for_initial_values() + .await; info!("waiting for initial self-described cache values before we can start rewarding"); self.described_cache.naive_wait_for_initial_values().await; @@ -278,10 +227,7 @@ impl EpochAdvancer { None => return Ok(()), Some(interval) => interval, }; - if let Err(err) = self.update_legacy_node_blacklist(&interval_details).await { - error!("failed to update the node blacklist - {err}"); - continue; - } + if let Err(err) = self.perform_epoch_operations(interval_details).await { error!("failed to perform epoch operations - {err}"); sleep(Duration::from_secs(30)).await; diff --git a/nym-api/src/epoch_operations/rewarding.rs b/nym-api/src/epoch_operations/rewarding.rs index 46f082502b9..e02dba32083 100644 --- a/nym-api/src/epoch_operations/rewarding.rs +++ b/nym-api/src/epoch_operations/rewarding.rs @@ -70,6 +70,8 @@ impl EpochAdvancer { Ok(()) } + // SAFETY: `EpochAdvancer` is not started until cache is properly initialised + #[allow(clippy::unwrap_used)] pub(crate) async fn nodes_to_reward( &self, ) -> Result, RewardingError> { @@ -82,22 +84,20 @@ impl EpochAdvancer { self.nym_contract_cache .rewarded_set_owned() .await - .into_inner() + .unwrap() .into() } }; // we only need reward parameters for active set work factor and rewarded/active set sizes; // we do not need exact values of reward pool, staking supply, etc., so it's fine if it's slightly out of sync - let Some(reward_params) = self + + // 
SAFETY: `EpochAdvancer` is not started until cache is properly initialised + let reward_params = self .nym_contract_cache .interval_reward_params() .await - .into_inner() - else { - error!("failed to obtain the current interval rewarding parameters. can't determine rewards without them"); - return Err(RewardingError::RewardingParamsRetrievalFailure); - }; + .unwrap(); Ok(self .load_nodes_for_rewarding(&rewarded_set, reward_params) diff --git a/nym-api/src/key_rotation/mod.rs b/nym-api/src/key_rotation/mod.rs new file mode 100644 index 00000000000..6d648c2e92e --- /dev/null +++ b/nym-api/src/key_rotation/mod.rs @@ -0,0 +1,178 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::nym_contract_cache::cache::NymContractCache; +use crate::support::caching::refresher::{CacheUpdateWatcher, RefreshRequester}; +use nym_mixnet_contract_common::{Interval, KeyRotationState}; +use nym_task::TaskClient; +use time::OffsetDateTime; +use tracing::{debug, error, info, trace}; + +#[derive(Debug)] +struct ContractData { + interval: Interval, + key_rotation_state: KeyRotationState, +} + +impl ContractData { + fn rotation_id(&self) -> u32 { + self.key_rotation_state + .key_rotation_id(self.interval.current_epoch_absolute_id()) + } + + fn upcoming_rotation_id(&self) -> u32 { + self.rotation_id() + 1 + } + + fn current_epoch_progress(&self, now: OffsetDateTime) -> f32 { + let elapsed = (now - self.interval.current_epoch_start()).as_seconds_f32(); + elapsed / self.interval.epoch_length().as_secs_f32() + } + + fn epochs_until_next_rotation(&self) -> Option { + let current_epoch_progress = self.current_epoch_progress(OffsetDateTime::now_utc()); + + if !(0. 
..=1.).contains(¤t_epoch_progress) { + error!("epoch seems to be stuck (current progress is at {:.1}%) - can't progress key rotation!", current_epoch_progress * 100.); + return None; + } + + let next_rotation_epoch = self + .key_rotation_state + .next_rotation_starting_epoch_id(self.interval.current_epoch_absolute_id()); + + let Some(full_epochs) = + (next_rotation_epoch - self.interval.current_epoch_absolute_id()).checked_sub(1) + else { + error!("CRITICAL FAILURE: invalid epoch calculation"); + return None; + }; + + Some((1. - current_epoch_progress) + full_epochs as f32) + } +} + +// 'simple' task responsible for making sure nym-api refreshes its self-described cache +// just before the next key rotation so it would have all the keys available +pub(crate) struct KeyRotationController { + pub(crate) last_described_refreshed_for: Option, + + pub(crate) describe_cache_refresher: RefreshRequester, + pub(crate) contract_cache_watcher: CacheUpdateWatcher, + pub(crate) contract_cache: NymContractCache, +} + +impl KeyRotationController { + pub(crate) fn new( + describe_cache_refresher: RefreshRequester, + contract_cache_watcher: CacheUpdateWatcher, + contract_cache: NymContractCache, + ) -> KeyRotationController { + KeyRotationController { + last_described_refreshed_for: None, + describe_cache_refresher, + contract_cache_watcher, + contract_cache, + } + } + + // SAFETY: this function is only called after cache has already been initialised + #[allow(clippy::unwrap_used)] + async fn get_contract_data(&self) -> ContractData { + let key_rotation_state = self.contract_cache.get_key_rotation_state().await.unwrap(); + let interval = self.contract_cache.current_interval().await.unwrap(); + ContractData { + interval, + key_rotation_state, + } + } + + async fn handle_contract_cache_update(&mut self) { + let updated = self.get_contract_data().await; + + info!( + "current rotation: {}", + updated + .key_rotation_state + .key_rotation_id(updated.interval.current_epoch_absolute_id()) 
+ ); + + // if we're only 1/4 epoch away from the next rotation, and we haven't yet performed the refresh, + // update the self-described cache, as all nodes should have already pre-announced their new sphinx keys + if let Some(remaining) = updated.epochs_until_next_rotation() { + debug!("{remaining} epoch(s) remaining until next key rotation"); + let expected = Some(updated.upcoming_rotation_id()); + if remaining < 0.25 && self.last_described_refreshed_for != expected { + info!("{remaining} epoch(s) remaining until next key rotation - requesting full refresh of self-described cache"); + self.describe_cache_refresher.request_cache_refresh(); + self.last_described_refreshed_for = expected; + } + } + } + + async fn run(&mut self, mut task_client: TaskClient) { + self.contract_cache.naive_wait_for_initial_values().await; + self.handle_contract_cache_update().await; + + while !task_client.is_shutdown() { + tokio::select! { + biased; + _ = task_client.recv() => { + trace!("KeyRotationController: Received shutdown"); + } + _ = self.contract_cache_watcher.changed() => { + self.handle_contract_cache_update().await + } + } + } + + trace!("KeyRotationController: exiting") + } + + pub(crate) fn start(mut self, task_client: TaskClient) { + tokio::spawn(async move { self.run(task_client).await }); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use cosmwasm_std::testing::mock_env; + use cosmwasm_std::Timestamp; + use std::time::Duration; + + // Sun Jun 15 2025 15:06:40 GMT+0000 + const DUMMY_TIMESTAMP: i64 = 1750000000; + + fn dummy_contract_data() -> ContractData { + let mut env = mock_env(); + + env.block.time = Timestamp::from_seconds(DUMMY_TIMESTAMP as u64); + ContractData { + interval: Interval::init_interval(24, Duration::from_secs(60 * 60), &env), + key_rotation_state: KeyRotationState { + validity_epochs: 0, + initial_epoch_id: 0, + }, + } + } + + #[test] + fn current_epoch_progress() { + let dummy_data = dummy_contract_data(); + + let epoch_start = 
OffsetDateTime::from_unix_timestamp(DUMMY_TIMESTAMP).unwrap(); + let quarter_in = OffsetDateTime::from_unix_timestamp(DUMMY_TIMESTAMP + 15 * 60).unwrap(); + let half_in = OffsetDateTime::from_unix_timestamp(DUMMY_TIMESTAMP + 30 * 60).unwrap(); + let next = OffsetDateTime::from_unix_timestamp(DUMMY_TIMESTAMP + 60 * 60).unwrap(); + let one_and_half = OffsetDateTime::from_unix_timestamp(DUMMY_TIMESTAMP + 90 * 60).unwrap(); + let past_value = OffsetDateTime::from_unix_timestamp(DUMMY_TIMESTAMP - 30 * 60).unwrap(); + + assert_eq!(dummy_data.current_epoch_progress(epoch_start), 0.); + assert_eq!(dummy_data.current_epoch_progress(quarter_in), 0.25); + assert_eq!(dummy_data.current_epoch_progress(half_in), 0.5); + assert_eq!(dummy_data.current_epoch_progress(next), 1.); + assert_eq!(dummy_data.current_epoch_progress(one_and_half), 1.5); + assert_eq!(dummy_data.current_epoch_progress(past_value), -0.5); + } +} diff --git a/nym-api/src/main.rs b/nym-api/src/main.rs index faf39f2c14e..76bd1ab3dd0 100644 --- a/nym-api/src/main.rs +++ b/nym-api/src/main.rs @@ -18,6 +18,7 @@ use tracing::{info, trace}; mod circulating_supply_api; mod ecash; mod epoch_operations; +mod key_rotation; pub(crate) mod network; mod network_monitor; pub(crate) mod node_describe_cache; diff --git a/nym-api/src/network_monitor/mod.rs b/nym-api/src/network_monitor/mod.rs index 32028b263a6..ea1421105f4 100644 --- a/nym-api/src/network_monitor/mod.rs +++ b/nym-api/src/network_monitor/mod.rs @@ -11,7 +11,7 @@ use crate::network_monitor::monitor::receiver::{ use crate::network_monitor::monitor::sender::PacketSender; use crate::network_monitor::monitor::summary_producer::SummaryProducer; use crate::network_monitor::monitor::Monitor; -use crate::node_describe_cache::DescribedNodes; +use crate::node_describe_cache::cache::DescribedNodes; use crate::node_status_api::NodeStatusCache; use crate::nym_contract_cache::cache::NymContractCache; use crate::storage::NymApiStorage; diff --git 
a/nym-api/src/network_monitor/monitor/preparer.rs b/nym-api/src/network_monitor/monitor/preparer.rs index 69c509b8c7d..f0d6c4ebfa2 100644 --- a/nym-api/src/network_monitor/monitor/preparer.rs +++ b/nym-api/src/network_monitor/monitor/preparer.rs @@ -3,7 +3,8 @@ use crate::network_monitor::monitor::sender::GatewayPackets; use crate::network_monitor::test_route::TestRoute; -use crate::node_describe_cache::{DescribedNodes, NodeDescriptionTopologyExt}; +use crate::node_describe_cache::cache::DescribedNodes; +use crate::node_describe_cache::NodeDescriptionTopologyExt; use crate::node_status_api::NodeStatusCache; use crate::nym_contract_cache::cache::NymContractCache; use crate::support::caching::cache::SharedCache; @@ -155,16 +156,9 @@ impl PacketPreparer { pub(crate) async fn wait_for_validator_cache_initial_values(&self, minimum_full_routes: usize) { // wait for the caches to get initialised - self.contract_cache.wait_for_initial_values().await; + self.contract_cache.naive_wait_for_initial_values().await; self.described_cache.naive_wait_for_initial_values().await; - #[allow(clippy::expect_used)] - let described_nodes = self - .described_cache - .get() - .await - .expect("the self-describe cache should have been initialised!"); - // now wait for at least `minimum_full_routes` mixnodes per layer and `minimum_full_routes` gateway to be online info!("Waiting for minimal topology to be online"); let initialisation_backoff = Duration::from_secs(30); @@ -173,6 +167,13 @@ impl PacketPreparer { let mixnodes = self.contract_cache.legacy_mixnodes_all_basic().await; let nym_nodes = self.contract_cache.nym_nodes().await; + #[allow(clippy::expect_used)] + let described_nodes = self + .described_cache + .get() + .await + .expect("the self-describe cache should have been initialised!"); + let mut gateways_count = gateways.len(); let mut mixnodes_count = mixnodes.len(); @@ -285,13 +286,16 @@ impl PacketPreparer { fn to_legacy_layered_mixes<'a, R: Rng>( &self, rng: &mut R, + 
current_rotation_id: u32, node_statuses: &HashMap, mixing_nym_nodes: impl Iterator + 'a, ) -> HashMap> { let mut layered_mixes = HashMap::new(); for mixing_nym_node in mixing_nym_nodes { - let Some(parsed_node) = self.nym_node_to_routing_node(mixing_nym_node) else { + let Some(parsed_node) = + self.nym_node_to_routing_node(current_rotation_id, mixing_nym_node) + else { continue; }; // if the node is not present, default to 0.5 @@ -309,13 +313,16 @@ impl PacketPreparer { fn to_legacy_gateway_nodes<'a>( &self, + current_rotation_id: u32, node_statuses: &HashMap, gateway_capable_nym_nodes: impl Iterator + 'a, ) -> Vec<(RoutingNode, f64)> { let mut gateways = Vec::new(); for gateway_capable_node in gateway_capable_nym_nodes { - let Some(parsed_node) = self.nym_node_to_routing_node(gateway_capable_node) else { + let Some(parsed_node) = + self.nym_node_to_routing_node(current_rotation_id, gateway_capable_node) + else { continue; }; // if the node is not present, default to 0.5 @@ -341,11 +348,21 @@ impl PacketPreparer { // last I checked `gatewaying` wasn't a word : ) let gateway_capable_nym_nodes = descriptions.entry_capable_nym_nodes(); + // SAFETY: cache has already been initialised + #[allow(clippy::unwrap_used)] + let current_rotation_id = self.contract_cache.current_key_rotation_id().await.unwrap(); + let mut rng = thread_rng(); // separate mixes into layers for easier selection alongside the selection weights - let layered_mixes = self.to_legacy_layered_mixes(&mut rng, &statuses, mixing_nym_nodes); - let gateways = self.to_legacy_gateway_nodes(&statuses, gateway_capable_nym_nodes); + let layered_mixes = self.to_legacy_layered_mixes( + &mut rng, + current_rotation_id, + &statuses, + mixing_nym_nodes, + ); + let gateways = + self.to_legacy_gateway_nodes(current_rotation_id, &statuses, gateway_capable_nym_nodes); // get all nodes from each layer... 
let l1 = layered_mixes.get(&LegacyMixLayer::One)?; @@ -399,7 +416,14 @@ impl PacketPreparer { let node_3 = rand_l3[i].clone(); let gateway = rand_gateways[i].clone(); - routes.push(TestRoute::new(rng.gen(), node_1, node_2, node_3, gateway)) + routes.push(TestRoute::new( + rng.gen(), + current_rotation_id, + node_1, + node_2, + node_3, + gateway, + )) } info!("The following routes will be used for testing: {routes:#?}"); Some(routes) @@ -482,8 +506,12 @@ impl PacketPreparer { (parsed_nodes, invalid_nodes) } - fn nym_node_to_routing_node(&self, description: &NymNodeDescription) -> Option { - description.try_to_topology_node().ok() + fn nym_node_to_routing_node( + &self, + current_rotation_id: u32, + description: &NymNodeDescription, + ) -> Option { + description.try_to_topology_node(current_rotation_id).ok() } pub(super) async fn prepare_test_packets( @@ -495,6 +523,10 @@ impl PacketPreparer { ) -> PreparedPackets { let (mixnodes, gateways) = self.all_legacy_mixnodes_and_gateways().await; + // SAFETY: cache has already been initialised + #[allow(clippy::unwrap_used)] + let current_rotation_id = self.contract_cache.current_key_rotation_id().await.unwrap(); + #[allow(clippy::expect_used)] let descriptions = self .described_cache @@ -521,7 +553,7 @@ impl PacketPreparer { // try to add nym-nodes into the fold for mix in mixing_nym_nodes { - if let Some(parsed) = self.nym_node_to_routing_node(mix) { + if let Some(parsed) = self.nym_node_to_routing_node(current_rotation_id, mix) { mixnodes_under_test.push(TestableNode::new_routing(&parsed, NodeType::Mixnode)); mixnodes_to_test_details.push(parsed); } @@ -535,7 +567,7 @@ impl PacketPreparer { .collect::>(); for gateway in gateway_capable_nym_nodes { - if let Some(parsed) = self.nym_node_to_routing_node(gateway) { + if let Some(parsed) = self.nym_node_to_routing_node(current_rotation_id, gateway) { gateways_under_test.push(TestableNode::new_routing(&parsed, NodeType::Gateway)); gateways_to_test_details.push(parsed); } diff 
--git a/nym-api/src/network_monitor/test_route/mod.rs b/nym-api/src/network_monitor/test_route/mod.rs index fd59a6419bc..6280ba9f011 100644 --- a/nym-api/src/network_monitor/test_route/mod.rs +++ b/nym-api/src/network_monitor/test_route/mod.rs @@ -7,7 +7,7 @@ use nym_crypto::asymmetric::ed25519; use nym_mixnet_contract_common::nym_node::Role; use nym_mixnet_contract_common::{EpochId, EpochRewardedSet, RewardedSet}; use nym_topology::node::RoutingNode; -use nym_topology::{NymRouteProvider, NymTopology}; +use nym_topology::{NymRouteProvider, NymTopology, NymTopologyMetadata}; use std::fmt::{Debug, Formatter}; #[derive(Clone)] @@ -19,6 +19,7 @@ pub(crate) struct TestRoute { impl TestRoute { pub(crate) fn new( id: u64, + key_rotation_id: u32, l1_mix: RoutingNode, l2_mix: RoutingNode, l3_mix: RoutingNode, @@ -40,7 +41,11 @@ impl TestRoute { TestRoute { id, - nodes: NymTopology::new(fake_rewarded_set, nodes), + nodes: NymTopology::new( + NymTopologyMetadata::new(key_rotation_id, 0), + fake_rewarded_set, + nodes, + ), } } diff --git a/nym-api/src/node_describe_cache/cache.rs b/nym-api/src/node_describe_cache/cache.rs new file mode 100644 index 00000000000..3f3de776d60 --- /dev/null +++ b/nym-api/src/node_describe_cache/cache.rs @@ -0,0 +1,65 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use nym_api_requests::models::{DescribedNodeType, NymNodeData, NymNodeDescription}; +use nym_mixnet_contract_common::NodeId; +use std::collections::HashMap; +use std::net::IpAddr; + +#[derive(Debug, Clone)] +pub struct DescribedNodes { + pub(crate) nodes: HashMap, + pub(crate) addresses_cache: HashMap, +} + +impl DescribedNodes { + pub fn force_update(&mut self, node: NymNodeDescription) { + for ip in &node.description.host_information.ip_address { + self.addresses_cache.insert(*ip, node.node_id); + } + self.nodes.insert(node.node_id, node); + } + + pub fn get_description(&self, node_id: &NodeId) -> Option<&NymNodeData> { + 
self.nodes.get(node_id).map(|n| &n.description) + } + + pub fn get_node(&self, node_id: &NodeId) -> Option<&NymNodeDescription> { + self.nodes.get(node_id) + } + + pub fn all_nodes(&self) -> impl Iterator { + self.nodes.values() + } + + pub fn all_nym_nodes(&self) -> impl Iterator { + self.nodes + .values() + .filter(|n| n.contract_node_type == DescribedNodeType::NymNode) + } + + pub fn mixing_nym_nodes(&self) -> impl Iterator { + self.nodes + .values() + .filter(|n| n.contract_node_type == DescribedNodeType::NymNode) + .filter(|n| n.description.declared_role.mixnode) + } + + pub fn entry_capable_nym_nodes(&self) -> impl Iterator { + self.nodes + .values() + .filter(|n| n.contract_node_type == DescribedNodeType::NymNode) + .filter(|n| n.description.declared_role.entry) + } + + pub fn exit_capable_nym_nodes(&self) -> impl Iterator { + self.nodes + .values() + .filter(|n| n.contract_node_type == DescribedNodeType::NymNode) + .filter(|n| n.description.declared_role.can_operate_exit_gateway()) + } + + pub fn node_with_address(&self, address: IpAddr) -> Option { + self.addresses_cache.get(&address).copied() + } +} diff --git a/nym-api/src/node_describe_cache/mod.rs b/nym-api/src/node_describe_cache/mod.rs index cdb9ab67b85..096555aa267 100644 --- a/nym-api/src/node_describe_cache/mod.rs +++ b/nym-api/src/node_describe_cache/mod.rs @@ -1,28 +1,19 @@ // Copyright 2023-2024 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use crate::node_describe_cache::query_helpers::query_for_described_data; -use crate::nym_contract_cache::cache::NymContractCache; -use crate::support::caching::cache::{SharedCache, UninitialisedCache}; -use crate::support::caching::refresher::{CacheItemProvider, CacheRefresher}; -use crate::support::config; -use crate::support::config::DEFAULT_NODE_DESCRIBE_BATCH_SIZE; -use async_trait::async_trait; -use futures::{stream, StreamExt}; -use nym_api_requests::legacy::{LegacyGatewayBondWithId, LegacyMixNodeDetailsWithLayer}; -use 
nym_api_requests::models::{DescribedNodeType, NymNodeData, NymNodeDescription}; +use crate::support::caching::cache::UninitialisedCache; +use nym_api_requests::models::NymNodeDescription; use nym_config::defaults::DEFAULT_NYM_NODE_HTTP_PORT; -use nym_crypto::asymmetric::ed25519; -use nym_mixnet_contract_common::{NodeId, NymNodeDetails}; -use nym_node_requests::api::client::{NymNodeApiClientError, NymNodeApiClientExt}; -use nym_topology::node::{RoutingNode, RoutingNodeError}; -use std::collections::HashMap; -use std::net::IpAddr; -use std::time::Duration; +use nym_mixnet_contract_common::NodeId; +use nym_node_requests::api::client::NymNodeApiClientError; +use nym_topology::node::RoutingNodeError; +use nym_topology::RoutingNode; use thiserror::Error; -use tracing::{debug, error, info}; +pub(crate) mod cache; +pub(crate) mod provider; mod query_helpers; +pub(crate) mod refresh; #[derive(Debug, Error)] pub enum NodeDescribeCacheError { @@ -71,390 +62,20 @@ pub enum NodeDescribeCacheError { // this exists because I've been moving things around quite a lot and now the place that holds the type // doesn't have relevant dependencies for proper impl pub(crate) trait NodeDescriptionTopologyExt { - fn try_to_topology_node(&self) -> Result; + fn try_to_topology_node( + &self, + current_rotation_id: u32, + ) -> Result; } impl NodeDescriptionTopologyExt for NymNodeDescription { - fn try_to_topology_node(&self) -> Result { + fn try_to_topology_node( + &self, + current_rotation_id: u32, + ) -> Result { // for the purposes of routing, performance is completely ignored, // so add dummy value and piggyback on existing conversion - (&self.to_skimmed_node(Default::default(), Default::default())).try_into() + (&self.to_skimmed_node(current_rotation_id, Default::default(), Default::default())) + .try_into() } } - -#[derive(Debug, Clone)] -pub struct DescribedNodes { - nodes: HashMap, - addresses_cache: HashMap, -} - -impl DescribedNodes { - pub fn force_update(&mut self, node: 
NymNodeDescription) { - for ip in &node.description.host_information.ip_address { - self.addresses_cache.insert(*ip, node.node_id); - } - self.nodes.insert(node.node_id, node); - } - - pub fn get_description(&self, node_id: &NodeId) -> Option<&NymNodeData> { - self.nodes.get(node_id).map(|n| &n.description) - } - - pub fn get_node(&self, node_id: &NodeId) -> Option<&NymNodeDescription> { - self.nodes.get(node_id) - } - - pub fn all_nodes(&self) -> impl Iterator { - self.nodes.values() - } - - pub fn all_nym_nodes(&self) -> impl Iterator { - self.nodes - .values() - .filter(|n| n.contract_node_type == DescribedNodeType::NymNode) - } - - pub fn mixing_nym_nodes(&self) -> impl Iterator { - self.nodes - .values() - .filter(|n| n.contract_node_type == DescribedNodeType::NymNode) - .filter(|n| n.description.declared_role.mixnode) - } - - pub fn entry_capable_nym_nodes(&self) -> impl Iterator { - self.nodes - .values() - .filter(|n| n.contract_node_type == DescribedNodeType::NymNode) - .filter(|n| n.description.declared_role.entry) - } - - pub fn exit_capable_nym_nodes(&self) -> impl Iterator { - self.nodes - .values() - .filter(|n| n.contract_node_type == DescribedNodeType::NymNode) - .filter(|n| n.description.declared_role.can_operate_exit_gateway()) - } - - pub fn node_with_address(&self, address: IpAddr) -> Option { - self.addresses_cache.get(&address).copied() - } -} - -pub struct NodeDescriptionProvider { - contract_cache: NymContractCache, - - allow_all_ips: bool, - batch_size: usize, -} - -impl NodeDescriptionProvider { - pub(crate) fn new( - contract_cache: NymContractCache, - allow_all_ips: bool, - ) -> NodeDescriptionProvider { - NodeDescriptionProvider { - contract_cache, - allow_all_ips, - batch_size: DEFAULT_NODE_DESCRIBE_BATCH_SIZE, - } - } - - #[must_use] - pub(crate) fn with_batch_size(mut self, batch_size: usize) -> Self { - self.batch_size = batch_size; - self - } -} - -async fn try_get_client( - host: &str, - node_id: NodeId, - custom_port: Option, -) 
-> Result { - // first try the standard port in case the operator didn't put the node behind the proxy, - // then default https (443) - // finally default http (80) - let mut addresses_to_try = vec![ - format!("http://{host}:{DEFAULT_NYM_NODE_HTTP_PORT}"), // 'standard' nym-node - format!("https://{host}"), // node behind https proxy (443) - format!("http://{host}"), // node behind http proxy (80) - ]; - - // note: I removed 'standard' legacy mixnode port because it should now be automatically pulled via - // the 'custom_port' since it should have been present in the contract. - - if let Some(port) = custom_port { - addresses_to_try.insert(0, format!("http://{host}:{port}")); - } - - for address in addresses_to_try { - // if provided host was malformed, no point in continuing - let client = match nym_node_requests::api::Client::builder(address).and_then(|b| { - b.with_timeout(Duration::from_secs(5)) - .no_hickory_dns() - .with_user_agent("nym-api-describe-cache") - .build() - }) { - Ok(client) => client, - Err(err) => { - return Err(NodeDescribeCacheError::MalformedHost { - host: host.to_string(), - node_id, - source: err, - }); - } - }; - - if let Ok(health) = client.get_health().await { - if health.status.is_up() { - return Ok(client); - } - } - } - - Err(NodeDescribeCacheError::NoHttpPortsAvailable { - host: host.to_string(), - node_id, - }) -} - -async fn try_get_description( - data: RefreshData, - allow_all_ips: bool, -) -> Result { - let client = try_get_client(&data.host, data.node_id, data.port).await?; - - let map_query_err = |err| NodeDescribeCacheError::ApiFailure { - node_id: data.node_id, - source: err, - }; - - let host_info = client.get_host_information().await.map_err(map_query_err)?; - - // check if the identity key matches the information provided during bonding - if data.expected_identity != host_info.keys.ed25519_identity { - return Err(NodeDescribeCacheError::MismatchedIdentity { - node_id: data.node_id, - expected: 
data.expected_identity.to_base58_string(), - got: host_info.keys.ed25519_identity.to_base58_string(), - }); - } - - if !host_info.verify_host_information() { - return Err(NodeDescribeCacheError::MissignedHostInformation { - node_id: data.node_id, - }); - } - - if !allow_all_ips && !host_info.data.check_ips() { - return Err(NodeDescribeCacheError::IllegalIpAddress { - node_id: data.node_id, - }); - } - - let node_info = query_for_described_data(&client, data.node_id).await?; - let description = node_info.into_node_description(host_info.data); - - Ok(NymNodeDescription { - node_id: data.node_id, - contract_node_type: data.node_type, - description, - }) -} - -#[derive(Debug)] -pub(crate) struct RefreshData { - host: String, - node_id: NodeId, - expected_identity: ed25519::PublicKey, - node_type: DescribedNodeType, - - port: Option, -} - -impl<'a> TryFrom<&'a LegacyMixNodeDetailsWithLayer> for RefreshData { - type Error = ed25519::Ed25519RecoveryError; - - fn try_from(node: &'a LegacyMixNodeDetailsWithLayer) -> Result { - Ok(RefreshData::new( - &node.bond_information.mix_node.host, - node.bond_information.identity().parse()?, - DescribedNodeType::LegacyMixnode, - node.mix_id(), - Some(node.bond_information.mix_node.http_api_port), - )) - } -} - -impl<'a> TryFrom<&'a LegacyGatewayBondWithId> for RefreshData { - type Error = ed25519::Ed25519RecoveryError; - - fn try_from(node: &'a LegacyGatewayBondWithId) -> Result { - Ok(RefreshData::new( - &node.bond.gateway.host, - node.bond.identity().parse()?, - DescribedNodeType::LegacyGateway, - node.node_id, - None, - )) - } -} - -impl<'a> TryFrom<&'a NymNodeDetails> for RefreshData { - type Error = ed25519::Ed25519RecoveryError; - - fn try_from(node: &'a NymNodeDetails) -> Result { - Ok(RefreshData::new( - &node.bond_information.node.host, - node.bond_information.identity().parse()?, - DescribedNodeType::NymNode, - node.node_id(), - node.bond_information.node.custom_http_port, - )) - } -} - -impl RefreshData { - pub fn new( - 
host: impl Into, - expected_identity: ed25519::PublicKey, - node_type: DescribedNodeType, - node_id: NodeId, - port: Option, - ) -> Self { - RefreshData { - host: host.into(), - node_id, - expected_identity, - node_type, - port, - } - } - - pub(crate) fn node_id(&self) -> NodeId { - self.node_id - } - - pub(crate) async fn try_refresh(self, allow_all_ips: bool) -> Option { - match try_get_description(self, allow_all_ips).await { - Ok(description) => Some(description), - Err(err) => { - debug!("failed to obtain node self-described data: {err}"); - None - } - } - } -} - -#[async_trait] -impl CacheItemProvider for NodeDescriptionProvider { - type Item = DescribedNodes; - type Error = NodeDescribeCacheError; - - async fn wait_until_ready(&self) { - self.contract_cache.wait_for_initial_values().await - } - - async fn try_refresh(&self) -> Result { - // we need to query: - // - legacy mixnodes (because they might already be running nym-nodes, but haven't updated contract info) - // - legacy gateways (because they might already be running nym-nodes, but haven't updated contract info) - // - nym-nodes - - let mut nodes_to_query: Vec = Vec::new(); - - match self.contract_cache.all_cached_legacy_mixnodes().await { - None => error!("failed to obtain mixnodes information from the cache"), - Some(legacy_mixnodes) => { - for node in &**legacy_mixnodes { - if let Ok(data) = node.try_into() { - nodes_to_query.push(data); - } - } - } - } - - match self.contract_cache.all_cached_legacy_gateways().await { - None => error!("failed to obtain gateways information from the cache"), - Some(legacy_gateways) => { - for node in &**legacy_gateways { - if let Ok(data) = node.try_into() { - nodes_to_query.push(data); - } - } - } - } - - match self.contract_cache.all_cached_nym_nodes().await { - None => error!("failed to obtain nym-nodes information from the cache"), - Some(nym_nodes) => { - for node in &**nym_nodes { - if let Ok(data) = node.try_into() { - nodes_to_query.push(data); - } - } - } 
- } - - let nodes = stream::iter( - nodes_to_query - .into_iter() - .map(|n| n.try_refresh(self.allow_all_ips)), - ) - .buffer_unordered(self.batch_size) - .filter_map(|x| async move { x.map(|d| (d.node_id, d)) }) - .collect::>() - .await; - - let mut addresses_cache = HashMap::new(); - for node in nodes.values() { - for ip in &node.description.host_information.ip_address { - addresses_cache.insert(*ip, node.node_id); - } - } - - info!("refreshed self described data for {} nodes", nodes.len()); - info!("with {} unique ip addresses", addresses_cache.len()); - - Ok(DescribedNodes { - nodes, - addresses_cache, - }) - } -} - -// currently dead code : ( -#[allow(dead_code)] -pub(crate) fn new_refresher( - config: &config::TopologyCacher, - contract_cache: NymContractCache, -) -> CacheRefresher { - CacheRefresher::new( - Box::new( - NodeDescriptionProvider::new( - contract_cache, - config.debug.node_describe_allow_illegal_ips, - ) - .with_batch_size(config.debug.node_describe_batch_size), - ), - config.debug.node_describe_caching_interval, - ) -} - -pub(crate) fn new_refresher_with_initial_value( - config: &config::TopologyCacher, - contract_cache: NymContractCache, - initial: SharedCache, -) -> CacheRefresher { - CacheRefresher::new_with_initial_value( - Box::new( - NodeDescriptionProvider::new( - contract_cache, - config.debug.node_describe_allow_illegal_ips, - ) - .with_batch_size(config.debug.node_describe_batch_size), - ), - config.debug.node_describe_caching_interval, - initial, - ) -} diff --git a/nym-api/src/node_describe_cache/provider.rs b/nym-api/src/node_describe_cache/provider.rs new file mode 100644 index 00000000000..da74b2a5f96 --- /dev/null +++ b/nym-api/src/node_describe_cache/provider.rs @@ -0,0 +1,154 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node_describe_cache::cache::DescribedNodes; +use crate::node_describe_cache::refresh::RefreshData; +use 
crate::node_describe_cache::NodeDescribeCacheError; +use crate::nym_contract_cache::cache::NymContractCache; +use crate::support::caching::cache::SharedCache; +use crate::support::caching::refresher::{CacheItemProvider, CacheRefresher}; +use crate::support::config; +use crate::support::config::DEFAULT_NODE_DESCRIBE_BATCH_SIZE; +use async_trait::async_trait; +use futures::{stream, StreamExt}; +use std::collections::HashMap; +use tracing::{error, info}; + +pub struct NodeDescriptionProvider { + contract_cache: NymContractCache, + + allow_all_ips: bool, + batch_size: usize, +} + +impl NodeDescriptionProvider { + pub(crate) fn new( + contract_cache: NymContractCache, + allow_all_ips: bool, + ) -> NodeDescriptionProvider { + NodeDescriptionProvider { + contract_cache, + allow_all_ips, + batch_size: DEFAULT_NODE_DESCRIBE_BATCH_SIZE, + } + } + + #[must_use] + pub(crate) fn with_batch_size(mut self, batch_size: usize) -> Self { + self.batch_size = batch_size; + self + } +} + +#[async_trait] +impl CacheItemProvider for NodeDescriptionProvider { + type Item = DescribedNodes; + type Error = NodeDescribeCacheError; + + async fn wait_until_ready(&self) { + self.contract_cache.naive_wait_for_initial_values().await + } + + async fn try_refresh(&self) -> Result { + // we need to query: + // - legacy mixnodes (because they might already be running nym-nodes, but haven't updated contract info) + // - legacy gateways (because they might already be running nym-nodes, but haven't updated contract info) + // - nym-nodes + + let mut nodes_to_query: Vec = Vec::new(); + + match self.contract_cache.all_cached_legacy_mixnodes().await { + None => error!("failed to obtain mixnodes information from the cache"), + Some(legacy_mixnodes) => { + for node in &**legacy_mixnodes { + if let Ok(data) = node.try_into() { + nodes_to_query.push(data); + } + } + } + } + + match self.contract_cache.all_cached_legacy_gateways().await { + None => error!("failed to obtain gateways information from the cache"), 
+ Some(legacy_gateways) => { + for node in &**legacy_gateways { + if let Ok(data) = node.try_into() { + nodes_to_query.push(data); + } + } + } + } + + match self.contract_cache.all_cached_nym_nodes().await { + None => error!("failed to obtain nym-nodes information from the cache"), + Some(nym_nodes) => { + for node in &**nym_nodes { + if let Ok(data) = node.try_into() { + nodes_to_query.push(data); + } + } + } + } + + let nodes = stream::iter( + nodes_to_query + .into_iter() + .map(|n| n.try_refresh(self.allow_all_ips)), + ) + .buffer_unordered(self.batch_size) + .filter_map(|x| async move { x.map(|d| (d.node_id, d)) }) + .collect::>() + .await; + + let mut addresses_cache = HashMap::new(); + for node in nodes.values() { + for ip in &node.description.host_information.ip_address { + addresses_cache.insert(*ip, node.node_id); + } + } + + info!("refreshed self described data for {} nodes", nodes.len()); + info!("with {} unique ip addresses", addresses_cache.len()); + + Ok(DescribedNodes { + nodes, + addresses_cache, + }) + } +} + +// currently dead code : ( +#[allow(dead_code)] +pub(crate) fn new_refresher( + config: &config::TopologyCacher, + contract_cache: NymContractCache, +) -> CacheRefresher { + CacheRefresher::new( + Box::new( + NodeDescriptionProvider::new( + contract_cache, + config.debug.node_describe_allow_illegal_ips, + ) + .with_batch_size(config.debug.node_describe_batch_size), + ), + config.debug.node_describe_caching_interval, + ) +} + +pub(crate) fn new_provider_with_initial_value( + config: &config::TopologyCacher, + contract_cache: NymContractCache, + initial: SharedCache, +) -> CacheRefresher { + CacheRefresher::new_with_initial_value( + Box::new( + NodeDescriptionProvider::new( + contract_cache, + config.debug.node_describe_allow_illegal_ips, + ) + .with_batch_size(config.debug.node_describe_batch_size), + ), + config.debug.node_describe_caching_interval, + initial, + ) +} diff --git a/nym-api/src/node_describe_cache/refresh.rs 
b/nym-api/src/node_describe_cache/refresh.rs new file mode 100644 index 00000000000..6f3949c6c65 --- /dev/null +++ b/nym-api/src/node_describe_cache/refresh.rs @@ -0,0 +1,195 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node_describe_cache::query_helpers::query_for_described_data; +use crate::node_describe_cache::NodeDescribeCacheError; +use nym_api_requests::legacy::{LegacyGatewayBondWithId, LegacyMixNodeDetailsWithLayer}; +use nym_api_requests::models::{DescribedNodeType, NymNodeDescription}; +use nym_bin_common::bin_info; +use nym_config::defaults::DEFAULT_NYM_NODE_HTTP_PORT; +use nym_crypto::asymmetric::ed25519; +use nym_mixnet_contract_common::{NodeId, NymNodeDetails}; +use nym_node_requests::api::client::NymNodeApiClientExt; +use nym_validator_client::UserAgent; +use std::time::Duration; +use tracing::debug; + +#[derive(Debug)] +pub(crate) struct RefreshData { + host: String, + node_id: NodeId, + expected_identity: ed25519::PublicKey, + node_type: DescribedNodeType, + + port: Option, +} + +impl<'a> TryFrom<&'a LegacyMixNodeDetailsWithLayer> for RefreshData { + type Error = ed25519::Ed25519RecoveryError; + + fn try_from(node: &'a LegacyMixNodeDetailsWithLayer) -> Result { + Ok(RefreshData::new( + &node.bond_information.mix_node.host, + node.bond_information.identity().parse()?, + DescribedNodeType::LegacyMixnode, + node.mix_id(), + Some(node.bond_information.mix_node.http_api_port), + )) + } +} + +impl<'a> TryFrom<&'a LegacyGatewayBondWithId> for RefreshData { + type Error = ed25519::Ed25519RecoveryError; + + fn try_from(node: &'a LegacyGatewayBondWithId) -> Result { + Ok(RefreshData::new( + &node.bond.gateway.host, + node.bond.identity().parse()?, + DescribedNodeType::LegacyGateway, + node.node_id, + None, + )) + } +} + +impl<'a> TryFrom<&'a NymNodeDetails> for RefreshData { + type Error = ed25519::Ed25519RecoveryError; + + fn try_from(node: &'a NymNodeDetails) -> Result { + Ok(RefreshData::new( + 
&node.bond_information.node.host, + node.bond_information.identity().parse()?, + DescribedNodeType::NymNode, + node.node_id(), + node.bond_information.node.custom_http_port, + )) + } +} + +impl RefreshData { + pub fn new( + host: impl Into, + expected_identity: ed25519::PublicKey, + node_type: DescribedNodeType, + node_id: NodeId, + port: Option, + ) -> Self { + RefreshData { + host: host.into(), + node_id, + expected_identity, + node_type, + port, + } + } + + pub(crate) fn node_id(&self) -> NodeId { + self.node_id + } + + pub(crate) async fn try_refresh(self, allow_all_ips: bool) -> Option { + match try_get_description(self, allow_all_ips).await { + Ok(description) => Some(description), + Err(err) => { + debug!("failed to obtain node self-described data: {err}"); + None + } + } + } +} + +async fn try_get_client( + host: &str, + node_id: NodeId, + custom_port: Option, +) -> Result { + // first try the standard port in case the operator didn't put the node behind the proxy, + // then default https (443) + // finally default http (80) + let mut addresses_to_try = vec![ + format!("http://{host}:{DEFAULT_NYM_NODE_HTTP_PORT}"), // 'standard' nym-node + format!("https://{host}"), // node behind https proxy (443) + format!("http://{host}"), // node behind http proxy (80) + ]; + + // note: I removed 'standard' legacy mixnode port because it should now be automatically pulled via + // the 'custom_port' since it should have been present in the contract. 
+ + if let Some(port) = custom_port { + addresses_to_try.insert(0, format!("http://{host}:{port}")); + } + + for address in addresses_to_try { + // if provided host was malformed, no point in continuing + let client = match nym_node_requests::api::Client::builder(address).and_then(|b| { + b.with_timeout(Duration::from_secs(5)) + .no_hickory_dns() + .with_user_agent(UserAgent::from(bin_info!())) + .build() + }) { + Ok(client) => client, + Err(err) => { + return Err(NodeDescribeCacheError::MalformedHost { + host: host.to_string(), + node_id, + source: err, + }); + } + }; + + if let Ok(health) = client.get_health().await { + if health.status.is_up() { + return Ok(client); + } + } + } + + Err(NodeDescribeCacheError::NoHttpPortsAvailable { + host: host.to_string(), + node_id, + }) +} + +async fn try_get_description( + data: RefreshData, + allow_all_ips: bool, +) -> Result { + let client = try_get_client(&data.host, data.node_id, data.port).await?; + + let map_query_err = |err| NodeDescribeCacheError::ApiFailure { + node_id: data.node_id, + source: err, + }; + + let host_info = client.get_host_information().await.map_err(map_query_err)?; + + // check if the identity key matches the information provided during bonding + if data.expected_identity != host_info.keys.ed25519_identity { + return Err(NodeDescribeCacheError::MismatchedIdentity { + node_id: data.node_id, + expected: data.expected_identity.to_base58_string(), + got: host_info.keys.ed25519_identity.to_base58_string(), + }); + } + + if !host_info.verify_host_information() { + return Err(NodeDescribeCacheError::MissignedHostInformation { + node_id: data.node_id, + }); + } + + if !allow_all_ips && !host_info.data.check_ips() { + return Err(NodeDescribeCacheError::IllegalIpAddress { + node_id: data.node_id, + }); + } + + let node_info = query_for_described_data(&client, data.node_id).await?; + let description = node_info.into_node_description(host_info.data); + + Ok(NymNodeDescription { + node_id: data.node_id, + 
contract_node_type: data.node_type, + description, + }) +} diff --git a/nym-api/src/node_status_api/cache/inclusion_probabilities.rs b/nym-api/src/node_status_api/cache/inclusion_probabilities.rs index ab54c30782f..3cc9abc8070 100644 --- a/nym-api/src/node_status_api/cache/inclusion_probabilities.rs +++ b/nym-api/src/node_status_api/cache/inclusion_probabilities.rs @@ -5,14 +5,9 @@ use nym_api_requests::legacy::LegacyMixNodeDetailsWithLayer; use nym_api_requests::models::InclusionProbability; -use nym_contracts_common::truncate_decimal; -use nym_mixnet_contract_common::{NodeId, RewardingParams}; +use nym_mixnet_contract_common::NodeId; use serde::Serialize; use std::time::Duration; -use tracing::error; - -const MAX_SIMULATION_SAMPLES: u64 = 5000; -const MAX_SIMULATION_TIME_SEC: u64 = 15; #[deprecated] #[derive(Clone, Default, Serialize, schemars::JsonSchema)] @@ -25,11 +20,24 @@ pub(crate) struct InclusionProbabilities { } impl InclusionProbabilities { - pub(crate) fn compute( + pub(crate) fn legacy_zero( mixnodes: &[LegacyMixNodeDetailsWithLayer], - params: RewardingParams, - ) -> Option { - compute_inclusion_probabilities(mixnodes, params) + ) -> InclusionProbabilities { + // (all legacy mixnodes have 0% chance of being selected) + InclusionProbabilities { + inclusion_probabilities: mixnodes + .iter() + .map(|m| InclusionProbability { + mix_id: m.mix_id(), + in_active: 0.0, + in_reserve: 0.0, + }) + .collect(), + samples: 0, + elapsed: Default::default(), + delta_max: 0.0, + delta_l2: 0.0, + } } pub(crate) fn node(&self, mix_id: NodeId) -> Option<&InclusionProbability> { @@ -38,63 +46,3 @@ impl InclusionProbabilities { .find(|x| x.mix_id == mix_id) } } - -#[deprecated] -fn compute_inclusion_probabilities( - mixnodes: &[LegacyMixNodeDetailsWithLayer], - params: RewardingParams, -) -> Option { - let active_set_size = params.active_set_size(); - let standby_set_size = params.rewarded_set.standby; - - // Unzip list of total bonds into ids and bonds. 
- // We need to go through this zip/unzip procedure to make sure we have matching identities - // for the input to the simulator, which assumes the identity is the position in the vec - let (ids, mixnode_total_bonds) = unzip_into_mixnode_ids_and_total_bonds(mixnodes); - - // Compute inclusion probabilitites and keep track of how long time it took. - let mut rng = rand::thread_rng(); - let results = nym_inclusion_probability::simulate_selection_probability_mixnodes( - &mixnode_total_bonds, - active_set_size as usize, - standby_set_size as usize, - MAX_SIMULATION_SAMPLES, - Duration::from_secs(MAX_SIMULATION_TIME_SEC), - &mut rng, - ) - .inspect_err(|err| error!("{err}")) - .ok()?; - - Some(InclusionProbabilities { - inclusion_probabilities: zip_ids_together_with_results(&ids, &results), - samples: results.samples, - elapsed: results.time, - delta_max: results.delta_max, - delta_l2: results.delta_l2, - }) -} - -fn unzip_into_mixnode_ids_and_total_bonds( - mixnodes: &[LegacyMixNodeDetailsWithLayer], -) -> (Vec, Vec) { - mixnodes - .iter() - .map(|m| (m.mix_id(), truncate_decimal(m.total_stake()).u128())) - .unzip() -} - -#[deprecated] -fn zip_ids_together_with_results( - ids: &[NodeId], - results: &nym_inclusion_probability::SelectionProbability, -) -> Vec { - ids.iter() - .zip(results.active_set_probability.iter()) - .zip(results.reserve_set_probability.iter()) - .map(|((&mix_id, a), r)| InclusionProbability { - mix_id, - in_active: *a, - in_reserve: *r, - }) - .collect() -} diff --git a/nym-api/src/node_status_api/cache/mod.rs b/nym-api/src/node_status_api/cache/mod.rs index 6cd25189af4..01d1be13bba 100644 --- a/nym-api/src/node_status_api/cache/mod.rs +++ b/nym-api/src/node_status_api/cache/mod.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: GPL-3.0-only use self::data::NodeStatusCacheData; +use crate::support::caching::cache::UninitialisedCache; use crate::support::caching::Cache; use nym_api_requests::models::{GatewayBondAnnotated, MixNodeBondAnnotated, 
NodeAnnotation}; use nym_contracts_common::IdentityKey; @@ -22,9 +23,6 @@ pub mod refresher; #[derive(Debug, Error)] enum NodeStatusCacheError { - #[error("failed to simulate selection probabilities for mixnodes, not updating cache")] - SimulationFailed, - #[error("the current interval information is not available at the moment")] SourceDataMissing, @@ -32,6 +30,12 @@ enum NodeStatusCacheError { UnavailableDescribedCache, } +impl From for NodeStatusCacheError { + fn from(_: UninitialisedCache) -> Self { + NodeStatusCacheError::SourceDataMissing + } +} + /// A node status cache suitable for caching values computed in one sweep, such as active set /// inclusion probabilities that are computed for all mixnodes at the same time. /// diff --git a/nym-api/src/node_status_api/cache/node_sets.rs b/nym-api/src/node_status_api/cache/node_sets.rs index a0c4df4ee9e..09ea897c33f 100644 --- a/nym-api/src/node_status_api/cache/node_sets.rs +++ b/nym-api/src/node_status_api/cache/node_sets.rs @@ -1,7 +1,7 @@ // Copyright 2023 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use crate::node_describe_cache::DescribedNodes; +use crate::node_describe_cache::cache::DescribedNodes; use crate::node_status_api::helpers::RewardedSetStatus; use crate::node_status_api::models::Uptime; use crate::node_status_api::reward_estimate::{compute_apy_from_reward, compute_reward_estimate}; @@ -9,7 +9,6 @@ use crate::nym_contract_cache::cache::data::ConfigScoreData; use crate::support::legacy_helpers::legacy_host_to_ips_and_hostname; use crate::support::storage::NymApiStorage; use nym_api_requests::legacy::{LegacyGatewayBondWithId, LegacyMixNodeDetailsWithLayer}; -use nym_api_requests::models::DescribedNodeType::{LegacyGateway, LegacyMixnode, NymNode}; use nym_api_requests::models::{ ConfigScore, DescribedNodeType, DetailedNodePerformance, GatewayBondAnnotated, MixNodeBondAnnotated, NodeAnnotation, NodePerformance, NymNodeDescription, RoutingScore, @@ -18,7 +17,7 @@ use 
nym_contracts_common::NaiveFloat; use nym_mixnet_contract_common::{Interval, NodeId, VersionScoreFormulaParams}; use nym_mixnet_contract_common::{NymNodeDetails, RewardingParams}; use nym_topology::CachedEpochRewardedSet; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use tracing::trace; pub(super) async fn get_mixnode_reliability_from_storage( @@ -166,7 +165,6 @@ pub(super) async fn annotate_legacy_mixnodes_nodes_with_details( interval_reward_params: RewardingParams, current_interval: Interval, rewarded_set: &CachedEpochRewardedSet, - blacklist: &HashSet, ) -> HashMap { let mut annotated = HashMap::new(); for mixnode in mixnodes { @@ -216,7 +214,8 @@ pub(super) async fn annotate_legacy_mixnodes_nodes_with_details( annotated.insert( mixnode.mix_id(), MixNodeBondAnnotated { - blacklisted: blacklist.contains(&mixnode.mix_id()), + // all legacy nodes are always blacklisted + blacklisted: true, mixnode_details: mixnode, stake_saturation, uncapped_stake_saturation, @@ -236,7 +235,6 @@ pub(crate) async fn annotate_legacy_gateways_with_details( storage: &NymApiStorage, gateway_bonds: Vec, current_interval: Interval, - blacklist: &HashSet, ) -> HashMap { let mut annotated = HashMap::new(); for gateway_bond in gateway_bonds { @@ -263,7 +261,8 @@ pub(crate) async fn annotate_legacy_gateways_with_details( annotated.insert( gateway_bond.node_id, GatewayBondAnnotated { - blacklisted: blacklist.contains(&gateway_bond.node_id), + // all legacy nodes are always blacklisted + blacklisted: true, gateway_bond, self_described: None, performance, @@ -291,8 +290,13 @@ pub(crate) async fn produce_node_annotations( for legacy_mix in legacy_mixnodes { let node_id = legacy_mix.mix_id(); - let routing_score = - get_routing_score(storage, node_id, LegacyMixnode, current_interval).await; + let routing_score = get_routing_score( + storage, + node_id, + DescribedNodeType::LegacyMixnode, + current_interval, + ) + .await; let config_score = 
calculate_config_score(config_score_data, described_nodes.get_node(&node_id)); @@ -317,8 +321,13 @@ pub(crate) async fn produce_node_annotations( for legacy_gateway in legacy_gateways { let node_id = legacy_gateway.node_id; - let routing_score = - get_routing_score(storage, node_id, LegacyGateway, current_interval).await; + let routing_score = get_routing_score( + storage, + node_id, + DescribedNodeType::LegacyGateway, + current_interval, + ) + .await; let config_score = calculate_config_score(config_score_data, described_nodes.get_node(&node_id)); @@ -343,7 +352,13 @@ pub(crate) async fn produce_node_annotations( for nym_node in nym_nodes { let node_id = nym_node.node_id(); - let routing_score = get_routing_score(storage, node_id, NymNode, current_interval).await; + let routing_score = get_routing_score( + storage, + node_id, + DescribedNodeType::NymNode, + current_interval, + ) + .await; let config_score = calculate_config_score(config_score_data, described_nodes.get_node(&node_id)); diff --git a/nym-api/src/node_status_api/cache/refresher.rs b/nym-api/src/node_status_api/cache/refresher.rs index b9f4aed5075..0666b84594a 100644 --- a/nym-api/src/node_status_api/cache/refresher.rs +++ b/nym-api/src/node_status_api/cache/refresher.rs @@ -2,14 +2,12 @@ // SPDX-License-Identifier: GPL-3.0-only use super::NodeStatusCache; -use crate::node_describe_cache::DescribedNodes; +use crate::node_describe_cache::cache::DescribedNodes; use crate::node_status_api::cache::node_sets::produce_node_annotations; use crate::support::caching::cache::SharedCache; use crate::{ - node_status_api::cache::{inclusion_probabilities, NodeStatusCacheError}, - nym_contract_cache::cache::NymContractCache, - storage::NymApiStorage, - support::caching::CacheNotification, + node_status_api::cache::NodeStatusCacheError, nym_contract_cache::cache::NymContractCache, + storage::NymApiStorage, support::caching::CacheNotification, }; use ::time::OffsetDateTime; use nym_task::TaskClient; @@ -17,7 +15,7 @@ 
use std::collections::HashMap; use std::time::Duration; use tokio::sync::watch; use tokio::time; -use tracing::{error, info, trace, warn}; +use tracing::{info, trace, warn}; // Long running task responsible for keeping the node status cache up-to-date. pub struct NodeStatusCacheRefresher { @@ -139,35 +137,16 @@ impl NodeStatusCacheRefresher { // Fetch contract cache data to work with let mixnode_details = self.contract_cache.legacy_mixnodes_all().await; - let interval_reward_params = self.contract_cache.interval_reward_params().await; - let current_interval = self.contract_cache.current_interval().await; - let rewarded_set = self.contract_cache.rewarded_set_owned().await; + let interval_reward_params = self.contract_cache.interval_reward_params().await?; + let current_interval = self.contract_cache.current_interval().await?; + let rewarded_set = self.contract_cache.rewarded_set_owned().await?; let gateway_bonds = self.contract_cache.legacy_gateways_all().await; let nym_nodes = self.contract_cache.nym_nodes().await; - let config_score_data = self - .contract_cache - .config_score_data_owned() - .await - .into_inner() - .ok_or(NodeStatusCacheError::SourceDataMissing)?; - - // get blacklists - let mixnodes_blacklist = self.contract_cache.mixnodes_blacklist().await; - let gateways_blacklist = self.contract_cache.gateways_blacklist().await; - - let interval_reward_params = - interval_reward_params.ok_or(NodeStatusCacheError::SourceDataMissing)?; - let current_interval = current_interval.ok_or(NodeStatusCacheError::SourceDataMissing)?; + let config_score_data = self.contract_cache.maybe_config_score_data().await?; // Compute inclusion probabilities - let inclusion_probabilities = inclusion_probabilities::InclusionProbabilities::compute( - &mixnode_details, - interval_reward_params, - ) - .ok_or_else(|| { - error!("Failed to simulate selection probabilities for mixnodes, not updating cache"); - NodeStatusCacheError::SimulationFailed - })?; + // (all legacy mixnodes have 
0% chance of being selected) + let inclusion_probabilities = crate::node_status_api::cache::inclusion_probabilities::InclusionProbabilities::legacy_zero(&mixnode_details); let Ok(described) = self.described_cache.get().await else { return Err(NodeStatusCacheError::UnavailableDescribedCache); @@ -198,7 +177,6 @@ impl NodeStatusCacheRefresher { interval_reward_params, current_interval, &rewarded_set, - &mixnodes_blacklist, ) .await; @@ -207,7 +185,6 @@ impl NodeStatusCacheRefresher { &self.storage, gateway_bonds, current_interval, - &gateways_blacklist, ) .await; diff --git a/nym-api/src/node_status_api/handlers/mod.rs b/nym-api/src/node_status_api/handlers/mod.rs index 31a1e306b1b..f83dd5a12e3 100644 --- a/nym-api/src/node_status_api/handlers/mod.rs +++ b/nym-api/src/node_status_api/handlers/mod.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: GPL-3.0-only use crate::node_status_api::models::AxumResult; -use crate::support::caching::cache::UninitialisedCache; use crate::support::http::state::AppState; use axum::extract::{Query, State}; use axum::routing::get; @@ -58,11 +57,7 @@ async fn config_score_details( ) -> AxumResult> { let output = output.output.unwrap_or_default(); - let data = state - .nym_contract_cache() - .maybe_config_score_data_owned() - .await - .ok_or(UninitialisedCache)?; + let data = state.nym_contract_cache().maybe_config_score_data().await?; - Ok(output.to_response(data.into_inner().into())) + Ok(output.to_response(data.into())) } diff --git a/nym-api/src/node_status_api/helpers.rs b/nym-api/src/node_status_api/helpers.rs index 363c332c934..9e4fcba62ff 100644 --- a/nym-api/src/node_status_api/helpers.rs +++ b/nym-api/src/node_status_api/helpers.rs @@ -198,16 +198,12 @@ pub(crate) async fn _get_mixnode_reward_estimation( .await .ok_or_else(|| AxumErrorResponse::not_found("mixnode bond not found"))?; - let reward_params = contract_cache.interval_reward_params().await; - let as_at = reward_params.timestamp(); - let reward_params = reward_params - 
.into_inner() - .ok_or_else(AxumErrorResponse::internal)?; - let current_interval = contract_cache - .current_interval() - .await - .into_inner() - .ok_or_else(AxumErrorResponse::internal)?; + let reward_params = contract_cache.interval_reward_params().await?; + let current_interval = contract_cache.current_interval().await?; + + // in some very rare edge cases this value might be off (as internals might have got updated between + // queries for `reward_params` and `current_interval`, but timestamp is only informative to begin with) + let as_at = contract_cache.cache_timestamp().await; let reward_estimation = compute_reward_estimate( &mixnode.mixnode_details, @@ -236,16 +232,12 @@ pub(crate) async fn _compute_mixnode_reward_estimation( .await .ok_or_else(|| AxumErrorResponse::not_found("mixnode bond not found"))?; - let reward_params = contract_cache.interval_reward_params().await; - let as_at = reward_params.timestamp(); - let reward_params = reward_params - .into_inner() - .ok_or_else(AxumErrorResponse::internal)?; - let current_interval = contract_cache - .current_interval() - .await - .into_inner() - .ok_or_else(AxumErrorResponse::internal)?; + let reward_params = contract_cache.interval_reward_params().await?; + let current_interval = contract_cache.current_interval().await?; + + // in some very rare edge cases this value might be off (as internals might have got updated between + // queries for `reward_params` and `current_interval`, but timestamp is only informative to begin with) + let as_at = contract_cache.cache_timestamp().await; // For these parameters we either use the provided ones, or fall back to the system ones let performance = user_reward_param.performance.unwrap_or(mixnode.performance); @@ -321,11 +313,8 @@ pub(crate) async fn _get_mixnode_stake_saturation( // Recompute the stake saturation just so that we can confidently state that the `as_at` // field is consistent and correct. Luckily this is very cheap. 
- let reward_params = contract_cache.interval_reward_params().await; - let as_at = reward_params.timestamp(); - let rewarding_params = reward_params - .into_inner() - .ok_or_else(AxumErrorResponse::internal)?; + let rewarding_params = contract_cache.interval_reward_params().await?; + let as_at = contract_cache.cache_timestamp().await; Ok(StakeSaturationResponse { saturation: mixnode diff --git a/nym-api/src/node_status_api/mod.rs b/nym-api/src/node_status_api/mod.rs index b7b02c327c6..0b2a0db4786 100644 --- a/nym-api/src/node_status_api/mod.rs +++ b/nym-api/src/node_status_api/mod.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: GPL-3.0-only use self::cache::refresher::NodeStatusCacheRefresher; -use crate::node_describe_cache::DescribedNodes; +use crate::node_describe_cache::cache::DescribedNodes; use crate::support::caching::cache::SharedCache; use crate::support::config; use crate::{ diff --git a/nym-api/src/node_status_api/models.rs b/nym-api/src/node_status_api/models.rs index cb7672695f8..76a9d0199ba 100644 --- a/nym-api/src/node_status_api/models.rs +++ b/nym-api/src/node_status_api/models.rs @@ -322,6 +322,7 @@ impl From for OldHistoricalUptimeResponse { // TODO rocket remove smurf name after eliminating `rocket` pub(crate) type AxumResult = Result; +pub(crate) type ApiResult = AxumResult; // #[derive(ToSchema, ToResponse)] // #[schema(title = "ErrorResponse")] diff --git a/nym-api/src/nym_contract_cache/cache/data.rs b/nym-api/src/nym_contract_cache/cache/data.rs index ba2c7d91154..f54f9c48f1f 100644 --- a/nym-api/src/nym_contract_cache/cache/data.rs +++ b/nym-api/src/nym_contract_cache/cache/data.rs @@ -1,17 +1,16 @@ // Copyright 2022-2023 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use crate::support::caching::Cache; use nym_api_requests::legacy::{LegacyGatewayBondWithId, LegacyMixNodeDetailsWithLayer}; use nym_api_requests::models::ConfigScoreDataResponse; use nym_contracts_common::ContractBuildInformation; use 
nym_mixnet_contract_common::{ - ConfigScoreParams, HistoricalNymNodeVersionEntry, Interval, NodeId, NymNodeDetails, + ConfigScoreParams, HistoricalNymNodeVersionEntry, Interval, KeyRotationState, NymNodeDetails, RewardingParams, }; use nym_topology::CachedEpochRewardedSet; use nym_validator_client::nyxd::AccountId; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; #[derive(Clone)] pub(crate) struct ConfigScoreData { @@ -33,39 +32,17 @@ impl From for ConfigScoreDataResponse { } pub(crate) struct ContractCacheData { - pub(crate) legacy_mixnodes: Cache>, - pub(crate) legacy_gateways: Cache>, - pub(crate) nym_nodes: Cache>, - pub(crate) rewarded_set: Cache, + pub(crate) legacy_mixnodes: Vec, + pub(crate) legacy_gateways: Vec, + pub(crate) nym_nodes: Vec, + pub(crate) rewarded_set: CachedEpochRewardedSet, - // this purposely does not deal with nym-nodes as they don't have a concept of a blacklist. - // instead clients are meant to be filtering out them themselves based on the provided scores. 
- pub(crate) legacy_mixnodes_blacklist: Cache>, - pub(crate) legacy_gateways_blacklist: Cache>, + pub(crate) config_score_data: ConfigScoreData, + pub(crate) current_reward_params: RewardingParams, + pub(crate) current_interval: Interval, + pub(crate) key_rotation_state: KeyRotationState, - pub(crate) config_score_data: Cache>, - pub(crate) current_reward_params: Cache>, - pub(crate) current_interval: Cache>, - - pub(crate) contracts_info: Cache, -} - -impl ContractCacheData { - pub(crate) fn new() -> Self { - ContractCacheData { - legacy_mixnodes: Cache::default(), - legacy_gateways: Cache::default(), - nym_nodes: Default::default(), - rewarded_set: Cache::default(), - - legacy_mixnodes_blacklist: Cache::default(), - legacy_gateways_blacklist: Cache::default(), - current_interval: Cache::default(), - current_reward_params: Cache::default(), - contracts_info: Cache::default(), - config_score_data: Default::default(), - } - } + pub(crate) contracts_info: CachedContractsInfo, } type ContractAddress = String; diff --git a/nym-api/src/nym_contract_cache/cache/mod.rs b/nym-api/src/nym_contract_cache/cache/mod.rs index 147fe604064..e73dde0f41a 100644 --- a/nym-api/src/nym_contract_cache/cache/mod.rs +++ b/nym-api/src/nym_contract_cache/cache/mod.rs @@ -1,8 +1,9 @@ // Copyright 2023 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use crate::node_describe_cache::RefreshData; +use crate::node_describe_cache::refresh::RefreshData; use crate::nym_contract_cache::cache::data::{CachedContractsInfo, ConfigScoreData}; +use crate::support::caching::cache::{SharedCache, UninitialisedCache}; use crate::support::caching::Cache; use data::ContractCacheData; use nym_api_requests::legacy::{ @@ -11,216 +12,86 @@ use nym_api_requests::legacy::{ use nym_api_requests::models::MixnodeStatus; use nym_crypto::asymmetric::ed25519; use nym_mixnet_contract_common::{ - ConfigScoreParams, EpochRewardedSet, HistoricalNymNodeVersionEntry, Interval, NodeId, - NymNodeDetails, 
RewardingParams, + Interval, KeyRotationState, NodeId, NymNodeDetails, RewardingParams, }; use nym_topology::CachedEpochRewardedSet; -use std::{ - collections::HashSet, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, - time::Duration, -}; -use tokio::sync::{RwLock, RwLockReadGuard}; -use tokio::time; -use tracing::{debug, error}; +use time::OffsetDateTime; +use tokio::sync::RwLockReadGuard; pub(crate) mod data; pub(crate) mod refresher; -const CACHE_TIMEOUT_MS: u64 = 100; - #[derive(Clone)] pub struct NymContractCache { - pub(crate) initialised: Arc, - pub(crate) inner: Arc>, + pub(crate) inner: SharedCache, } impl NymContractCache { pub(crate) fn new() -> Self { NymContractCache { - initialised: Arc::new(AtomicBool::new(false)), - inner: Arc::new(RwLock::new(ContractCacheData::new())), + inner: SharedCache::new(), } } - /// Returns a copy of the current cache data. + pub(crate) fn inner(&self) -> SharedCache { + self.inner.clone() + } + async fn get_owned( &self, - fn_arg: impl FnOnce(RwLockReadGuard<'_, ContractCacheData>) -> Cache, - ) -> Option> { - match time::timeout(Duration::from_millis(CACHE_TIMEOUT_MS), self.inner.read()).await { - Ok(cache) => Some(fn_arg(cache)), - Err(e) => { - error!("{e}"); - None - } - } + fn_arg: impl FnOnce(&ContractCacheData) -> T, + ) -> Result { + Ok(fn_arg(&**self.inner.get().await?)) } async fn get<'a, T: 'a>( &'a self, - fn_arg: impl FnOnce(&ContractCacheData) -> &Cache, - ) -> Option>> { - match time::timeout(Duration::from_millis(CACHE_TIMEOUT_MS), self.inner.read()).await { - Ok(cache) => Some(RwLockReadGuard::map(cache, |item| fn_arg(item))), - Err(e) => { - error!("{e}"); - None - } - } - } - - #[allow(clippy::too_many_arguments)] - pub(crate) async fn update( - &self, - mixnodes: Vec, - gateways: Vec, - nym_nodes: Vec, - rewarded_set: EpochRewardedSet, - config_score_params: ConfigScoreParams, - nym_node_version_history: Vec, - rewarding_params: RewardingParams, - current_interval: Interval, - 
nym_contracts_info: CachedContractsInfo, - ) { - match time::timeout(Duration::from_millis(100), self.inner.write()).await { - Ok(mut cache) => { - let config_score_data = ConfigScoreData { - config_score_params, - nym_node_version_history, - }; - - cache.legacy_mixnodes.unchecked_update(mixnodes); - cache.legacy_gateways.unchecked_update(gateways); - cache.nym_nodes.unchecked_update(nym_nodes); - cache.rewarded_set.unchecked_update(rewarded_set); - cache.config_score_data.unchecked_update(config_score_data); - cache - .current_reward_params - .unchecked_update(Some(rewarding_params)); - cache - .current_interval - .unchecked_update(Some(current_interval)); - cache.contracts_info.unchecked_update(nym_contracts_info) - } - Err(err) => { - error!("{err}"); - } - } - } - - pub async fn mixnodes_blacklist(&self) -> Cache> { - self.get_owned(|cache| cache.legacy_mixnodes_blacklist.clone_cache()) - .await - .unwrap_or_default() - } - - pub async fn gateways_blacklist(&self) -> Cache> { - self.get_owned(|cache| cache.legacy_gateways_blacklist.clone_cache()) - .await - .unwrap_or_default() - } - - pub async fn update_mixnodes_blacklist(&self, add: HashSet, remove: HashSet) { - let blacklist = self.mixnodes_blacklist().await; - let mut blacklist = blacklist.union(&add).cloned().collect::>(); - let to_remove = blacklist - .intersection(&remove) - .cloned() - .collect::>(); - for key in to_remove { - blacklist.remove(&key); - } - match time::timeout(Duration::from_millis(100), self.inner.write()).await { - Ok(mut cache) => { - cache.legacy_mixnodes_blacklist.unchecked_update(blacklist); - } - Err(err) => { - error!("Failed to update mixnodes blacklist: {err}"); - } - } + fn_arg: impl FnOnce(&Cache) -> &T, + ) -> Result, UninitialisedCache> { + let guard = self.inner.get().await?; + Ok(RwLockReadGuard::map(guard, fn_arg)) } - pub async fn update_gateways_blacklist(&self, add: HashSet, remove: HashSet) { - let blacklist = self.gateways_blacklist().await; - let mut blacklist = 
blacklist.union(&add).cloned().collect::>(); - let to_remove = blacklist - .intersection(&remove) - .cloned() - .collect::>(); - for key in to_remove { - blacklist.remove(&key); - } - match time::timeout(Duration::from_millis(100), self.inner.write()).await { - Ok(mut cache) => { - cache.legacy_gateways_blacklist.unchecked_update(blacklist); - } - Err(err) => { - error!("Failed to update gateways blacklist: {err}"); - } - } - } - - pub async fn legacy_mixnodes_filtered(&self) -> Vec { - let mixnodes = self.legacy_mixnodes_all().await; - if mixnodes.is_empty() { - return Vec::new(); - } - let blacklist = self.mixnodes_blacklist().await; + pub async fn cache_timestamp(&self) -> OffsetDateTime { + let Ok(cache) = self.inner.get().await else { + return OffsetDateTime::UNIX_EPOCH; + }; - if !blacklist.is_empty() { - mixnodes - .into_iter() - .filter(|mix| !blacklist.contains(&mix.mix_id())) - .collect() - } else { - mixnodes - } + cache.timestamp() } pub async fn all_cached_legacy_mixnodes( &self, - ) -> Option>>> { - self.get(|c| &c.legacy_mixnodes).await + ) -> Option>> { + self.get(|c| &c.legacy_mixnodes).await.ok() } pub async fn legacy_gateway_owner(&self, node_id: NodeId) -> Option { - self.get(|c| &c.legacy_gateways) - .await? - .iter() - .find(|g| g.node_id == node_id) - .map(|g| g.owner.to_string()) - } + let Ok(cache) = self.inner.get().await else { + return Default::default(); + }; - #[allow(dead_code)] - pub async fn legacy_mixnode_owner(&self, node_id: NodeId) -> Option { - self.get(|c| &c.legacy_mixnodes) - .await? 
+ cache + .legacy_gateways .iter() - .find(|m| m.mix_id() == node_id) - .map(|m| m.bond_information.owner.to_string()) + .find(|gateway| gateway.node_id == node_id) + .map(|gateway| gateway.owner.to_string()) } pub async fn all_cached_legacy_gateways( &self, - ) -> Option>>> { - self.get(|c| &c.legacy_gateways).await + ) -> Option>> { + self.get(|c| &c.legacy_gateways).await.ok() } - pub async fn all_cached_nym_nodes( - &self, - ) -> Option>>> { - self.get(|c| &c.nym_nodes).await + pub async fn all_cached_nym_nodes(&self) -> Option>> { + self.get(|c| &c.nym_nodes).await.ok() } pub async fn legacy_mixnodes_all(&self) -> Vec { - self.get_owned(|cache| cache.legacy_mixnodes.clone_cache()) + self.get_owned(|c| c.legacy_mixnodes.clone()) .await .unwrap_or_default() - .into_inner() } pub async fn legacy_mixnodes_all_basic(&self) -> Vec { @@ -231,155 +102,91 @@ impl NymContractCache { .collect() } - pub async fn legacy_gateways_filtered(&self) -> Vec { - let gateways = self.legacy_gateways_all().await; - if gateways.is_empty() { - return Vec::new(); - } - - let blacklist = self.gateways_blacklist().await; - - if !blacklist.is_empty() { - gateways - .into_iter() - .filter(|gw| !blacklist.contains(&gw.node_id)) - .collect() - } else { - gateways - } - } - pub async fn legacy_gateways_all(&self) -> Vec { - self.get_owned(|cache| cache.legacy_gateways.clone_cache()) + self.get_owned(|c| c.legacy_gateways.clone()) .await .unwrap_or_default() - .into_inner() } pub async fn nym_nodes(&self) -> Vec { - self.get_owned(|cache| cache.nym_nodes.clone_cache()) + self.get_owned(|c| c.nym_nodes.clone()) .await .unwrap_or_default() - .into_inner() } - pub async fn rewarded_set(&self) -> Option>> { - self.get(|cache| &cache.rewarded_set).await + pub async fn cached_rewarded_set( + &self, + ) -> Result, UninitialisedCache> { + let cache = self.inner.get().await?; + Ok(Cache::as_mapped(&cache, |c| c.rewarded_set.clone())) } - pub async fn rewarded_set_owned(&self) -> Cache { - 
self.get_owned(|cache| cache.rewarded_set.clone_cache()) - .await - .unwrap_or_default() + pub async fn rewarded_set(&self) -> Option> { + self.get(|c| &c.rewarded_set).await.ok() } - pub async fn maybe_config_score_data_owned(&self) -> Option> { - self.config_score_data_owned().await.transpose() + pub async fn rewarded_set_owned(&self) -> Result { + self.get_owned(|c| c.rewarded_set.clone()).await } - pub async fn config_score_data_owned(&self) -> Cache> { - self.get_owned(|cache| cache.config_score_data.clone_cache()) - .await - .unwrap_or_default() + pub async fn maybe_config_score_data(&self) -> Result { + self.get_owned(|c| c.config_score_data.clone()).await } - pub async fn legacy_v1_rewarded_set_mixnodes(&self) -> Vec { - let Some(rewarded_set) = self.rewarded_set().await else { - return Vec::new(); - }; - - let mut rewarded_nodes = rewarded_set - .active_mixnodes() - .into_iter() - .collect::>(); - - // rewarded mixnode = active or standby - for standby in &rewarded_set.standby { - rewarded_nodes.insert(*standby); - } - - self.legacy_mixnodes_all() - .await - .into_iter() - .filter(|m| rewarded_nodes.contains(&m.mix_id())) - .collect() + pub(crate) async fn interval_reward_params( + &self, + ) -> Result { + self.get_owned(|c| c.current_reward_params).await } - pub async fn legacy_v1_active_set_mixnodes(&self) -> Vec { - let Some(rewarded_set) = self.rewarded_set().await else { - return Vec::new(); - }; - - let active_nodes = rewarded_set - .active_mixnodes() - .into_iter() - .collect::>(); - - self.legacy_mixnodes_all() - .await - .into_iter() - .filter(|m| active_nodes.contains(&m.mix_id())) - .collect() + pub(crate) async fn current_interval(&self) -> Result { + self.get_owned(|c| c.current_interval).await } - pub(crate) async fn interval_reward_params(&self) -> Cache> { - self.get_owned(|cache| cache.current_reward_params.clone_cache()) - .await - .unwrap_or_default() + pub(crate) async fn get_key_rotation_state( + &self, + ) -> Result { + 
self.get_owned(|c| c.key_rotation_state).await } - pub(crate) async fn current_interval(&self) -> Cache> { - self.get_owned(|cache| cache.current_interval.clone_cache()) - .await - .unwrap_or_default() + pub(crate) async fn current_key_rotation_id(&self) -> Result { + let guard = self.inner.get().await?; + let current_absolute_epoch_id = guard.current_interval.current_epoch_absolute_id(); + Ok(guard + .key_rotation_state + .key_rotation_id(current_absolute_epoch_id)) } - pub(crate) async fn contract_details(&self) -> Cache { - self.get_owned(|cache| cache.contracts_info.clone_cache()) + pub(crate) async fn contract_details(&self) -> CachedContractsInfo { + self.get_owned(|c| c.contracts_info.clone()) .await .unwrap_or_default() } - pub async fn legacy_mixnode_details( - &self, - mix_id: NodeId, - ) -> (Option, MixnodeStatus) { - // the old behaviour was to get the nodes from the filtered list, so let's not change it here - let rewarded_set = self.rewarded_set_owned().await; - let all_bonded = &self.legacy_mixnodes_filtered().await; - let Some(bond) = all_bonded.iter().find(|mix| mix.mix_id() == mix_id) else { - return (None, MixnodeStatus::NotFound); + pub async fn mixnode_status(&self, mix_id: NodeId) -> MixnodeStatus { + let Ok(cache) = self.inner.get().await else { + return Default::default(); }; - if rewarded_set.is_active_mixnode(&mix_id) { - return (Some(bond.clone()), MixnodeStatus::Active); - } - - if rewarded_set.is_standby(&mix_id) { - return (Some(bond.clone()), MixnodeStatus::Standby); + if cache.legacy_mixnodes.iter().any(|n| n.mix_id() == mix_id) { + MixnodeStatus::Inactive + } else { + MixnodeStatus::NotFound } - - (Some(bond.clone()), MixnodeStatus::Inactive) - } - - pub async fn mixnode_status(&self, mix_id: NodeId) -> MixnodeStatus { - self.legacy_mixnode_details(mix_id).await.1 } pub async fn get_node_refresh_data( &self, node_identity: ed25519::PublicKey, ) -> Option { - if !self.initialised() { - return None; - } - - let inner = 
self.inner.read().await; + let Ok(cache) = self.inner.get().await else { + return Default::default(); + }; let encoded_identity = node_identity.to_base58_string(); // 1. check nymnodes - if let Some(nym_node) = inner + if let Some(nym_node) = cache .nym_nodes .iter() .find(|n| n.bond_information.identity() == encoded_identity) @@ -388,7 +195,7 @@ impl NymContractCache { } // 2. check legacy mixnodes - if let Some(mixnode) = inner + if let Some(mixnode) = cache .legacy_mixnodes .iter() .find(|n| n.bond_information.identity() == encoded_identity) @@ -397,7 +204,7 @@ impl NymContractCache { } // 3. check legacy gateways - if let Some(gateway) = inner + if let Some(gateway) = cache .legacy_gateways .iter() .find(|n| n.identity() == &encoded_identity) @@ -408,19 +215,7 @@ impl NymContractCache { None } - pub fn initialised(&self) -> bool { - self.initialised.load(Ordering::Relaxed) - } - - pub(crate) async fn wait_for_initial_values(&self) { - let initialisation_backoff = Duration::from_secs(5); - loop { - if self.initialised() { - break; - } else { - debug!("Validator cache hasn't been initialised yet - waiting for {:?} before trying again", initialisation_backoff); - tokio::time::sleep(initialisation_backoff).await; - } - } + pub(crate) async fn naive_wait_for_initial_values(&self) { + self.inner.naive_wait_for_initial_values().await } } diff --git a/nym-api/src/nym_contract_cache/cache/refresher.rs b/nym-api/src/nym_contract_cache/cache/refresher.rs index 787d299de52..87af6d2d049 100644 --- a/nym-api/src/nym_contract_cache/cache/refresher.rs +++ b/nym-api/src/nym_contract_cache/cache/refresher.rs @@ -1,56 +1,47 @@ // Copyright 2023 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use super::NymContractCache; -use crate::nym_contract_cache::cache::data::{CachedContractInfo, CachedContractsInfo}; +use crate::nym_contract_cache::cache::data::{ + CachedContractInfo, CachedContractsInfo, ConfigScoreData, ContractCacheData, +}; use crate::nyxd::Client; -use 
crate::support::caching::CacheNotification; +use crate::support::caching::refresher::CacheItemProvider; use anyhow::Result; +use async_trait::async_trait; use nym_api_requests::legacy::{ LegacyGatewayBondWithId, LegacyMixNodeBondWithLayer, LegacyMixNodeDetailsWithLayer, }; -use nym_mixnet_contract_common::{EpochRewardedSet, LegacyMixLayer}; -use nym_task::TaskClient; +use nym_mixnet_contract_common::LegacyMixLayer; use nym_validator_client::nyxd::contract_traits::{ MixnetQueryClient, NymContractsProvider, VestingQueryClient, }; +use nym_validator_client::nyxd::error::NyxdError; use rand::prelude::SliceRandom; use rand::rngs::OsRng; +use std::collections::HashMap; use std::collections::HashSet; -use std::{collections::HashMap, sync::atomic::Ordering, time::Duration}; -use tokio::sync::watch; -use tokio::time; -use tracing::{error, info, trace, warn}; +use tracing::info; -pub struct NymContractCacheRefresher { +pub struct ContractDataProvider { nyxd_client: Client, - cache: NymContractCache, - caching_interval: Duration, - - // Notify listeners that the cache has been updated - update_notifier: watch::Sender, } -impl NymContractCacheRefresher { - pub(crate) fn new( - nyxd_client: Client, - caching_interval: Duration, - cache: NymContractCache, - ) -> Self { - let (tx, _) = watch::channel(CacheNotification::Start); - NymContractCacheRefresher { - nyxd_client, - cache, - caching_interval, - update_notifier: tx, - } +#[async_trait] +impl CacheItemProvider for ContractDataProvider { + type Item = ContractCacheData; + type Error = NyxdError; + + async fn try_refresh(&self) -> std::result::Result { + self.refresh().await } +} - pub fn subscribe(&self) -> watch::Receiver { - self.update_notifier.subscribe() +impl ContractDataProvider { + pub(crate) fn new(nyxd_client: Client) -> Self { + ContractDataProvider { nyxd_client } } - async fn get_nym_contracts_info(&self) -> Result { + async fn get_nym_contracts_info(&self) -> Result { use crate::query_guard; let mut updated = 
HashMap::new(); @@ -112,8 +103,8 @@ impl NymContractCacheRefresher { Ok(updated) } - async fn refresh(&self) -> Result<()> { - let rewarding_params = self.nyxd_client.get_current_rewarding_parameters().await?; + async fn refresh(&self) -> Result { + let current_reward_params = self.nyxd_client.get_current_rewarding_parameters().await?; let current_interval = self.nyxd_client.get_current_interval().await?.interval; let nym_nodes = self.nyxd_client.get_nymnodes().await?; @@ -127,7 +118,7 @@ impl NymContractCacheRefresher { .map(|id| (id.identity, id.node_id)) .collect(); - let mut gateways = Vec::with_capacity(gateway_bonds.len()); + let mut legacy_gateways = Vec::with_capacity(gateway_bonds.len()); #[allow(clippy::panic)] for bond in gateway_bonds { // we explicitly panic here because that value MUST exist. @@ -138,10 +129,10 @@ impl NymContractCacheRefresher { bond.identity() ) }); - gateways.push(LegacyGatewayBondWithId { bond, node_id }) + legacy_gateways.push(LegacyGatewayBondWithId { bond, node_id }) } - let rewarded_set = self.get_rewarded_set().await; + let rewarded_set = self.nyxd_client.get_rewarded_set_nodes().await?; let layer1 = rewarded_set .assignment .layer1 @@ -164,7 +155,7 @@ impl NymContractCacheRefresher { LegacyMixLayer::Three, ]; let mut rng = OsRng; - let mut mixnodes = Vec::with_capacity(mixnode_details.len()); + let mut legacy_mixnodes = Vec::with_capacity(mixnode_details.len()); for detail in mixnode_details { // if node is not in the rewarded set, well. 
// slap a random layer on it because legacy clients don't understand a concept of layerless mixnodes @@ -180,7 +171,7 @@ impl NymContractCacheRefresher { layer_choices.choose(&mut rng).copied().unwrap() }; - mixnodes.push(LegacyMixNodeDetailsWithLayer { + legacy_mixnodes.push(LegacyMixNodeDetailsWithLayer { bond_information: LegacyMixNodeBondWithLayer { bond: detail.bond_information, layer, @@ -190,71 +181,31 @@ impl NymContractCacheRefresher { }) } + let key_rotation_state = self.nyxd_client.get_key_rotation_state().await?; let config_score_params = self.nyxd_client.get_config_score_params().await?; let nym_node_version_history = self.nyxd_client.get_nym_node_version_history().await?; - let contract_info = self.get_nym_contracts_info().await?; + let contracts_info = self.get_nym_contracts_info().await?; info!( "Updating validator cache. There are {} [legacy] mixnodes, {} [legacy] gateways and {} nym nodes", - mixnodes.len(), - gateways.len(), + legacy_mixnodes.len(), + legacy_gateways.len(), nym_nodes.len(), ); - self.cache - .update( - mixnodes, - gateways, - nym_nodes, - rewarded_set, + Ok(ContractCacheData { + legacy_mixnodes, + legacy_gateways, + nym_nodes, + rewarded_set: rewarded_set.into(), + config_score_data: ConfigScoreData { config_score_params, nym_node_version_history, - rewarding_params, - current_interval, - contract_info, - ) - .await; - - if let Err(err) = self.update_notifier.send(CacheNotification::Updated) { - warn!("Failed to notify validator cache refresh: {err}"); - } - - Ok(()) - } - - async fn get_rewarded_set(&self) -> EpochRewardedSet { - self.nyxd_client - .get_rewarded_set_nodes() - .await - .unwrap_or_default() - } - - pub(crate) async fn run(&self, mut shutdown: TaskClient) { - let mut interval = time::interval(self.caching_interval); - while !shutdown.is_shutdown() { - tokio::select! { - _ = interval.tick() => { - tokio::select! 
{ - biased; - _ = shutdown.recv() => { - trace!("ValidatorCacheRefresher: Received shutdown"); - } - ret = self.refresh() => { - if let Err(err) = ret { - error!("Failed to refresh validator cache - {err}"); - } else { - // relaxed memory ordering is fine here. worst case scenario network monitor - // will just have to wait for an additional backoff to see the change. - // And so this will not really incur any performance penalties by setting it every loop iteration - self.cache.initialised.store(true, Ordering::Relaxed) - } - } - } - } - _ = shutdown.recv() => { - trace!("ValidatorCacheRefresher: Received shutdown"); - } - } - } + }, + current_reward_params, + current_interval, + key_rotation_state, + contracts_info, + }) } } diff --git a/nym-api/src/nym_contract_cache/handlers.rs b/nym-api/src/nym_contract_cache/handlers.rs index c40a64d8c47..7519b222d62 100644 --- a/nym-api/src/nym_contract_cache/handlers.rs +++ b/nym-api/src/nym_contract_cache/handlers.rs @@ -5,12 +5,13 @@ use crate::node_status_api::helpers::{ _get_active_set_legacy_mixnodes_detailed, _get_legacy_mixnodes_detailed, _get_rewarded_set_legacy_mixnodes_detailed, }; +use crate::node_status_api::models::ApiResult; use crate::support::http::state::AppState; use crate::support::legacy_helpers::{to_legacy_gateway, to_legacy_mixnode}; use axum::extract::{Query, State}; use axum::Router; use nym_api_requests::legacy::LegacyMixNodeDetailsWithLayer; -use nym_api_requests::models::MixNodeBondAnnotated; +use nym_api_requests::models::{KeyRotationInfoResponse, MixNodeBondAnnotated}; use nym_http_api_common::{FormattedResponse, OutputParams}; use nym_mixnet_contract_common::reward_params::Performance; use nym_mixnet_contract_common::{reward_params::RewardingParams, GatewayBond, Interval, NodeId}; @@ -49,6 +50,10 @@ pub(crate) fn nym_contract_cache_routes() -> Router { axum::routing::get(get_interval_reward_params), ) .route("/epoch/current", axum::routing::get(get_current_epoch)) + .route( + 
"/epoch/key-rotation-info", + axum::routing::get(get_current_key_rotation_info), + ) } #[utoipa::path( @@ -70,24 +75,24 @@ async fn get_mixnodes( State(state): State, ) -> FormattedResponse> { let output = output.output.unwrap_or_default(); - let mut out = state.nym_contract_cache().legacy_mixnodes_filtered().await; let Ok(describe_cache) = state.described_nodes_cache.get().await else { - return output.to_response(out); + return output.to_response(Vec::new()); }; let Some(migrated_nymnodes) = state.nym_contract_cache().all_cached_nym_nodes().await else { - return output.to_response(out); + return output.to_response(Vec::new()); }; let Ok(annotations) = state.node_annotations().await else { - return output.to_response(out); + return output.to_response(Vec::new()); }; // safety: valid percentage value #[allow(clippy::unwrap_used)] let p50 = Performance::from_percentage_value(50).unwrap(); + let mut nodes = Vec::new(); for nym_node in &**migrated_nymnodes { // if we can't get it self-described data, ignore it let Some(description) = describe_cache.get_description(&nym_node.node_id()) else { @@ -107,10 +112,10 @@ async fn get_mixnodes( } let node = to_legacy_mixnode(nym_node, description); - out.push(node); + nodes.push(node); } - output.to_response(out) + output.to_response(nodes) } // DEPRECATED: this endpoint now lives in `node_status_api`. 
Once all consumers are updated, @@ -163,25 +168,18 @@ async fn get_gateways( ) -> FormattedResponse> { let output = output.output.unwrap_or_default(); - // legacy - let mut out: Vec = state - .nym_contract_cache() - .legacy_gateways_filtered() - .await - .into_iter() - .map(Into::into) - .collect(); + let mut nodes = Vec::new(); let Ok(describe_cache) = state.described_nodes_cache.get().await else { - return output.to_response(out); + return output.to_response(nodes); }; let Some(migrated_nymnodes) = state.nym_contract_cache().all_cached_nym_nodes().await else { - return output.to_response(out); + return output.to_response(nodes); }; let Ok(annotations) = state.node_annotations().await else { - return output.to_response(out); + return output.to_response(nodes); }; // safety: valid percentage value @@ -207,10 +205,10 @@ async fn get_gateways( } let node = to_legacy_gateway(nym_node, description); - out.push(node); + nodes.push(node); } - output.to_response(out) + output.to_response(nodes) } #[utoipa::path( @@ -229,17 +227,11 @@ async fn get_gateways( #[deprecated] async fn get_rewarded_set( Query(output): Query, - State(state): State, + State(_state): State, ) -> FormattedResponse> { let output = output.output.unwrap_or_default(); - output.to_response( - state - .nym_contract_cache() - .legacy_v1_rewarded_set_mixnodes() - .await - .clone(), - ) + output.to_response(Vec::new()) } // DEPRECATED: this endpoint now lives in `node_status_api`. 
Once all consumers are updated, @@ -298,11 +290,7 @@ async fn get_active_set( ) -> FormattedResponse> { let output = output.output.unwrap_or_default(); - let mut out = state - .nym_contract_cache() - .legacy_v1_active_set_mixnodes() - .await - .clone(); + let mut out = Vec::new(); let Some(rewarded_set) = state.nym_contract_cache().rewarded_set().await else { return output.to_response(out); @@ -410,16 +398,11 @@ async fn get_blacklisted_mixnodes( ) -> FormattedResponse>> { let output = output.output.unwrap_or_default(); - let blacklist = state - .nym_contract_cache() - .mixnodes_blacklist() - .await - .to_owned(); - if blacklist.is_empty() { - output.to_response(None) - } else { - output.to_response(Some(blacklist)) - } + let cache = state.nym_contract_cache(); + + // since blacklist has been removed, the equivalent of a blacklisted node is a legacy node + let mixnodes = cache.legacy_mixnodes_all().await; + output.to_response(Some(mixnodes.into_iter().map(|m| m.mix_id()).collect())) } #[utoipa::path( @@ -443,19 +426,14 @@ async fn get_blacklisted_gateways( let output = output.output.unwrap_or_default(); let cache = state.nym_contract_cache(); - let blacklist = cache.gateways_blacklist().await.clone(); - if blacklist.is_empty() { - output.to_response(None) - } else { - let gateways = cache.legacy_gateways_all().await; - output.to_response(Some( - gateways - .into_iter() - .filter(|g| blacklist.contains(&g.node_id)) - .map(|g| g.gateway.identity_key.clone()) - .collect(), - )) - } + // since blacklist has been removed, the equivalent of a blacklisted node is a legacy node + let gateways = cache.legacy_gateways_all().await; + output.to_response(Some( + gateways + .into_iter() + .map(|g| g.gateway.identity_key.clone()) + .collect(), + )) } #[utoipa::path( @@ -482,7 +460,7 @@ async fn get_interval_reward_params( .nym_contract_cache() .interval_reward_params() .await - .to_owned(), + .ok(), ) } @@ -505,11 +483,38 @@ async fn get_current_epoch( ) -> FormattedResponse> { 
let output = output.output.unwrap_or_default(); - output.to_response( - state - .nym_contract_cache() - .current_interval() - .await - .to_owned(), - ) + output.to_response(state.nym_contract_cache().current_interval().await.ok()) +} + +// +#[utoipa::path( + tag = "contract-cache", + get, + path = "/key-rotation-info", + context_path = "/v1/epoch", + responses( + (status = 200, content( + (KeyRotationInfoResponse = "application/json"), + (KeyRotationInfoResponse = "application/yaml"), + (KeyRotationInfoResponse = "application/bincode") + )) + ), + params(OutputParams) +)] +async fn get_current_key_rotation_info( + Query(output): Query, + State(state): State, +) -> ApiResult> { + let output = output.output.unwrap_or_default(); + + let contract_cache = state.nym_contract_cache(); + let current_interval = contract_cache.current_interval().await?; + let key_rotation_state = contract_cache.get_key_rotation_state().await?; + + Ok(output.to_response(KeyRotationInfoResponse { + key_rotation_state, + current_absolute_epoch_id: current_interval.current_epoch_absolute_id(), + current_epoch_start: current_interval.current_epoch_start(), + epoch_duration: current_interval.epoch_length(), + })) } diff --git a/nym-api/src/nym_contract_cache/mod.rs b/nym-api/src/nym_contract_cache/mod.rs index 6ca509ef1ef..cdb6bcd8e05 100644 --- a/nym-api/src/nym_contract_cache/mod.rs +++ b/nym-api/src/nym_contract_cache/mod.rs @@ -1,29 +1,25 @@ // Copyright 2021-2023 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only +use crate::nym_contract_cache::cache::data::ContractCacheData; +use crate::nym_contract_cache::cache::refresher::ContractDataProvider; use crate::nym_contract_cache::cache::NymContractCache; -use crate::support::{self, config, nyxd}; -use nym_task::TaskManager; - -use self::cache::refresher::NymContractCacheRefresher; +use crate::support::caching::refresher::CacheRefresher; +use crate::support::{config, nyxd}; +use nym_validator_client::nyxd::error::NyxdError; pub(crate) 
mod cache; pub(crate) mod handlers; -pub(crate) fn start_refresher( +pub(crate) fn build_refresher( config: &config::NodeStatusAPI, nym_contract_cache_state: &NymContractCache, nyxd_client: nyxd::Client, - shutdown: &TaskManager, -) -> tokio::sync::watch::Receiver { - let nym_contract_cache_refresher = NymContractCacheRefresher::new( - nyxd_client, +) -> CacheRefresher { + CacheRefresher::new_with_initial_value( + Box::new(ContractDataProvider::new(nyxd_client)), config.debug.caching_interval, - nym_contract_cache_state.to_owned(), - ); - let nym_contract_cache_listener = nym_contract_cache_refresher.subscribe(); - let shutdown_listener = shutdown.subscribe(); - tokio::spawn(async move { nym_contract_cache_refresher.run(shutdown_listener).await }); - - nym_contract_cache_listener + nym_contract_cache_state.inner(), + ) + .named("contract-cache-refresher") } diff --git a/nym-api/src/nym_nodes/handlers/legacy.rs b/nym-api/src/nym_nodes/handlers/legacy.rs index 5910c758c94..bc36227c93a 100644 --- a/nym-api/src/nym_nodes/handlers/legacy.rs +++ b/nym-api/src/nym_nodes/handlers/legacy.rs @@ -5,7 +5,6 @@ use crate::support::http::state::AppState; use crate::support::legacy_helpers::{to_legacy_gateway, to_legacy_mixnode}; use axum::extract::{Query, State}; use axum::Router; -use nym_api_requests::legacy::LegacyMixNodeBondWithLayer; use nym_api_requests::models::{LegacyDescribedGateway, LegacyDescribedMixNode}; use nym_http_api_common::{FormattedResponse, OutputParams}; use tower_http::compression::CompressionLayer; @@ -43,29 +42,15 @@ async fn get_gateways_described( Query(output): Query, State(state): State, ) -> FormattedResponse> { - let contract_cache = state.nym_contract_cache(); let describe_cache = state.described_nodes_cache(); let output = output.output.unwrap_or_default(); - // legacy - let legacy = contract_cache.legacy_gateways_filtered().await; - - // if the self describe cache is unavailable, well, don't attach describe data and only return legacy gateways 
let Ok(describe_cache) = describe_cache.get().await else { - return output.to_response(legacy.into_iter().map(Into::into).collect()); + return output.to_response(Vec::new()); }; let migrated_nymnodes = state.nym_contract_cache().nym_nodes().await; - let mut out = Vec::new(); - - for legacy_bond in legacy { - out.push(LegacyDescribedGateway { - self_described: describe_cache - .get_description(&legacy_bond.node_id) - .cloned(), - bond: legacy_bond.bond, - }) - } + let mut described = Vec::new(); for nym_node in migrated_nymnodes { // we ALWAYS need description to set legacy fields @@ -77,13 +62,13 @@ async fn get_gateways_described( continue; } - out.push(LegacyDescribedGateway { + described.push(LegacyDescribedGateway { bond: to_legacy_gateway(&nym_node, description), self_described: Some(description.clone()), }) } - output.to_response(out) + output.to_response(described) } #[utoipa::path( @@ -104,31 +89,15 @@ async fn get_mixnodes_described( Query(output): Query, State(state): State, ) -> FormattedResponse> { - let contract_cache = state.nym_contract_cache(); let describe_cache = state.described_nodes_cache(); let output = output.output.unwrap_or_default(); - let legacy: Vec = contract_cache - .legacy_mixnodes_filtered() - .await - .into_iter() - .map(|m| m.bond_information) - .collect::>(); - - // if the self describe cache is unavailable, well, don't attach describe data and only return legacy mixnodes let Ok(describe_cache) = describe_cache.get().await else { - return output.to_response(legacy.into_iter().map(Into::into).collect()); + return output.to_response(Vec::new()); }; let migrated_nymnodes = state.nym_contract_cache().nym_nodes().await; - let mut out = Vec::new(); - - for legacy_bond in legacy { - out.push(LegacyDescribedMixNode { - self_described: describe_cache.get_description(&legacy_bond.mix_id).cloned(), - bond: legacy_bond, - }) - } + let mut described = Vec::new(); for nym_node in migrated_nymnodes { // we ALWAYS need description to set legacy 
fields @@ -140,11 +109,11 @@ async fn get_mixnodes_described( continue; } - out.push(LegacyDescribedMixNode { + described.push(LegacyDescribedMixNode { bond: to_legacy_mixnode(&nym_node, description).bond_information, self_described: Some(description.clone()), }) } - output.to_response(out) + output.to_response(described) } diff --git a/nym-api/src/nym_nodes/handlers/mod.rs b/nym-api/src/nym_nodes/handlers/mod.rs index bd9dd674d60..ed871b096c3 100644 --- a/nym-api/src/nym_nodes/handlers/mod.rs +++ b/nym-api/src/nym_nodes/handlers/mod.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: GPL-3.0-only use crate::node_status_api::models::{AxumErrorResponse, AxumResult}; -use crate::support::caching::cache::UninitialisedCache; use crate::support::http::helpers::{NodeIdParam, PaginationRequest}; use crate::support::http::state::AppState; use axum::extract::{Path, Query, State}; @@ -25,7 +24,6 @@ use tower_http::compression::CompressionLayer; use utoipa::{IntoParams, ToSchema}; pub(crate) mod legacy; -pub(crate) mod unstable; pub(crate) fn nym_node_routes() -> Router { Router::new() @@ -69,17 +67,9 @@ async fn rewarded_set( ) -> AxumResult> { let output = output.output.unwrap_or_default(); - let cached_rewarded_set = state - .nym_contract_cache() - .rewarded_set() - .await - .map(|cache| cache.clone_cache()) - .ok_or(UninitialisedCache)? 
- .into_inner(); + let rewarded_set = state.nym_contract_cache().rewarded_set_owned().await?; - Ok(output.to_response( - nym_mixnet_contract_common::EpochRewardedSet::from(cached_rewarded_set).into(), - )) + Ok(output.to_response(nym_mixnet_contract_common::EpochRewardedSet::from(rewarded_set).into())) } #[utoipa::path( diff --git a/nym-api/src/nym_nodes/handlers/unstable/mod.rs b/nym-api/src/nym_nodes/handlers/unstable/mod.rs deleted file mode 100644 index 92d695ddfc1..00000000000 --- a/nym-api/src/nym_nodes/handlers/unstable/mod.rs +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2024 - Nym Technologies SA -// SPDX-License-Identifier: GPL-3.0-only - -//! All routes/nodes are split into three tiers: -//! -//! `/skimmed` -//! - used by clients -//! - returns the very basic information for routing purposes -//! -//! `/semi-skimmed` -//! - used by other nodes/VPN -//! - returns more additional information such noise keys -//! -//! `/full-fat` -//! - used by explorers, et al. -//! - returns almost everything there is about the nodes -//! -//! There's also additional split based on the role: -//! - `?role` => filters based on the specific role (mixnode/gateway/(in the future: entry/exit)) -//! - `/mixnodes/` => only returns mixnode role data -//! 
- `/gateway/` => only returns (entry) gateway role data - -use crate::node_status_api::models::{AxumErrorResponse, AxumResult}; -use crate::nym_nodes::handlers::unstable::full_fat::nodes_detailed; -use crate::nym_nodes::handlers::unstable::semi_skimmed::nodes_expanded; -use crate::nym_nodes::handlers::unstable::skimmed::{ - entry_gateways_basic_active, entry_gateways_basic_all, exit_gateways_basic_active, - exit_gateways_basic_all, mixnodes_basic_active, mixnodes_basic_all, nodes_basic_active, - nodes_basic_all, -}; -use crate::support::http::helpers::PaginationRequest; -use crate::support::http::state::AppState; -use axum::extract::{Query, State}; -use axum::routing::{get, post}; -use axum::{Json, Router}; -use nym_api_requests::nym_nodes::{ - NodeRoleQueryParam, NodesByAddressesRequestBody, NodesByAddressesResponse, -}; -use nym_http_api_common::{FormattedResponse, Output, OutputParams}; -use serde::Deserialize; -use std::collections::HashMap; -use tower_http::compression::CompressionLayer; - -pub(crate) mod full_fat; -mod helpers; -pub(crate) mod semi_skimmed; -pub(crate) mod skimmed; - -#[allow(deprecated)] -pub(crate) fn routes() -> Router { - Router::new() - .nest( - "/skimmed", - Router::new() - .route("/", get(nodes_basic_all)) - .route("/active", get(nodes_basic_active)) - .nest( - "/mixnodes", - Router::new() - .route("/active", get(mixnodes_basic_active)) - .route("/all", get(mixnodes_basic_all)), - ) - .nest( - "/entry-gateways", - Router::new() - .route("/active", get(entry_gateways_basic_active)) - .route("/all", get(entry_gateways_basic_all)), - ) - .nest( - "/exit-gateways", - Router::new() - .route("/active", get(exit_gateways_basic_active)) - .route("/all", get(exit_gateways_basic_all)), - ), - ) - .nest( - "/semi-skimmed", - Router::new().route("/", get(nodes_expanded)), - ) - .nest("/full-fat", Router::new().route("/", get(nodes_detailed))) - .route("/gateways/skimmed", get(skimmed::deprecated_gateways_basic)) - .route("/mixnodes/skimmed", 
get(skimmed::deprecated_mixnodes_basic)) - .route("/by-addresses", post(nodes_by_addresses)) - .layer(CompressionLayer::new()) -} - -#[derive(Debug, Deserialize, utoipa::IntoParams)] -struct NodesParamsWithRole { - #[param(inline)] - role: Option, - - #[allow(dead_code)] - semver_compatibility: Option, - no_legacy: Option, - page: Option, - per_page: Option, - - // Identifier for the current epoch of the topology state. When sent by a client we can check if - // the client already knows about the latest topology state, allowing a `no-updates` response - // instead of wasting bandwidth serving an unchanged topology. - epoch_id: Option, - - output: Option, -} - -#[derive(Debug, Deserialize, utoipa::IntoParams)] -#[into_params(parameter_in = Query)] -struct NodesParams { - #[allow(dead_code)] - semver_compatibility: Option, - no_legacy: Option, - page: Option, - per_page: Option, - - // Identifier for the current epoch of the topology state. When sent by a client we can check if - // the client already knows about the latest topology state, allowing a `no-updates` response - // instead of wasting bandwidth serving an unchanged topology. 
- epoch_id: Option, - output: Option, -} - -impl From for NodesParams { - fn from(params: NodesParamsWithRole) -> Self { - NodesParams { - semver_compatibility: params.semver_compatibility, - no_legacy: params.no_legacy, - page: params.page, - per_page: params.per_page, - epoch_id: params.epoch_id, - output: params.output, - } - } -} - -impl<'a> From<&'a NodesParams> for PaginationRequest { - fn from(params: &'a NodesParams) -> Self { - PaginationRequest { - output: params.output, - page: params.page, - per_page: params.per_page, - } - } -} - -#[utoipa::path( - tag = "Unstable Nym Nodes", - post, - request_body = NodesByAddressesRequestBody, - path = "/by-addresses", - context_path = "/v1/unstable/nym-nodes", - responses( - (status = 200, content( - (NodesByAddressesResponse = "application/json"), - (NodesByAddressesResponse = "application/yaml"), - (NodesByAddressesResponse = "application/bincode") - )) - ), - params(OutputParams) -)] -async fn nodes_by_addresses( - Query(output): Query, - state: State, - Json(body): Json, -) -> AxumResult> { - // if the request is too big, simply reject it - if body.addresses.len() > 100 { - return Err(AxumErrorResponse::bad_request( - "requested too many addresses", - )); - } - - let output = output.output.unwrap_or_default(); - - // TODO: perhaps introduce different cache because realistically nym-api will receive - // request for the same couple addresses from all nodes in quick succession - let describe_cache = state.describe_nodes_cache_data().await?; - - let mut existence = HashMap::new(); - for address in body.addresses { - existence.insert(address, describe_cache.node_with_address(address)); - } - - Ok(output.to_response(NodesByAddressesResponse { existence })) -} diff --git a/nym-api/src/nym_nodes/handlers/unstable/skimmed.rs b/nym-api/src/nym_nodes/handlers/unstable/skimmed.rs deleted file mode 100644 index f7931db1cf1..00000000000 --- a/nym-api/src/nym_nodes/handlers/unstable/skimmed.rs +++ /dev/null @@ -1,605 +0,0 @@ 
-// Copyright 2024 - Nym Technologies SA -// SPDX-License-Identifier: GPL-3.0-only - -use crate::node_describe_cache::DescribedNodes; -use crate::node_status_api::models::{AxumErrorResponse, AxumResult}; -use crate::nym_nodes::handlers::unstable::helpers::{refreshed_at, LegacyAnnotation}; -use crate::nym_nodes::handlers::unstable::{NodesParams, NodesParamsWithRole}; -use crate::support::caching::Cache; -use crate::support::http::state::AppState; -use axum::extract::{Query, State}; -use nym_api_requests::models::{ - NodeAnnotation, NymNodeDescription, OffsetDateTimeJsonSchemaWrapper, -}; -use nym_api_requests::nym_nodes::{ - CachedNodesResponse, NodeRole, NodeRoleQueryParam, PaginatedCachedNodesResponse, SkimmedNode, -}; -use nym_api_requests::pagination::PaginatedResponse; -use nym_http_api_common::{FormattedResponse, Output}; -use nym_mixnet_contract_common::NodeId; -use nym_topology::CachedEpochRewardedSet; -use std::collections::HashMap; -use std::future::Future; -use std::time::Duration; -use tokio::sync::RwLockReadGuard; -use tracing::trace; -use utoipa::ToSchema; - -pub type PaginatedSkimmedNodes = - AxumResult>>; - -/// Given all relevant caches, build part of response for JUST Nym Nodes -fn build_nym_nodes_response<'a, NI>( - rewarded_set: &CachedEpochRewardedSet, - nym_nodes_subset: NI, - annotations: &HashMap, - active_only: bool, -) -> Vec -where - NI: Iterator + 'a, -{ - let mut nodes = Vec::new(); - for nym_node in nym_nodes_subset { - let node_id = nym_node.node_id; - - let role: NodeRole = rewarded_set.role(node_id).into(); - - // if the role is inactive, see if our filter allows it - if active_only && role.is_inactive() { - continue; - } - - // honestly, not sure under what exact circumstances this value could be missing, - // but in that case just use 0 performance - let annotation = annotations.get(&node_id).copied().unwrap_or_default(); - - nodes.push(nym_node.to_skimmed_node(role, annotation.last_24h_performance)); - } - nodes -} - -/// Given 
all relevant caches, add appropriate legacy nodes to the part of the response -fn add_legacy( - nodes: &mut Vec, - rewarded_set: &CachedEpochRewardedSet, - describe_cache: &DescribedNodes, - annotated_legacy_nodes: &HashMap, - active_only: bool, -) where - LN: LegacyAnnotation, -{ - for (node_id, legacy) in annotated_legacy_nodes.iter() { - let role: NodeRole = rewarded_set.role(*node_id).into(); - - // if the role is inactive, see if our filter allows it - if active_only && role.is_inactive() { - continue; - } - - // if we have self-described info, prefer it over contract data - if let Some(described) = describe_cache.get_node(node_id) { - nodes.push(described.to_skimmed_node(role, legacy.performance())) - } else { - match legacy.try_to_skimmed_node(role) { - Ok(node) => nodes.push(node), - Err(err) => { - let id = legacy.identity(); - trace!("node {id} is malformed: {err}") - } - } - } - } -} - -// hehe, what an abomination, but it's used in multiple different places and I hate copy-pasting code, -// especially if it has multiple loops, etc -async fn build_skimmed_nodes_response<'a, NI, LG, Fut, LN>( - state: &'a AppState, - Query(query_params): Query, - nym_nodes_subset: NI, - annotated_legacy_nodes_getter: LG, - active_only: bool, - output: Output, -) -> PaginatedSkimmedNodes -where - // iterator returning relevant subset of nym-nodes (like mixing nym-nodes, entries, etc.) - NI: Iterator + 'a, - - // async function that returns cache of appropriate legacy nodes (mixnodes or gateways) - LG: Fn(&'a AppState) -> Fut, - Fut: - Future>>, AxumErrorResponse>>, - - // the legacy node (MixNodeBondAnnotated or GatewayBondAnnotated) - LN: LegacyAnnotation + 'a, -{ - // TODO: implement it - let _ = query_params.per_page; - let _ = query_params.page; - - // 1. get the rewarded set - let rewarded_set = state.rewarded_set().await?; - - // 2. grab all annotations so that we could attach scores to the [nym] nodes - let annotations = state.node_annotations().await?; - - // 3. 
implicitly grab the relevant described nodes - // (ideally it'd be tied directly to the NI iterator, but I couldn't defeat the compiler) - let describe_cache = state.describe_nodes_cache_data().await?; - - let Some(interval) = state - .nym_contract_cache() - .current_interval() - .await - .to_owned() - else { - // if we can't obtain interval information, it means caches are not valid - return Err(AxumErrorResponse::service_unavailable()); - }; - - // 4.0 If the client indicates that they already know about the current topology send empty response - if let Some(client_known_epoch) = query_params.epoch_id { - if client_known_epoch == interval.current_epoch_id() { - return Ok(output.to_response(PaginatedCachedNodesResponse::no_updates())); - } - } - - // 4. start building the response - let mut nodes = - build_nym_nodes_response(&rewarded_set, nym_nodes_subset, &annotations, active_only); - - // 5. if we allow legacy nodes, repeat the procedure for them, otherwise return just nym-nodes - if let Some(true) = query_params.no_legacy { - // min of all caches - let refreshed_at = refreshed_at([ - rewarded_set.timestamp(), - annotations.timestamp(), - describe_cache.timestamp(), - ]); - - return Ok(output.to_response( - PaginatedCachedNodesResponse::new_full(refreshed_at, nodes).fresh(Some(interval)), - )); - } - - // 6. 
grab relevant legacy nodes - // (due to the existence of the legacy endpoints, we already have fully annotated data on them) - let annotated_legacy_nodes = annotated_legacy_nodes_getter(state).await?; - add_legacy( - &mut nodes, - &rewarded_set, - &describe_cache, - &annotated_legacy_nodes, - active_only, - ); - - // min of all caches - let refreshed_at = refreshed_at([ - rewarded_set.timestamp(), - annotations.timestamp(), - describe_cache.timestamp(), - annotated_legacy_nodes.timestamp(), - ]); - - let base_response = output.to_response( - PaginatedCachedNodesResponse::new_full(refreshed_at, nodes).fresh(Some(interval)), - ); - - if !active_only { - return Ok(base_response); - } - - // if caller requested only active nodes, the response is valid until the epoch changes - // (but add 2 minutes due to epoch transition not being instantaneous - let epoch_end = interval.current_epoch_end(); - let expiration = epoch_end + Duration::from_secs(120); - Ok(base_response.with_expires_header(expiration)) -} - -/// Deprecated query that gets ALL gateways -#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParams), - path = "/gateways/skimmed", - context_path = "/v1/unstable/nym-nodes", - responses( - (status = 200, content( - (CachedNodesResponse = "application/json"), - (CachedNodesResponse = "application/yaml"), - (CachedNodesResponse = "application/bincode") - )) - ), -)] -#[deprecated(note = "use '/v1/unstable/nym-nodes/entry-gateways/skimmed/all' instead")] -pub(super) async fn deprecated_gateways_basic( - state: State, - query_params: Query, -) -> AxumResult>> { - let output = query_params.output.unwrap_or_default(); - - // 1. call '/v1/unstable/skimmed/entry-gateways/all' - let all_gateways = entry_gateways_basic_all(state, query_params) - .await? - .into_inner(); - - // 3. return result - Ok(output.to_response(CachedNodesResponse { - refreshed_at: all_gateways.refreshed_at, - // 2. 
remove pagination - nodes: all_gateways.nodes.data, - })) -} - -/// Deprecated query that gets ACTIVE-ONLY mixnodes -#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParams), - path = "/mixnodes/skimmed", - context_path = "/v1/unstable/nym-nodes", - responses( - (status = 200, content( - (CachedNodesResponse = "application/json"), - (CachedNodesResponse = "application/yaml"), - (CachedNodesResponse = "application/bincode") - )) - ), -)] -#[deprecated(note = "use '/v1/unstable/nym-nodes/skimmed/mixnodes/active' instead")] -pub(super) async fn deprecated_mixnodes_basic( - state: State, - query_params: Query, -) -> AxumResult>> { - let output = query_params.output.unwrap_or_default(); - - // 1. call '/v1/unstable/nym-nodes/skimmed/mixnodes/active' - let active_mixnodes = mixnodes_basic_active(state, query_params) - .await? - .into_inner(); - - // 3. return result - Ok(output.to_response(CachedNodesResponse { - refreshed_at: active_mixnodes.refreshed_at, - // 2. remove pagination - nodes: active_mixnodes.nodes.data, - })) -} - -async fn nodes_basic( - state: State, - Query(query_params): Query, - active_only: bool, -) -> PaginatedSkimmedNodes { - let output = query_params.output.unwrap_or_default(); - - // unfortunately we have to build the response semi-manually here as we need to add two sources of legacy nodes - - // 1. 
grab all relevant described nym-nodes - let rewarded_set = state.rewarded_set().await?; - - let describe_cache = state.describe_nodes_cache_data().await?; - let all_nym_nodes = describe_cache.all_nym_nodes(); - let annotations = state.node_annotations().await?; - let legacy_mixnodes = state.legacy_mixnode_annotations().await?; - let legacy_gateways = state.legacy_gateways_annotations().await?; - - let mut nodes = - build_nym_nodes_response(&rewarded_set, all_nym_nodes, &annotations, active_only); - - // add legacy gateways to the response - add_legacy( - &mut nodes, - &rewarded_set, - &describe_cache, - &legacy_gateways, - active_only, - ); - - // add legacy mixnodes to the response - add_legacy( - &mut nodes, - &rewarded_set, - &describe_cache, - &legacy_mixnodes, - active_only, - ); - - // min of all caches - let refreshed_at = refreshed_at([ - rewarded_set.timestamp(), - annotations.timestamp(), - describe_cache.timestamp(), - legacy_mixnodes.timestamp(), - legacy_gateways.timestamp(), - ]); - - Ok(output.to_response(PaginatedCachedNodesResponse::new_full(refreshed_at, nodes))) -} - -#[allow(dead_code)] // not dead, used in OpenAPI docs -#[derive(ToSchema)] -#[schema(title = "PaginatedCachedNodesResponse")] -pub struct PaginatedCachedNodesResponseSchema { - pub refreshed_at: OffsetDateTimeJsonSchemaWrapper, - #[schema(value_type = SkimmedNode)] - pub nodes: PaginatedResponse, -} - -/// Return all Nym Nodes and optionally legacy mixnodes/gateways (if `no-legacy` flag is not used) -/// that are currently bonded. 
-#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParamsWithRole), - path = "", - context_path = "/v1/unstable/nym-nodes/skimmed", - responses( - (status = 200, content( - (PaginatedCachedNodesResponseSchema = "application/json"), - (PaginatedCachedNodesResponseSchema = "application/yaml"), - (PaginatedCachedNodesResponseSchema = "application/bincode") - )) - ), -)] -pub(super) async fn nodes_basic_all( - state: State, - Query(query_params): Query, -) -> PaginatedSkimmedNodes { - if let Some(role) = query_params.role { - return match role { - NodeRoleQueryParam::ActiveMixnode => { - mixnodes_basic_all(state, Query(query_params.into())).await - } - NodeRoleQueryParam::EntryGateway => { - entry_gateways_basic_all(state, Query(query_params.into())).await - } - NodeRoleQueryParam::ExitGateway => { - exit_gateways_basic_all(state, Query(query_params.into())).await - } - }; - } - - nodes_basic(state, Query(query_params.into()), false).await -} - -/// Return Nym Nodes and optionally legacy mixnodes/gateways (if `no-legacy` flag is not used) -/// that are currently bonded and are in the **active set** -#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParams), - path = "/active", - context_path = "/v1/unstable/nym-nodes/skimmed", - responses( - (status = 200, content( - (PaginatedCachedNodesResponseSchema = "application/json"), - (PaginatedCachedNodesResponseSchema = "application/yaml"), - (PaginatedCachedNodesResponseSchema = "application/bincode") - )) - ), -)] -pub(super) async fn nodes_basic_active( - state: State, - Query(query_params): Query, -) -> PaginatedSkimmedNodes { - if let Some(role) = query_params.role { - return match role { - NodeRoleQueryParam::ActiveMixnode => { - mixnodes_basic_active(state, Query(query_params.into())).await - } - NodeRoleQueryParam::EntryGateway => { - entry_gateways_basic_active(state, Query(query_params.into())).await - } - NodeRoleQueryParam::ExitGateway => { - exit_gateways_basic_active(state, 
Query(query_params.into())).await - } - }; - } - - nodes_basic(state, Query(query_params.into()), true).await -} - -async fn mixnodes_basic( - state: State, - query_params: Query, - active_only: bool, -) -> PaginatedSkimmedNodes { - let output = query_params.output.unwrap_or_default(); - - // 1. grab all relevant described nym-nodes - let describe_cache = state.describe_nodes_cache_data().await?; - let mixing_nym_nodes = describe_cache.mixing_nym_nodes(); - - build_skimmed_nodes_response( - &state.0, - query_params, - mixing_nym_nodes, - |state| state.legacy_mixnode_annotations(), - active_only, - output, - ) - .await -} - -/// Returns Nym Nodes and optionally legacy mixnodes (if `no-legacy` flag is not used) -/// that are currently bonded and support mixing role. -#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParams), - path = "/mixnodes/all", - context_path = "/v1/unstable/nym-nodes/skimmed", - responses( - (status = 200, content( - (PaginatedCachedNodesResponseSchema = "application/json"), - (PaginatedCachedNodesResponseSchema = "application/yaml"), - (PaginatedCachedNodesResponseSchema = "application/bincode") - )) - ), -)] -pub(super) async fn mixnodes_basic_all( - state: State, - query_params: Query, -) -> PaginatedSkimmedNodes { - mixnodes_basic(state, query_params, false).await -} - -/// Returns Nym Nodes and optionally legacy mixnodes (if `no-legacy` flag is not used) -/// that are currently bonded and are in the active set with one of the mixing roles. 
-#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParams), - path = "/mixnodes/active", - context_path = "/v1/unstable/nym-nodes/skimmed", - responses( - (status = 200, content( - (PaginatedCachedNodesResponseSchema = "application/json"), - (PaginatedCachedNodesResponseSchema = "application/yaml"), - (PaginatedCachedNodesResponseSchema = "application/bincode") - )) - ), -)] -pub(super) async fn mixnodes_basic_active( - state: State, - query_params: Query, -) -> PaginatedSkimmedNodes { - mixnodes_basic(state, query_params, true).await -} - -async fn entry_gateways_basic( - state: State, - query_params: Query, - active_only: bool, -) -> PaginatedSkimmedNodes { - let output = query_params.output.unwrap_or_default(); - - // 1. grab all relevant described nym-nodes - let describe_cache = state.describe_nodes_cache_data().await?; - let mixing_nym_nodes = describe_cache.entry_capable_nym_nodes(); - - build_skimmed_nodes_response( - &state.0, - query_params, - mixing_nym_nodes, - |state| state.legacy_gateways_annotations(), - active_only, - output, - ) - .await -} - -/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) -/// that are currently bonded and are in the active set with the entry role. -#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParams), - path = "/entry-gateways/active", - context_path = "/v1/unstable/nym-nodes/skimmed", - responses( - (status = 200, content( - (PaginatedCachedNodesResponseSchema = "application/json"), - (PaginatedCachedNodesResponseSchema = "application/yaml"), - (PaginatedCachedNodesResponseSchema = "application/bincode") - )) - ), -)] -pub(super) async fn entry_gateways_basic_active( - state: State, - query_params: Query, -) -> PaginatedSkimmedNodes { - entry_gateways_basic(state, query_params, true).await -} - -/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) -/// that are currently bonded and support entry gateway role. 
-#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParams), - path = "/entry-gateways/all", - context_path = "/v1/unstable/nym-nodes/skimmed", - responses( - (status = 200, content( - (PaginatedCachedNodesResponseSchema = "application/json"), - (PaginatedCachedNodesResponseSchema = "application/yaml"), - (PaginatedCachedNodesResponseSchema = "application/bincode") - )) - ), -)] -pub(super) async fn entry_gateways_basic_all( - state: State, - query_params: Query, -) -> PaginatedSkimmedNodes { - entry_gateways_basic(state, query_params, false).await -} - -async fn exit_gateways_basic( - state: State, - query_params: Query, - active_only: bool, -) -> PaginatedSkimmedNodes { - let output = query_params.output.unwrap_or_default(); - - // 1. grab all relevant described nym-nodes - let describe_cache = state.describe_nodes_cache_data().await?; - let mixing_nym_nodes = describe_cache.exit_capable_nym_nodes(); - - build_skimmed_nodes_response( - &state.0, - query_params, - mixing_nym_nodes, - |state| state.legacy_gateways_annotations(), - active_only, - output, - ) - .await -} - -/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) -/// that are currently bonded and are in the active set with the exit role. -#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParams), - path = "/exit-gateways/active", - context_path = "/v1/unstable/nym-nodes/skimmed", - responses( - (status = 200, content( - (PaginatedCachedNodesResponseSchema = "application/json"), - (PaginatedCachedNodesResponseSchema = "application/yaml"), - (PaginatedCachedNodesResponseSchema = "application/bincode") - )) - ), -)] -pub(super) async fn exit_gateways_basic_active( - state: State, - query_params: Query, -) -> PaginatedSkimmedNodes { - exit_gateways_basic(state, query_params, true).await -} - -/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) -/// that are currently bonded and support exit gateway role. 
-#[utoipa::path( - tag = "Unstable Nym Nodes", - get, - params(NodesParams), - path = "/exit-gateways/all", - context_path = "/v1/unstable/nym-nodes/skimmed", - responses( - (status = 200, content( - (PaginatedCachedNodesResponseSchema = "application/json"), - (PaginatedCachedNodesResponseSchema = "application/yaml"), - (PaginatedCachedNodesResponseSchema = "application/bincode") - )) - ), -)] -pub(super) async fn exit_gateways_basic_all( - state: State, - query_params: Query, -) -> PaginatedSkimmedNodes { - exit_gateways_basic(state, query_params, false).await -} diff --git a/nym-api/src/support/caching/cache.rs b/nym-api/src/support/caching/cache.rs index da746399f54..2f782f6098f 100644 --- a/nym-api/src/support/caching/cache.rs +++ b/nym-api/src/support/caching/cache.rs @@ -7,6 +7,7 @@ use std::time::Duration; use thiserror::Error; use time::OffsetDateTime; use tokio::sync::{RwLock, RwLockMappedWriteGuard, RwLockReadGuard, RwLockWriteGuard}; +use tracing::debug; #[derive(Debug, Error)] #[error("the cache item has not been initialised")] @@ -31,13 +32,23 @@ impl SharedCache { SharedCache::default() } - pub(crate) async fn update(&self, value: impl Into) { - let mut guard = self.0.write().await; + pub(crate) async fn try_update(&self, value: impl Into, typ: &str) -> Result<(), T> { + let value = value.into(); + let mut guard = match tokio::time::timeout(Duration::from_millis(200), self.0.write()).await + { + Ok(guard) => guard, + Err(_) => { + debug!("failed to obtain write permit for {typ} cache"); + return Err(value); + } + }; + if let Some(ref mut existing) = guard.inner { existing.unchecked_update(value) } else { - guard.inner = Some(Cache::new(value.into())) - } + guard.inner = Some(Cache::new(value)) + }; + Ok(()) } pub(crate) async fn get(&self) -> Result>, UninitialisedCache> { @@ -104,6 +115,7 @@ pub struct Cache { } impl Cache> { + #[allow(dead_code)] pub(crate) fn transpose(self) -> Option> { self.value.map(|value| Cache { value, @@ -133,6 +145,16 @@ 
impl Cache { } } + pub(crate) fn as_mapped(this: &Self, f: F) -> Cache + where + F: Fn(&T) -> U, + { + Cache { + value: f(&this.value), + as_at: this.as_at, + } + } + // ugh. I hate to expose it, but it'd have broken pre-existing code pub(crate) fn clone_cache(&self) -> Self where diff --git a/nym-api/src/support/caching/refresher.rs b/nym-api/src/support/caching/refresher.rs index bee1e49f106..05e6a3a3d49 100644 --- a/nym-api/src/support/caching/refresher.rs +++ b/nym-api/src/support/caching/refresher.rs @@ -5,11 +5,29 @@ use crate::support::caching::cache::SharedCache; use crate::support::caching::CacheNotification; use async_trait::async_trait; use nym_task::TaskClient; +use std::sync::Arc; use std::time::Duration; -use tokio::sync::watch; +use tokio::sync::{watch, Notify}; use tokio::time::interval; use tracing::{error, info, trace, warn}; +pub(crate) type CacheUpdateWatcher = watch::Receiver; + +#[derive(Clone)] +pub struct RefreshRequester(Arc); + +impl RefreshRequester { + pub(crate) fn request_cache_refresh(&self) { + self.0.notify_waiters() + } +} + +impl Default for RefreshRequester { + fn default() -> Self { + RefreshRequester(Arc::new(Notify::new())) + } +} + pub struct CacheRefresher { name: String, refreshing_interval: Duration, @@ -18,11 +36,11 @@ pub struct CacheRefresher { // TODO: the Send + Sync bounds are only required for the `start` method. could we maybe make it less restrictive? provider: Box + Send + Sync>, shared_cache: SharedCache, - // triggers: Vec>, + refresh_requester: RefreshRequester, } #[async_trait] -pub trait CacheItemProvider { +pub(crate) trait CacheItemProvider { type Item; type Error: std::error::Error; @@ -31,28 +49,6 @@ pub trait CacheItemProvider { async fn try_refresh(&self) -> Result; } -// pub struct TriggerFailure; -// -// #[async_trait] -// pub trait RefreshTriggerTrait { -// async fn triggerred(&mut self) -> Result<(), TriggerFailure>; -// } -// -// // TODO: how to get rid of `T: Send + Sync`? 
it really doesn't need to be Send + Sync -// // since it's wrapped in Shared internally anyway -// #[async_trait] -// impl RefreshTriggerTrait for watch::Receiver -// where -// T: Send + Sync, -// { -// async fn triggerred(&mut self) -> Result<(), TriggerFailure> { -// self.changed().await.map_err(|err| { -// error!("failed to process refresh trigger: {err}"); -// TriggerFailure -// }) -// } -// } - impl CacheRefresher where E: std::error::Error, @@ -69,6 +65,7 @@ where refresh_notification_sender, provider: item_provider, shared_cache: SharedCache::new(), + refresh_requester: Default::default(), } } @@ -85,6 +82,7 @@ where refresh_notification_sender, provider: item_provider, shared_cache, + refresh_requester: Default::default(), } } @@ -94,10 +92,14 @@ where self } - pub(crate) fn update_watcher(&self) -> watch::Receiver { + pub(crate) fn update_watcher(&self) -> CacheUpdateWatcher { self.refresh_notification_sender.subscribe() } + pub(crate) fn refresh_requester(&self) -> RefreshRequester { + self.refresh_requester.clone() + } + #[allow(dead_code)] pub(crate) fn get_shared_cache(&self) -> SharedCache { self.shared_cache.clone() @@ -106,21 +108,44 @@ where // TODO: in the future offer 2 options of refreshing cache. 
either provide `T` directly // or via `FnMut(&mut T)` closure async fn do_refresh_cache(&self) { - match self.provider.try_refresh().await { - Ok(updated_items) => { - self.shared_cache.update(updated_items).await; - if !self.refresh_notification_sender.is_closed() - && self - .refresh_notification_sender - .send(CacheNotification::Updated) - .is_err() - { - warn!("failed to send cache update notification"); - } - } + let mut updated_items = match self.provider.try_refresh().await { Err(err) => { - error!("{}: failed to refresh the cache: {err}", self.name) + error!("{}: failed to refresh the cache: {err}", self.name); + return; + } + Ok(items) => items, + }; + + let mut failures = 0; + loop { + match self + .shared_cache + .try_update(updated_items, &self.name) + .await + { + Ok(_) => break, + Err(returned) => { + failures += 1; + updated_items = returned + } + }; + if failures % 10 == 0 { + warn!( + "failed to obtain write permit for {} cache {failures} times in a row!", + self.name + ); } + + tokio::time::sleep(Duration::from_secs_f32(0.5)).await + } + + if !self.refresh_notification_sender.is_closed() + && self + .refresh_notification_sender + .send(CacheNotification::Updated) + .is_err() + { + warn!("failed to send cache update notification"); } } @@ -147,6 +172,13 @@ where trace!("{}: Received shutdown", self.name) } _ = refresh_interval.tick() => self.refresh(&mut task_client).await, + // note: `Notify` is not cancellation safe, HOWEVER, there's only one listener, + // so it doesn't matter if we lose our queue position + _ = self.refresh_requester.0.notified() => { + self.refresh(&mut task_client).await; + // since we just performed the full request, we can reset our existing interval + refresh_interval.reset(); + } } } } @@ -159,7 +191,7 @@ where tokio::spawn(async move { self.run(task_client).await }); } - pub fn start_with_watcher(self, task_client: TaskClient) -> watch::Receiver + pub fn start_with_watcher(self, task_client: TaskClient) -> 
CacheUpdateWatcher where T: Send + Sync + 'static, E: Send + Sync + 'static, diff --git a/nym-api/src/support/cli/run.rs b/nym-api/src/support/cli/run.rs index 334a800657d..30219fc4a9b 100644 --- a/nym-api/src/support/cli/run.rs +++ b/nym-api/src/support/cli/run.rs @@ -10,8 +10,9 @@ use crate::ecash::dkg::controller::keys::{ use crate::ecash::dkg::controller::DkgController; use crate::ecash::state::EcashState; use crate::epoch_operations::EpochAdvancer; +use crate::key_rotation::KeyRotationController; use crate::network::models::NetworkDetails; -use crate::node_describe_cache::DescribedNodes; +use crate::node_describe_cache::cache::DescribedNodes; use crate::node_status_api::handlers::unstable; use crate::node_status_api::uptime_updater::HistoricalUptimeUpdater; use crate::node_status_api::NodeStatusCache; @@ -20,14 +21,14 @@ use crate::status::{ApiStatusState, SignerState}; use crate::support::caching::cache::SharedCache; use crate::support::config::helpers::try_load_current_config; use crate::support::config::{Config, DEFAULT_CHAIN_STATUS_CACHE_TTL}; -use crate::support::http::state::{ - AppState, ChainStatusCache, ForcedRefresh, ShutdownHandles, TASK_MANAGER_TIMEOUT_S, -}; -use crate::support::http::RouterBuilder; +use crate::support::http::state::chain_status::ChainStatusCache; +use crate::support::http::state::force_refresh::ForcedRefresh; +use crate::support::http::state::AppState; +use crate::support::http::{RouterBuilder, ShutdownHandles, TASK_MANAGER_TIMEOUT_S}; use crate::support::nyxd; use crate::support::storage::runtime_migrations::m001_directory_services_v2_1::migrate_to_directory_services_v2_1; use crate::support::storage::NymApiStorage; -use crate::unstable_routes::account::cache::AddressInfoCache; +use crate::unstable_routes::v1::account::cache::AddressInfoCache; use crate::{ circulating_supply_api, ecash, epoch_operations, network_monitor, node_describe_cache, node_status_api, nym_contract_cache, @@ -106,7 +107,7 @@ pub(crate) struct Args { 
pub(crate) allow_illegal_ips: bool, } -async fn start_nym_api_tasks_axum(config: &Config) -> anyhow::Result { +async fn start_nym_api_tasks(config: &Config) -> anyhow::Result { let task_manager = TaskManager::new(TASK_MANAGER_TIMEOUT_S); let nyxd_client = nyxd::Client::new(config)?; @@ -213,28 +214,34 @@ async fn start_nym_api_tasks_axum(config: &Config) -> anyhow::Result anyhow::Result anyhow::Result<()> { config.validate()?; - let mut axum_shutdown = start_nym_api_tasks_axum(&config).await?; + let mut axum_shutdown = start_nym_api_tasks(&config).await?; // it doesn't matter which server catches the interrupt: it needs only be caught once if let Err(err) = axum_shutdown.task_manager_mut().catch_interrupt().await { diff --git a/nym-api/src/support/http/mod.rs b/nym-api/src/support/http/mod.rs index b9f12530ee8..39049f38d32 100644 --- a/nym-api/src/support/http/mod.rs +++ b/nym-api/src/support/http/mod.rs @@ -1,10 +1,62 @@ // Copyright 2022-2023 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only +use nym_task::TaskManager; +use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; + pub(crate) mod helpers; pub(crate) mod openapi; pub(crate) mod router; pub(crate) mod state; + pub(crate) use router::RouterBuilder; -use crate::unstable_routes; +pub(crate) const TASK_MANAGER_TIMEOUT_S: u64 = 10; + +/// Shutdown goes 2 directions: +/// 1. signal background tasks to gracefully finish +/// 2. signal server itself +/// +/// These are done through separate shutdown handles. Of course, shut down server +/// AFTER you have shut down BG tasks (or past their grace period). +pub(crate) struct ShutdownHandles { + task_manager: TaskManager, + axum_shutdown_button: ShutdownAxum, + /// Tokio JoinHandle for axum server's task + axum_join_handle: AxumJoinHandle, +} + +impl ShutdownHandles { + /// Cancellation token is given to Axum server constructor. When the token + /// receives a shutdown signal, Axum server will shut down gracefully. 
+ pub(crate) fn new( + task_manager: TaskManager, + axum_server_handle: AxumJoinHandle, + shutdown_button: CancellationToken, + ) -> Self { + Self { + task_manager, + axum_shutdown_button: ShutdownAxum(shutdown_button.clone()), + axum_join_handle: axum_server_handle, + } + } + + pub(crate) fn task_manager_mut(&mut self) -> &mut TaskManager { + &mut self.task_manager + } + + /// Signal server to shut down, then return join handle to its + /// `tokio` task + /// + /// https://tikv.github.io/doc/tokio/task/struct.JoinHandle.html + #[must_use] + pub(crate) fn shutdown_axum(self) -> AxumJoinHandle { + self.axum_shutdown_button.0.cancel(); + self.axum_join_handle + } +} + +struct ShutdownAxum(CancellationToken); + +type AxumJoinHandle = JoinHandle>; diff --git a/nym-api/src/support/http/router.rs b/nym-api/src/support/http/router.rs index 588b0e41343..8925b3beee2 100644 --- a/nym-api/src/support/http/router.rs +++ b/nym-api/src/support/http/router.rs @@ -11,7 +11,8 @@ use crate::nym_nodes::handlers::nym_node_routes; use crate::status; use crate::support::http::openapi::ApiDoc; use crate::support::http::state::AppState; -use crate::support::http::unstable_routes::unstable_routes; +use crate::unstable_routes::v1::unstable_routes_v1; +use crate::unstable_routes::v2::unstable_routes_v2; use anyhow::anyhow; use axum::response::Redirect; use axum::routing::get; @@ -64,8 +65,9 @@ impl RouterBuilder { .nest("/api-status", status::handlers::api_status_routes()) .nest("/nym-nodes", nym_node_routes()) .nest("/ecash", ecash_routes()) - .nest("/unstable", unstable_routes()), // CORS layer needs to be "outside" of routes - ); + .nest("/unstable", unstable_routes_v1()), // CORS layer needs to be "outside" of routes + ) + .nest("/v2", Router::new().nest("/unstable", unstable_routes_v2())); Self { unfinished_router: default_routes, diff --git a/nym-api/src/support/http/state/chain_status.rs b/nym-api/src/support/http/state/chain_status.rs new file mode 100644 index 
00000000000..5dfd0130ac9 --- /dev/null +++ b/nym-api/src/support/http/state/chain_status.rs @@ -0,0 +1,91 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node_status_api::models::AxumErrorResponse; +use crate::support::nyxd::Client; +use nym_api_requests::models::DetailedChainStatus; +use std::sync::Arc; +use std::time::Duration; +use time::OffsetDateTime; +use tokio::sync::RwLock; + +#[derive(Clone)] +pub(crate) struct ChainStatusCache { + cache_ttl: Duration, + inner: Arc>>, +} + +impl ChainStatusCache { + pub(crate) fn new(cache_ttl: Duration) -> Self { + ChainStatusCache { + cache_ttl, + inner: Arc::new(Default::default()), + } + } +} + +struct ChainStatusCacheInner { + last_refreshed_at: OffsetDateTime, + cache_value: DetailedChainStatus, +} + +impl ChainStatusCacheInner { + fn is_valid(&self, ttl: Duration) -> bool { + if self.last_refreshed_at + ttl > OffsetDateTime::now_utc() { + return true; + } + false + } +} + +impl ChainStatusCache { + pub(crate) async fn get_or_refresh( + &self, + client: &Client, + ) -> Result { + if let Some(cached) = self.check_cache().await { + return Ok(cached); + } + + self.refresh(client).await + } + + async fn check_cache(&self) -> Option { + let guard = self.inner.read().await; + let inner = guard.as_ref()?; + if inner.is_valid(self.cache_ttl) { + return Some(inner.cache_value.clone()); + } + None + } + + async fn refresh(&self, client: &Client) -> Result { + // 1. attempt to get write lock permit + let mut guard = self.inner.write().await; + + // 2. check if another task hasn't already updated the cache whilst we were waiting for the permit + if let Some(cached) = guard.as_ref() { + if cached.is_valid(self.cache_ttl) { + return Ok(cached.cache_value.clone()); + } + } + + // 3. 
attempt to query the chain for the chain data + let abci = client.abci_info().await?; + let block = client + .block_info(abci.last_block_height.value() as u32) + .await?; + + let status = DetailedChainStatus { + abci: abci.into(), + latest_block: block.into(), + }; + + *guard = Some(ChainStatusCacheInner { + last_refreshed_at: OffsetDateTime::now_utc(), + cache_value: status.clone(), + }); + + Ok(status) + } +} diff --git a/nym-api/src/support/http/state/force_refresh.rs b/nym-api/src/support/http/state/force_refresh.rs new file mode 100644 index 00000000000..667de00371f --- /dev/null +++ b/nym-api/src/support/http/state/force_refresh.rs @@ -0,0 +1,34 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use nym_mixnet_contract_common::NodeId; +use std::collections::HashMap; +use std::sync::Arc; +use time::OffsetDateTime; +use tokio::sync::RwLock; + +#[derive(Clone)] +pub(crate) struct ForcedRefresh { + pub(crate) allow_all_ip_addresses: bool, + pub(crate) refreshes: Arc>>, +} + +impl ForcedRefresh { + pub(crate) fn new(allow_all_ip_addresses: bool) -> ForcedRefresh { + ForcedRefresh { + allow_all_ip_addresses, + refreshes: Arc::new(Default::default()), + } + } + + pub(crate) async fn last_refreshed(&self, node_id: NodeId) -> Option { + self.refreshes.read().await.get(&node_id).copied() + } + + pub(crate) async fn set_last_refreshed(&self, node_id: NodeId) { + self.refreshes + .write() + .await + .insert(node_id, OffsetDateTime::now_utc()); + } +} diff --git a/nym-api/src/support/http/state.rs b/nym-api/src/support/http/state/mod.rs similarity index 50% rename from nym-api/src/support/http/state.rs rename to nym-api/src/support/http/state/mod.rs index 2fc65ff5a0f..b3bcf49216e 100644 --- a/nym-api/src/support/http/state.rs +++ b/nym-api/src/support/http/state/mod.rs @@ -4,7 +4,7 @@ use crate::circulating_supply_api::cache::CirculatingSupplyCache; use crate::ecash::state::EcashState; use crate::network::models::NetworkDetails; -use 
crate::node_describe_cache::DescribedNodes; +use crate::node_describe_cache::cache::DescribedNodes; use crate::node_status_api::handlers::unstable; use crate::node_status_api::models::AxumErrorResponse; use crate::node_status_api::NodeStatusCache; @@ -12,73 +12,22 @@ use crate::nym_contract_cache::cache::NymContractCache; use crate::status::ApiStatusState; use crate::support::caching::cache::SharedCache; use crate::support::caching::Cache; +use crate::support::http::state::chain_status::ChainStatusCache; +use crate::support::http::state::force_refresh::ForcedRefresh; use crate::support::nyxd::Client; use crate::support::storage; -use crate::unstable_routes::account::cache::AddressInfoCache; -use crate::unstable_routes::models::NyxAccountDetails; +use crate::unstable_routes::v1::account::cache::AddressInfoCache; +use crate::unstable_routes::v1::account::models::NyxAccountDetails; use axum::extract::FromRef; -use nym_api_requests::models::{ - DetailedChainStatus, GatewayBondAnnotated, MixNodeBondAnnotated, NodeAnnotation, -}; +use nym_api_requests::models::{GatewayBondAnnotated, MixNodeBondAnnotated, NodeAnnotation}; use nym_mixnet_contract_common::NodeId; -use nym_task::TaskManager; use nym_topology::CachedEpochRewardedSet; use std::collections::HashMap; use std::sync::Arc; -use std::time::Duration; -use time::OffsetDateTime; -use tokio::sync::{RwLock, RwLockReadGuard}; -use tokio::task::JoinHandle; -use tokio_util::sync::CancellationToken; +use tokio::sync::RwLockReadGuard; -pub(crate) const TASK_MANAGER_TIMEOUT_S: u64 = 10; - -/// Shutdown goes 2 directions: -/// 1. signal background tasks to gracefully finish -/// 2. signal server itself -/// -/// These are done through separate shutdown handles. Of course, shut down server -/// AFTER you have shut down BG tasks (or past their grace period). 
-pub(crate) struct ShutdownHandles { - task_manager: TaskManager, - axum_shutdown_button: ShutdownAxum, - /// Tokio JoinHandle for axum server's task - axum_join_handle: AxumJoinHandle, -} - -impl ShutdownHandles { - /// Cancellation token is given to Axum server constructor. When the token - /// receives a shutdown signal, Axum server will shut down gracefully. - pub(crate) fn new( - task_manager: TaskManager, - axum_server_handle: AxumJoinHandle, - shutdown_button: CancellationToken, - ) -> Self { - Self { - task_manager, - axum_shutdown_button: ShutdownAxum(shutdown_button.clone()), - axum_join_handle: axum_server_handle, - } - } - - pub(crate) fn task_manager_mut(&mut self) -> &mut TaskManager { - &mut self.task_manager - } - - /// Signal server to shut down, then return join handle to its - /// `tokio` task - /// - /// https://tikv.github.io/doc/tokio/task/struct.JoinHandle.html - #[must_use] - pub(crate) fn shutdown_axum(self) -> AxumJoinHandle { - self.axum_shutdown_button.0.cancel(); - self.axum_join_handle - } -} - -struct ShutdownAxum(CancellationToken); - -type AxumJoinHandle = JoinHandle>; +pub(crate) mod chain_status; +pub(crate) mod force_refresh; #[derive(Clone)] pub(crate) struct AppState { @@ -113,113 +62,6 @@ impl FromRef for Arc { } } -#[derive(Clone)] -pub(crate) struct ForcedRefresh { - pub(crate) allow_all_ip_addresses: bool, - pub(crate) refreshes: Arc>>, -} - -impl ForcedRefresh { - pub(crate) fn new(allow_all_ip_addresses: bool) -> ForcedRefresh { - ForcedRefresh { - allow_all_ip_addresses, - refreshes: Arc::new(Default::default()), - } - } - - pub(crate) async fn last_refreshed(&self, node_id: NodeId) -> Option { - self.refreshes.read().await.get(&node_id).copied() - } - - pub(crate) async fn set_last_refreshed(&self, node_id: NodeId) { - self.refreshes - .write() - .await - .insert(node_id, OffsetDateTime::now_utc()); - } -} - -#[derive(Clone)] -pub(crate) struct ChainStatusCache { - cache_ttl: Duration, - inner: Arc>>, -} - -impl 
ChainStatusCache { - pub(crate) fn new(cache_ttl: Duration) -> Self { - ChainStatusCache { - cache_ttl, - inner: Arc::new(Default::default()), - } - } -} - -struct ChainStatusCacheInner { - last_refreshed_at: OffsetDateTime, - cache_value: DetailedChainStatus, -} - -impl ChainStatusCacheInner { - fn is_valid(&self, ttl: Duration) -> bool { - if self.last_refreshed_at + ttl > OffsetDateTime::now_utc() { - return true; - } - false - } -} - -impl ChainStatusCache { - pub(crate) async fn get_or_refresh( - &self, - client: &Client, - ) -> Result { - if let Some(cached) = self.check_cache().await { - return Ok(cached); - } - - self.refresh(client).await - } - - async fn check_cache(&self) -> Option { - let guard = self.inner.read().await; - let inner = guard.as_ref()?; - if inner.is_valid(self.cache_ttl) { - return Some(inner.cache_value.clone()); - } - None - } - - async fn refresh(&self, client: &Client) -> Result { - // 1. attempt to get write lock permit - let mut guard = self.inner.write().await; - - // 2. check if another task hasn't already updated the cache whilst we were waiting for the permit - if let Some(cached) = guard.as_ref() { - if cached.is_valid(self.cache_ttl) { - return Ok(cached.cache_value.clone()); - } - } - - // 3. 
attempt to query the chain for the chain data - let abci = client.abci_info().await?; - let block = client - .block_info(abci.last_block_height.value() as u32) - .await?; - - let status = DetailedChainStatus { - abci: abci.into(), - latest_block: block.into(), - }; - - *guard = Some(ChainStatusCacheInner { - last_refreshed_at: OffsetDateTime::now_utc(), - cache_value: status.clone(), - }); - - Ok(status) - } -} - impl AppState { pub(crate) fn nym_contract_cache(&self) -> &NymContractCache { &self.nym_contract_cache @@ -260,11 +102,8 @@ impl AppState { pub(crate) async fn rewarded_set( &self, - ) -> Result>, AxumErrorResponse> { - self.nym_contract_cache() - .rewarded_set() - .await - .ok_or_else(AxumErrorResponse::internal) + ) -> Result, AxumErrorResponse> { + Ok(self.nym_contract_cache().cached_rewarded_set().await?) } pub(crate) async fn node_annotations( diff --git a/nym-api/src/support/legacy_helpers.rs b/nym-api/src/support/legacy_helpers.rs index abc95b1b6d4..430a73aad6b 100644 --- a/nym-api/src/support/legacy_helpers.rs +++ b/nym-api/src/support/legacy_helpers.rs @@ -4,12 +4,12 @@ use nym_api_requests::legacy::{LegacyMixNodeBondWithLayer, LegacyMixNodeDetailsWithLayer}; use nym_api_requests::models::NymNodeData; use nym_config::defaults::DEFAULT_NYM_NODE_HTTP_PORT; -use nym_crypto::aes::cipher::crypto_common::rand_core::OsRng; use nym_mixnet_contract_common::mixnode::LegacyPendingMixNodeChanges; use nym_mixnet_contract_common::{ Gateway, GatewayBond, LegacyMixLayer, MixNode, MixNodeBond, NymNodeDetails, }; use rand::prelude::SliceRandom; +use rand::rngs::OsRng; use std::net::{IpAddr, ToSocketAddrs}; use std::str::FromStr; @@ -61,7 +61,12 @@ pub(crate) fn to_legacy_mixnode( .node .custom_http_port .unwrap_or(DEFAULT_NYM_NODE_HTTP_PORT), - sphinx_key: description.host_information.keys.x25519.to_base58_string(), + sphinx_key: description + .host_information + .keys + .current_x25519_sphinx_key + .public_key + .to_base58_string(), identity_key: 
nym_node.bond_information.node.identity_key.clone(), version: description.build_information.build_version.clone(), }, @@ -95,7 +100,12 @@ pub(crate) fn to_legacy_gateway( .location .map(|c| c.to_string()) .unwrap_or_default(), - sphinx_key: description.host_information.keys.x25519.to_base58_string(), + sphinx_key: description + .host_information + .keys + .current_x25519_sphinx_key + .public_key + .to_base58_string(), identity_key: nym_node.bond_information.node.identity_key.clone(), version: description.build_information.build_version.clone(), }, diff --git a/nym-api/src/support/nyxd/mod.rs b/nym-api/src/support/nyxd/mod.rs index 75490e8a4cc..41ef7d2bc44 100644 --- a/nym-api/src/support/nyxd/mod.rs +++ b/nym-api/src/support/nyxd/mod.rs @@ -30,8 +30,8 @@ use nym_mixnet_contract_common::nym_node::Role; use nym_mixnet_contract_common::reward_params::RewardingParams; use nym_mixnet_contract_common::{ ConfigScoreParams, CurrentIntervalResponse, Delegation, EpochRewardedSet, EpochStatus, - ExecuteMsg, GatewayBond, HistoricalNymNodeVersionEntry, IdentityKey, NymNodeDetails, - RewardedSet, RoleAssignment, + ExecuteMsg, GatewayBond, HistoricalNymNodeVersionEntry, IdentityKey, KeyRotationState, + NymNodeDetails, RewardedSet, RoleAssignment, }; use nym_validator_client::coconut::EcashApiError; use nym_validator_client::nyxd::contract_traits::mixnet_query_client::MixnetQueryClientExt; @@ -239,6 +239,10 @@ impl Client { nyxd_query!(self, get_all_preassigned_gateway_ids().await) } + pub(crate) async fn get_key_rotation_state(&self) -> Result { + nyxd_query!(self, get_key_rotation_state().await) + } + pub(crate) async fn get_config_score_params(&self) -> Result { nyxd_query!(self, get_mixnet_contract_state_params().await) .map(|state| state.config_score_params) diff --git a/nym-api/src/support/storage/manager.rs b/nym-api/src/support/storage/manager.rs index e30b316c0d9..5e1d2eeb7cf 100644 --- a/nym-api/src/support/storage/manager.rs +++ b/nym-api/src/support/storage/manager.rs 
@@ -11,7 +11,6 @@ use crate::support::storage::models::{ use crate::support::storage::DbIdCache; use nym_mixnet_contract_common::{EpochId, IdentityKey, NodeId}; use nym_types::monitoring::NodeResult; -use sqlx::FromRow; use time::{Date, OffsetDateTime}; use tracing::info; @@ -20,113 +19,8 @@ pub(crate) struct StorageManager { pub(crate) connection_pool: sqlx::SqlitePool, } -pub struct AvgMixnodeReliability { - mix_id: NodeId, - value: Option, -} - -impl AvgMixnodeReliability { - pub fn mix_id(&self) -> NodeId { - self.mix_id - } - - pub fn value(&self) -> f32 { - self.value.unwrap_or_default() - } -} - -#[derive(FromRow)] -pub struct AvgGatewayReliability { - node_id: NodeId, - value: Option, -} - -impl AvgGatewayReliability { - pub fn node_id(&self) -> NodeId { - self.node_id - } - - pub fn value(&self) -> f32 { - self.value.unwrap_or_default() - } -} - // all SQL goes here impl StorageManager { - pub(super) async fn get_all_avg_mix_reliability_in_last_24hr( - &self, - end_ts_secs: i64, - ) -> Result, sqlx::Error> { - let start_ts_secs = end_ts_secs - 86400; - self.get_all_avg_mix_reliability_in_time_interval(start_ts_secs, end_ts_secs) - .await - } - - pub(super) async fn get_all_avg_gateway_reliability_in_last_24hr( - &self, - end_ts_secs: i64, - ) -> Result, sqlx::Error> { - let start_ts_secs = end_ts_secs - 86400; - self.get_all_avg_gateway_reliability_in_interval(start_ts_secs, end_ts_secs) - .await - } - - pub(super) async fn get_all_avg_mix_reliability_in_time_interval( - &self, - start_ts_secs: i64, - end_ts_secs: i64, - ) -> Result, sqlx::Error> { - let result = sqlx::query_as!( - AvgMixnodeReliability, - r#" - SELECT - d.mix_id as "mix_id: NodeId", - AVG(s.reliability) as "value: f32" - FROM - mixnode_details d - JOIN - mixnode_status s on d.id = s.mixnode_details_id - WHERE - timestamp >= ? AND - timestamp <= ? 
- GROUP BY 1 - "#, - start_ts_secs, - end_ts_secs - ) - .fetch_all(&self.connection_pool) - .await?; - Ok(result) - } - - pub(super) async fn get_all_avg_gateway_reliability_in_interval( - &self, - start_ts_secs: i64, - end_ts_secs: i64, - ) -> Result, sqlx::Error> { - let result = sqlx::query_as!( - AvgGatewayReliability, - r#" - SELECT - d.node_id as "node_id: NodeId", - CASE WHEN count(*) > 3 THEN AVG(reliability) ELSE 100 END as "value: f32" - FROM - gateway_details d - JOIN - gateway_status s on d.id = s.gateway_details_id - WHERE - timestamp >= ? AND - timestamp <= ? - GROUP BY 1 - "#, - start_ts_secs, - end_ts_secs - ) - .fetch_all(&self.connection_pool) - .await?; - Ok(result) - } - /// Tries to obtain row id of given mixnode given its identity. /// /// # Arguments diff --git a/nym-api/src/support/storage/mod.rs b/nym-api/src/support/storage/mod.rs index cc10d55e76c..48f741fb58e 100644 --- a/nym-api/src/support/storage/mod.rs +++ b/nym-api/src/support/storage/mod.rs @@ -1,7 +1,6 @@ // Copyright 2021 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use self::manager::{AvgGatewayReliability, AvgMixnodeReliability}; use crate::network_monitor::monitor::summary_producer::TestReport; use crate::network_monitor::test_route::TestRoute; use crate::node_status_api::models::{ @@ -134,30 +133,6 @@ impl NymApiStorage { Ok(None) } - pub(crate) async fn get_all_avg_gateway_reliability_in_last_24hr( - &self, - end_ts_secs: i64, - ) -> Result, NymApiStorageError> { - let result = self - .manager - .get_all_avg_gateway_reliability_in_last_24hr(end_ts_secs) - .await?; - - Ok(result) - } - - pub(crate) async fn get_all_avg_mix_reliability_in_last_24hr( - &self, - end_ts_secs: i64, - ) -> Result, NymApiStorageError> { - let result = self - .manager - .get_all_avg_mix_reliability_in_last_24hr(end_ts_secs) - .await?; - - Ok(result) - } - /// Gets all statuses for particular mixnode that were inserted /// since the provided timestamp. 
/// diff --git a/nym-api/src/nym_nodes/handlers/unstable/helpers.rs b/nym-api/src/unstable_routes/helpers.rs similarity index 95% rename from nym-api/src/nym_nodes/handlers/unstable/helpers.rs rename to nym-api/src/unstable_routes/helpers.rs index 3940d992379..4627ebce94e 100644 --- a/nym-api/src/nym_nodes/handlers/unstable/helpers.rs +++ b/nym-api/src/unstable_routes/helpers.rs @@ -1,4 +1,4 @@ -// Copyright 2024 - Nym Technologies SA +// Copyright 2025 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only use nym_api_requests::models::{ diff --git a/nym-api/src/unstable_routes/mod.rs b/nym-api/src/unstable_routes/mod.rs index d48ecb1e1e3..d95805ba63b 100644 --- a/nym-api/src/unstable_routes/mod.rs +++ b/nym-api/src/unstable_routes/mod.rs @@ -1,15 +1,6 @@ // Copyright 2025 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -pub(crate) mod account; -pub(crate) mod models; - -use crate::support::http::state::AppState; -use axum::Router; - -// as those get stabilised, they should get deprecated and use a redirection instead -pub(crate) fn unstable_routes() -> Router { - Router::new() - .nest("/nym-nodes", crate::nym_nodes::handlers::unstable::routes()) - .nest("/account", account::routes()) -} +pub(crate) mod helpers; +pub(crate) mod v1; +pub(crate) mod v2; diff --git a/nym-api/src/unstable_routes/account/cache.rs b/nym-api/src/unstable_routes/v1/account/cache.rs similarity index 93% rename from nym-api/src/unstable_routes/account/cache.rs rename to nym-api/src/unstable_routes/v1/account/cache.rs index 261018962a8..ca4cb60020e 100644 --- a/nym-api/src/unstable_routes/account/cache.rs +++ b/nym-api/src/unstable_routes/v1/account/cache.rs @@ -1,11 +1,6 @@ -use crate::{ - node_status_api::models::AxumResult, - nym_contract_cache::cache::NymContractCache, - unstable_routes::{ - account::data_collector::AddressDataCollector, - models::{NyxAccountDelegationDetails, NyxAccountDetails}, - }, -}; +use 
crate::unstable_routes::v1::account::data_collector::AddressDataCollector; +use crate::unstable_routes::v1::account::models::{NyxAccountDelegationDetails, NyxAccountDetails}; +use crate::{node_status_api::models::AxumResult, nym_contract_cache::cache::NymContractCache}; use moka::{future::Cache, Entry}; use nym_validator_client::nyxd::AccountId; use std::{sync::Arc, time::Duration}; diff --git a/nym-api/src/unstable_routes/account/data_collector.rs b/nym-api/src/unstable_routes/v1/account/data_collector.rs similarity index 98% rename from nym-api/src/unstable_routes/account/data_collector.rs rename to nym-api/src/unstable_routes/v1/account/data_collector.rs index f9829b29a82..2cd7b70e1a8 100644 --- a/nym-api/src/unstable_routes/account/data_collector.rs +++ b/nym-api/src/unstable_routes/v1/account/data_collector.rs @@ -1,10 +1,10 @@ // Copyright 2025 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only +use crate::unstable_routes::v1::account::models::NyxAccountDelegationRewardDetails; use crate::{ node_status_api::models::{AxumErrorResponse, AxumResult}, nym_contract_cache::cache::NymContractCache, - unstable_routes::models::NyxAccountDelegationRewardDetails, }; use cosmwasm_std::{Coin, Decimal}; use nym_mixnet_contract_common::NodeRewarding; diff --git a/nym-api/src/unstable_routes/account/mod.rs b/nym-api/src/unstable_routes/v1/account/mod.rs similarity index 96% rename from nym-api/src/unstable_routes/account/mod.rs rename to nym-api/src/unstable_routes/v1/account/mod.rs index 255f49b473c..8b7b83d11cd 100644 --- a/nym-api/src/unstable_routes/account/mod.rs +++ b/nym-api/src/unstable_routes/v1/account/mod.rs @@ -4,13 +4,13 @@ use crate::{ node_status_api::models::{AxumErrorResponse, AxumResult}, support::http::state::AppState, - unstable_routes::models::NyxAccountDetails, }; use axum::{ extract::{Path, State}, routing::get, Json, Router, }; +use models::NyxAccountDetails; use nym_validator_client::nyxd::AccountId; use serde::{Deserialize, Serialize}; 
use std::str::FromStr; @@ -19,6 +19,7 @@ use utoipa::ToSchema; pub(crate) mod cache; pub(crate) mod data_collector; +pub(crate) mod models; pub(crate) fn routes() -> Router { Router::new().route("/:address", get(address)) diff --git a/nym-api/src/unstable_routes/models.rs b/nym-api/src/unstable_routes/v1/account/models.rs similarity index 100% rename from nym-api/src/unstable_routes/models.rs rename to nym-api/src/unstable_routes/v1/account/models.rs diff --git a/nym-api/src/unstable_routes/v1/mod.rs b/nym-api/src/unstable_routes/v1/mod.rs new file mode 100644 index 00000000000..321ae6039ed --- /dev/null +++ b/nym-api/src/unstable_routes/v1/mod.rs @@ -0,0 +1,15 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::support::http::state::AppState; +use axum::Router; + +pub(crate) mod account; +pub(crate) mod nym_nodes; + +// as those get stabilised, they should get deprecated and use a redirection instead +pub(crate) fn unstable_routes_v1() -> Router { + Router::new() + .nest("/nym-nodes", nym_nodes::routes()) + .nest("/account", account::routes()) +} diff --git a/nym-api/src/nym_nodes/handlers/unstable/full_fat.rs b/nym-api/src/unstable_routes/v1/nym_nodes/full_fat/mod.rs similarity index 74% rename from nym-api/src/nym_nodes/handlers/unstable/full_fat.rs rename to nym-api/src/unstable_routes/v1/nym_nodes/full_fat/mod.rs index 2937b0377df..df76984fcf3 100644 --- a/nym-api/src/nym_nodes/handlers/unstable/full_fat.rs +++ b/nym-api/src/unstable_routes/v1/nym_nodes/full_fat/mod.rs @@ -1,9 +1,9 @@ -// Copyright 2024 - Nym Technologies SA +// Copyright 2025 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only use crate::node_status_api::models::{AxumErrorResponse, AxumResult}; -use crate::nym_nodes::handlers::unstable::NodesParamsWithRole; use crate::support::http::state::AppState; +use crate::unstable_routes::v1::nym_nodes::helpers::NodesParamsWithRole; use axum::extract::{Query, State}; use 
nym_api_requests::nym_nodes::{CachedNodesResponse, FullFatNode}; use nym_http_api_common::FormattedResponse; @@ -15,11 +15,11 @@ use nym_http_api_common::FormattedResponse; path = "", context_path = "/v1/unstable/nym-nodes/full-fat", responses( - // (status = 200, body = CachedNodesResponse) + // (status = 200, body = CachedNodesResponse) (status = 501) ) )] -pub(super) async fn nodes_detailed( +pub(crate) async fn nodes_detailed( _state: State, _query_params: Query, ) -> AxumResult>> { diff --git a/nym-api/src/unstable_routes/v1/nym_nodes/handlers.rs b/nym-api/src/unstable_routes/v1/nym_nodes/handlers.rs new file mode 100644 index 00000000000..f2582cec820 --- /dev/null +++ b/nym-api/src/unstable_routes/v1/nym_nodes/handlers.rs @@ -0,0 +1,51 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use crate::node_status_api::models::{AxumErrorResponse, AxumResult}; +use crate::support::http::state::AppState; +use axum::extract::{Query, State}; +use axum::Json; +use nym_api_requests::nym_nodes::{NodesByAddressesRequestBody, NodesByAddressesResponse}; +use nym_http_api_common::{FormattedResponse, OutputParams}; +use std::collections::HashMap; + +#[utoipa::path( + tag = "Unstable Nym Nodes", + post, + request_body = NodesByAddressesRequestBody, + path = "/by-addresses", + context_path = "/v1/unstable/nym-nodes", + responses( + (status = 200, content( + (NodesByAddressesResponse = "application/json"), + (NodesByAddressesResponse = "application/yaml"), + (NodesByAddressesResponse = "application/bincode") + )) + ), + params(OutputParams) +)] +pub(crate) async fn nodes_by_addresses( + Query(output): Query, + state: State, + Json(body): Json, +) -> AxumResult> { + // if the request is too big, simply reject it + if body.addresses.len() > 100 { + return Err(AxumErrorResponse::bad_request( + "requested too many addresses", + )); + } + + let output = output.output.unwrap_or_default(); + + // TODO: perhaps introduce different cache because 
realistically nym-api will receive + // request for the same couple addresses from all nodes in quick succession + let describe_cache = state.describe_nodes_cache_data().await?; + + let mut existence = HashMap::new(); + for address in body.addresses { + existence.insert(address, describe_cache.node_with_address(address)); + } + + Ok(output.to_response(NodesByAddressesResponse { existence })) +} diff --git a/nym-api/src/unstable_routes/v1/nym_nodes/helpers.rs b/nym-api/src/unstable_routes/v1/nym_nodes/helpers.rs new file mode 100644 index 00000000000..d3c021a7a9a --- /dev/null +++ b/nym-api/src/unstable_routes/v1/nym_nodes/helpers.rs @@ -0,0 +1,65 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use crate::support::http::helpers::PaginationRequest; +use nym_api_requests::nym_nodes::NodeRoleQueryParam; +use nym_http_api_common::Output; +use serde::Deserialize; + +#[derive(Debug, Deserialize, utoipa::IntoParams)] +pub(crate) struct NodesParamsWithRole { + #[param(inline)] + pub(crate) role: Option, + + #[allow(dead_code)] + pub(crate) semver_compatibility: Option, + pub(crate) no_legacy: Option, + pub(crate) page: Option, + pub(crate) per_page: Option, + + // Identifier for the current epoch of the topology state. When sent by a client we can check if + // the client already knows about the latest topology state, allowing a `no-updates` response + // instead of wasting bandwidth serving an unchanged topology. + pub(crate) epoch_id: Option, + + pub(crate) output: Option, +} + +#[derive(Debug, Deserialize, utoipa::IntoParams)] +#[into_params(parameter_in = Query)] +pub(crate) struct NodesParams { + #[allow(dead_code)] + pub(crate) semver_compatibility: Option, + pub(crate) no_legacy: Option, + pub(crate) page: Option, + pub(crate) per_page: Option, + + // Identifier for the current epoch of the topology state. 
When sent by a client we can check if + // the client already knows about the latest topology state, allowing a `no-updates` response + // instead of wasting bandwidth serving an unchanged topology. + pub(crate) epoch_id: Option, + pub(crate) output: Option, +} + +impl From for NodesParams { + fn from(params: NodesParamsWithRole) -> Self { + NodesParams { + semver_compatibility: params.semver_compatibility, + no_legacy: params.no_legacy, + page: params.page, + per_page: params.per_page, + epoch_id: params.epoch_id, + output: params.output, + } + } +} + +impl<'a> From<&'a NodesParams> for PaginationRequest { + fn from(params: &'a NodesParams) -> Self { + PaginationRequest { + output: params.output, + page: params.page, + per_page: params.per_page, + } + } +} diff --git a/nym-api/src/unstable_routes/v1/nym_nodes/mod.rs b/nym-api/src/unstable_routes/v1/nym_nodes/mod.rs new file mode 100644 index 00000000000..73499d859bf --- /dev/null +++ b/nym-api/src/unstable_routes/v1/nym_nodes/mod.rs @@ -0,0 +1,80 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +//! All routes/nodes are split into three tiers: +//! +//! `/skimmed` +//! - used by clients +//! - returns the very basic information for routing purposes +//! +//! `/semi-skimmed` +//! - used by other nodes/VPN +//! - returns more additional information such as noise keys +//! +//! `/full-fat` +//! - used by explorers, et al. +//! - returns almost everything there is about the nodes +//! +//! There's also additional split based on the role: +//! - `?role` => filters based on the specific role (mixnode/gateway/(in the future: entry/exit)) +//! - `/mixnodes/` => only returns mixnode role data +//! 
- `/gateway/` => only returns (entry) gateway role data + +use crate::support::http::state::AppState; +use crate::unstable_routes::v1::nym_nodes::full_fat::nodes_detailed; +use crate::unstable_routes::v1::nym_nodes::handlers::nodes_by_addresses; +use crate::unstable_routes::v1::nym_nodes::semi_skimmed::nodes_expanded; +use axum::routing::{get, post}; +use axum::Router; +use tower_http::compression::CompressionLayer; + +#[allow(deprecated)] +use crate::unstable_routes::v1::nym_nodes::skimmed::{ + entry_gateways_basic_active, entry_gateways_basic_all, exit_gateways_basic_active, + exit_gateways_basic_all, mixnodes_basic_active, mixnodes_basic_all, nodes_basic_active, + nodes_basic_all, +}; + +pub(crate) mod full_fat; +pub(crate) mod handlers; +pub(crate) mod helpers; +pub(crate) mod semi_skimmed; +pub(crate) mod skimmed; + +#[allow(deprecated)] +pub(crate) fn routes() -> Router { + Router::new() + .nest( + "/skimmed", + Router::new() + .route("/", get(nodes_basic_all)) + .route("/active", get(nodes_basic_active)) + .nest( + "/mixnodes", + Router::new() + .route("/active", get(mixnodes_basic_active)) + .route("/all", get(mixnodes_basic_all)), + ) + .nest( + "/entry-gateways", + Router::new() + .route("/active", get(entry_gateways_basic_active)) + .route("/all", get(entry_gateways_basic_all)), + ) + .nest( + "/exit-gateways", + Router::new() + .route("/active", get(exit_gateways_basic_active)) + .route("/all", get(exit_gateways_basic_all)), + ), + ) + .nest( + "/semi-skimmed", + Router::new().route("/", get(nodes_expanded)), + ) + .nest("/full-fat", Router::new().route("/", get(nodes_detailed))) + .route("/gateways/skimmed", get(skimmed::deprecated_gateways_basic)) + .route("/mixnodes/skimmed", get(skimmed::deprecated_mixnodes_basic)) + .route("/by-addresses", post(nodes_by_addresses)) + .layer(CompressionLayer::new()) +} diff --git a/nym-api/src/nym_nodes/handlers/unstable/semi_skimmed.rs b/nym-api/src/unstable_routes/v1/nym_nodes/semi_skimmed/mod.rs similarity index 
74% rename from nym-api/src/nym_nodes/handlers/unstable/semi_skimmed.rs rename to nym-api/src/unstable_routes/v1/nym_nodes/semi_skimmed/mod.rs index f7dff11e3e5..e83c2203212 100644 --- a/nym-api/src/nym_nodes/handlers/unstable/semi_skimmed.rs +++ b/nym-api/src/unstable_routes/v1/nym_nodes/semi_skimmed/mod.rs @@ -1,9 +1,9 @@ -// Copyright 2024 - Nym Technologies SA +// Copyright 2025 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only use crate::node_status_api::models::{AxumErrorResponse, AxumResult}; -use crate::nym_nodes::handlers::unstable::NodesParamsWithRole; use crate::support::http::state::AppState; +use crate::unstable_routes::v1::nym_nodes::helpers::NodesParamsWithRole; use axum::extract::{Query, State}; use nym_api_requests::nym_nodes::{CachedNodesResponse, SemiSkimmedNode}; use nym_http_api_common::FormattedResponse; @@ -15,11 +15,11 @@ use nym_http_api_common::FormattedResponse; path = "", context_path = "/v1/unstable/nym-nodes/semi-skimmed", responses( - // (status = 200, body = CachedNodesResponse) + // (status = 200, body = CachedNodesResponse) (status = 501) ) )] -pub(super) async fn nodes_expanded( +pub(crate) async fn nodes_expanded( _state: State, _query_params: Query, ) -> AxumResult>> { diff --git a/nym-api/src/unstable_routes/v1/nym_nodes/skimmed/handlers.rs b/nym-api/src/unstable_routes/v1/nym_nodes/skimmed/handlers.rs new file mode 100644 index 00000000000..ded98f2923a --- /dev/null +++ b/nym-api/src/unstable_routes/v1/nym_nodes/skimmed/handlers.rs @@ -0,0 +1,309 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node_status_api::models::AxumResult; +use crate::support::http::state::AppState; +use crate::unstable_routes::v1::nym_nodes::helpers::{NodesParams, NodesParamsWithRole}; +use crate::unstable_routes::v1::nym_nodes::skimmed::helpers::{ + entry_gateways_basic, exit_gateways_basic, mixnodes_basic, nodes_basic, +}; +use crate::unstable_routes::v1::nym_nodes::skimmed::{ + 
PaginatedCachedNodesResponseSchema, PaginatedSkimmedNodes, +}; +use axum::extract::{Query, State}; +use nym_api_requests::nym_nodes::{CachedNodesResponse, NodeRoleQueryParam, SkimmedNode}; +use nym_http_api_common::FormattedResponse; + +/// Deprecated query that gets ALL gateways +#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/gateways/skimmed", + context_path = "/v1/unstable/nym-nodes", + responses( + (status = 200, content( + (CachedNodesResponse = "application/json"), + (CachedNodesResponse = "application/yaml"), + (CachedNodesResponse = "application/bincode") + )) + ), +)] +#[deprecated(note = "use '/v1/unstable/nym-nodes/entry-gateways/skimmed/all' instead")] +#[allow(deprecated)] +pub(crate) async fn deprecated_gateways_basic( + state: State, + query_params: Query, +) -> AxumResult>> { + let output = query_params.output.unwrap_or_default(); + + // 1. call '/v1/unstable/skimmed/entry-gateways/all' + let all_gateways = entry_gateways_basic_all(state, query_params) + .await? + .into_inner(); + + // 3. return result + Ok(output.to_response(CachedNodesResponse { + refreshed_at: all_gateways.refreshed_at, + // 2. remove pagination + nodes: all_gateways.nodes.data, + })) +} + +/// Deprecated query that gets ACTIVE-ONLY mixnodes +#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/mixnodes/skimmed", + context_path = "/v1/unstable/nym-nodes", + responses( + (status = 200, content( + (CachedNodesResponse = "application/json"), + (CachedNodesResponse = "application/yaml"), + (CachedNodesResponse = "application/bincode") + )) + ), +)] +#[deprecated(note = "use '/v1/unstable/nym-nodes/skimmed/mixnodes/active' instead")] +#[allow(deprecated)] +pub(crate) async fn deprecated_mixnodes_basic( + state: State, + query_params: Query, +) -> AxumResult>> { + let output = query_params.output.unwrap_or_default(); + + // 1. 
call '/v1/unstable/nym-nodes/skimmed/mixnodes/active' + let active_mixnodes = mixnodes_basic_active(state, query_params) + .await? + .into_inner(); + + // 3. return result + Ok(output.to_response(CachedNodesResponse { + refreshed_at: active_mixnodes.refreshed_at, + // 2. remove pagination + nodes: active_mixnodes.nodes.data, + })) +} + +/// Return all Nym Nodes and optionally legacy mixnodes/gateways (if `no-legacy` flag is not used) +/// that are currently bonded. +#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParamsWithRole), + path = "", + context_path = "/v1/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +#[deprecated(note = "use '/v2/unstable/nym-nodes/skimmed' instead")] +#[allow(deprecated)] +pub(crate) async fn nodes_basic_all( + state: State, + Query(query_params): Query, +) -> PaginatedSkimmedNodes { + if let Some(role) = query_params.role { + return match role { + NodeRoleQueryParam::ActiveMixnode => { + mixnodes_basic_all(state, Query(query_params.into())).await + } + NodeRoleQueryParam::EntryGateway => { + entry_gateways_basic_all(state, Query(query_params.into())).await + } + NodeRoleQueryParam::ExitGateway => { + exit_gateways_basic_all(state, Query(query_params.into())).await + } + }; + } + + nodes_basic(state, Query(query_params.into()), false).await +} + +/// Return Nym Nodes and optionally legacy mixnodes/gateways (if `no-legacy` flag is not used) +/// that are currently bonded and are in the **active set** +#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/active", + context_path = "/v1/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), 
+ (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +#[deprecated] +#[allow(deprecated)] +pub(crate) async fn nodes_basic_active( + state: State, + Query(query_params): Query, +) -> PaginatedSkimmedNodes { + if let Some(role) = query_params.role { + return match role { + NodeRoleQueryParam::ActiveMixnode => { + mixnodes_basic_active(state, Query(query_params.into())).await + } + NodeRoleQueryParam::EntryGateway => { + entry_gateways_basic_active(state, Query(query_params.into())).await + } + NodeRoleQueryParam::ExitGateway => { + exit_gateways_basic_active(state, Query(query_params.into())).await + } + }; + } + + nodes_basic(state, Query(query_params.into()), true).await +} + +/// Returns Nym Nodes and optionally legacy mixnodes (if `no-legacy` flag is not used) +/// that are currently bonded and support mixing role. +#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/mixnodes/all", + context_path = "/v1/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +#[deprecated(note = "use '/v2/unstable/nym-nodes/skimmed/mixnodes/all' instead")] +pub(crate) async fn mixnodes_basic_all( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + mixnodes_basic(state, query_params, false).await +} + +/// Returns Nym Nodes and optionally legacy mixnodes (if `no-legacy` flag is not used) +/// that are currently bonded and are in the active set with one of the mixing roles. 
+#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/mixnodes/active", + context_path = "/v1/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +#[deprecated(note = "use '/v2/unstable/nym-nodes/skimmed/mixnodes/active' instead")] +pub(crate) async fn mixnodes_basic_active( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + mixnodes_basic(state, query_params, true).await +} + +/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) +/// that are currently bonded and are in the active set with the entry role. +#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/entry-gateways/active", + context_path = "/v1/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +#[deprecated] +pub(crate) async fn entry_gateways_basic_active( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + entry_gateways_basic(state, query_params, true).await +} + +/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) +/// that are currently bonded and support entry gateway role. 
+#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/entry-gateways/all", + context_path = "/v1/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +#[deprecated(note = "use '/v2/unstable/nym-nodes/skimmed/entry-gateways' instead")] +pub(crate) async fn entry_gateways_basic_all( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + entry_gateways_basic(state, query_params, false).await +} + +/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) +/// that are currently bonded and are in the active set with the exit role. +#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/exit-gateways/active", + context_path = "/v1/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +#[deprecated] +pub(crate) async fn exit_gateways_basic_active( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + exit_gateways_basic(state, query_params, true).await +} + +/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) +/// that are currently bonded and support exit gateway role. 
+#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/exit-gateways/all", + context_path = "/v1/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +#[deprecated(note = "use '/v2/unstable/nym-nodes/skimmed/exit-gateways' instead")] +pub(crate) async fn exit_gateways_basic_all( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + exit_gateways_basic(state, query_params, false).await +} diff --git a/nym-api/src/unstable_routes/v1/nym_nodes/skimmed/helpers.rs b/nym-api/src/unstable_routes/v1/nym_nodes/skimmed/helpers.rs new file mode 100644 index 00000000000..78bb32e1ebe --- /dev/null +++ b/nym-api/src/unstable_routes/v1/nym_nodes/skimmed/helpers.rs @@ -0,0 +1,66 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::support::http::state::AppState; +use crate::unstable_routes::v1::nym_nodes::helpers::NodesParams; +use crate::unstable_routes::v1::nym_nodes::skimmed::PaginatedSkimmedNodes; +use crate::unstable_routes::v2; +use axum::extract::{Query, State}; + +pub(crate) async fn nodes_basic( + state: State, + Query(query_params): Query, + active_only: bool, +) -> PaginatedSkimmedNodes { + Ok( + v2::nym_nodes::skimmed::helpers::nodes_basic( + state, + Query(query_params.into()), + active_only, + ) + .await? + .map(Into::into), + ) +} + +pub(crate) async fn mixnodes_basic( + state: State, + Query(query_params): Query, + active_only: bool, +) -> PaginatedSkimmedNodes { + Ok(v2::nym_nodes::skimmed::helpers::mixnodes_basic( + state, + Query(query_params.into()), + active_only, + ) + .await? 
+ .map(Into::into)) +} + +pub(crate) async fn entry_gateways_basic( + state: State, + Query(query_params): Query, + active_only: bool, +) -> PaginatedSkimmedNodes { + Ok(v2::nym_nodes::skimmed::helpers::entry_gateways_basic( + state, + Query(query_params.into()), + active_only, + ) + .await? + .map(Into::into)) +} + +pub(crate) async fn exit_gateways_basic( + state: State, + query_params: Query, + active_only: bool, +) -> PaginatedSkimmedNodes { + Ok(v2::nym_nodes::skimmed::helpers::exit_gateways_basic( + state, + Query(query_params.0.into()), + active_only, + ) + .await? + .map(Into::into)) +} diff --git a/nym-api/src/unstable_routes/v1/nym_nodes/skimmed/mod.rs b/nym-api/src/unstable_routes/v1/nym_nodes/skimmed/mod.rs new file mode 100644 index 00000000000..703ae853201 --- /dev/null +++ b/nym-api/src/unstable_routes/v1/nym_nodes/skimmed/mod.rs @@ -0,0 +1,26 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node_status_api::models::AxumResult; +use nym_api_requests::models::OffsetDateTimeJsonSchemaWrapper; +use nym_api_requests::nym_nodes::{PaginatedCachedNodesResponseV1, SkimmedNode}; +use nym_api_requests::pagination::PaginatedResponse; +use nym_http_api_common::FormattedResponse; +use utoipa::ToSchema; + +pub(crate) mod handlers; +pub(crate) mod helpers; + +pub type PaginatedSkimmedNodes = + AxumResult>>; + +pub(crate) use handlers::*; + +#[allow(dead_code)] // not dead, used in OpenAPI docs +#[derive(ToSchema)] +#[schema(title = "PaginatedCachedNodesResponse")] +pub struct PaginatedCachedNodesResponseSchema { + pub refreshed_at: OffsetDateTimeJsonSchemaWrapper, + #[schema(value_type = SkimmedNode)] + pub nodes: PaginatedResponse, +} diff --git a/nym-api/src/unstable_routes/v2/mod.rs b/nym-api/src/unstable_routes/v2/mod.rs new file mode 100644 index 00000000000..bda90bac785 --- /dev/null +++ b/nym-api/src/unstable_routes/v2/mod.rs @@ -0,0 +1,11 @@ +// Copyright 2025 - Nym Technologies SA +// 
SPDX-License-Identifier: GPL-3.0-only + +use crate::support::http::state::AppState; +use axum::Router; + +pub(crate) mod nym_nodes; + +pub(crate) fn unstable_routes_v2() -> Router { + Router::new().nest("/nym-nodes", nym_nodes::routes()) +} diff --git a/nym-api/src/unstable_routes/v2/nym_nodes/helpers.rs b/nym-api/src/unstable_routes/v2/nym_nodes/helpers.rs new file mode 100644 index 00000000000..9a24232a43e --- /dev/null +++ b/nym-api/src/unstable_routes/v2/nym_nodes/helpers.rs @@ -0,0 +1,93 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::support::http::helpers::PaginationRequest; +use crate::unstable_routes::v1; +use nym_api_requests::nym_nodes::NodeRoleQueryParam; +use nym_http_api_common::Output; +use serde::Deserialize; + +#[derive(Debug, Deserialize, utoipa::IntoParams)] +pub(crate) struct NodesParamsWithRole { + #[param(inline)] + pub(crate) role: Option, + + #[allow(dead_code)] + pub(crate) semver_compatibility: Option, + pub(crate) no_legacy: Option, + pub(crate) page: Option, + pub(crate) per_page: Option, + + // Identifier for the current epoch of the topology state. When sent by a client we can check if + // the client already knows about the latest topology state, allowing a `no-updates` response + // instead of wasting bandwidth serving an unchanged topology. 
+ pub(crate) epoch_id: Option, + + pub(crate) output: Option, +} + +impl From for NodesParamsWithRole { + fn from(value: v1::nym_nodes::helpers::NodesParamsWithRole) -> Self { + NodesParamsWithRole { + role: value.role, + semver_compatibility: value.semver_compatibility, + no_legacy: value.no_legacy, + page: value.page, + per_page: value.per_page, + epoch_id: value.epoch_id, + output: value.output, + } + } +} + +#[derive(Debug, Deserialize, utoipa::IntoParams)] +#[into_params(parameter_in = Query)] +pub(crate) struct NodesParams { + #[allow(dead_code)] + pub(crate) semver_compatibility: Option, + pub(crate) no_legacy: Option, + pub(crate) page: Option, + pub(crate) per_page: Option, + + // Identifier for the current epoch of the topology state. When sent by a client we can check if + // the client already knows about the latest topology state, allowing a `no-updates` response + // instead of wasting bandwidth serving an unchanged topology. + pub(crate) epoch_id: Option, + pub(crate) output: Option, +} + +impl From for NodesParams { + fn from(value: v1::nym_nodes::helpers::NodesParams) -> Self { + NodesParams { + semver_compatibility: value.semver_compatibility, + no_legacy: value.no_legacy, + page: value.page, + per_page: value.per_page, + epoch_id: value.epoch_id, + output: value.output, + } + } +} + +impl From for NodesParams { + fn from(params: NodesParamsWithRole) -> Self { + NodesParams { + semver_compatibility: params.semver_compatibility, + no_legacy: params.no_legacy, + page: params.page, + per_page: params.per_page, + epoch_id: params.epoch_id, + output: params.output, + } + } +} + +impl<'a> From<&'a NodesParams> for PaginationRequest { + fn from(params: &'a NodesParams) -> Self { + PaginationRequest { + output: params.output, + page: params.page, + per_page: params.per_page, + } + } +} diff --git a/nym-api/src/unstable_routes/v2/nym_nodes/mod.rs b/nym-api/src/unstable_routes/v2/nym_nodes/mod.rs new file mode 100644 index 00000000000..725fa16b2a5 --- 
/dev/null +++ b/nym-api/src/unstable_routes/v2/nym_nodes/mod.rs @@ -0,0 +1,33 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use crate::support::http::state::AppState; +use crate::unstable_routes::v2::nym_nodes::skimmed::{ + entry_gateways_basic_all, exit_gateways_basic_all, mixnodes_basic_active, mixnodes_basic_all, + nodes_basic_all, +}; +use axum::routing::get; +use axum::Router; +use tower_http::compression::CompressionLayer; + +pub(crate) mod helpers; +pub(crate) mod skimmed; + +#[allow(deprecated)] +pub(crate) fn routes() -> Router { + Router::new() + .nest( + "/skimmed", + Router::new() + .route("/", get(nodes_basic_all)) + .nest( + "/mixnodes", + Router::new() + .route("/active", get(mixnodes_basic_active)) + .route("/all", get(mixnodes_basic_all)), + ) + .route("/entry-gateways", get(entry_gateways_basic_all)) + .route("/exit-gateways", get(exit_gateways_basic_all)), + ) + .layer(CompressionLayer::new()) +} diff --git a/nym-api/src/unstable_routes/v2/nym_nodes/skimmed/handlers.rs b/nym-api/src/unstable_routes/v2/nym_nodes/skimmed/handlers.rs new file mode 100644 index 00000000000..c230ec5b310 --- /dev/null +++ b/nym-api/src/unstable_routes/v2/nym_nodes/skimmed/handlers.rs @@ -0,0 +1,142 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::support::http::state::AppState; +use crate::unstable_routes::v2::nym_nodes::helpers::{NodesParams, NodesParamsWithRole}; +use crate::unstable_routes::v2::nym_nodes::skimmed::helpers::{ + entry_gateways_basic, exit_gateways_basic, mixnodes_basic, nodes_basic, +}; +use crate::unstable_routes::v2::nym_nodes::skimmed::{ + PaginatedCachedNodesResponseSchema, PaginatedSkimmedNodes, +}; +use axum::extract::{Query, State}; +use nym_api_requests::nym_nodes::NodeRoleQueryParam; + +/// Return all Nym Nodes and optionally legacy mixnodes/gateways (if `no-legacy` flag is not used) +/// that are currently bonded. 
+#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParamsWithRole), + path = "", + context_path = "/v2/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +pub(crate) async fn nodes_basic_all( + state: State, + Query(query_params): Query, +) -> PaginatedSkimmedNodes { + if let Some(role) = query_params.role { + return match role { + NodeRoleQueryParam::ActiveMixnode => { + mixnodes_basic_all(state, Query(query_params.into())).await + } + NodeRoleQueryParam::EntryGateway => { + entry_gateways_basic_all(state, Query(query_params.into())).await + } + NodeRoleQueryParam::ExitGateway => { + exit_gateways_basic_all(state, Query(query_params.into())).await + } + }; + } + + nodes_basic(state, Query(query_params.into()), false).await +} + +/// Returns Nym Nodes and optionally legacy mixnodes (if `no-legacy` flag is not used) +/// that are currently bonded and support mixing role. +#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/mixnodes/all", + context_path = "/v2/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +pub(crate) async fn mixnodes_basic_all( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + mixnodes_basic(state, query_params, false).await +} + +/// Returns Nym Nodes and optionally legacy mixnodes (if `no-legacy` flag is not used) +/// that are currently bonded and are in the active set with one of the mixing roles. 
+#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/mixnodes/active", + context_path = "/v2/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +pub(crate) async fn mixnodes_basic_active( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + mixnodes_basic(state, query_params, true).await +} + +/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) +/// that are currently bonded and support entry gateway role. +#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/entry-gateways/all", + context_path = "/v2/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +pub(crate) async fn entry_gateways_basic_all( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + entry_gateways_basic(state, query_params, false).await +} + +/// Returns Nym Nodes and optionally legacy gateways (if `no-legacy` flag is not used) +/// that are currently bonded and support exit gateway role. 
+#[utoipa::path( + tag = "Unstable Nym Nodes", + get, + params(NodesParams), + path = "/exit-gateways/all", + context_path = "/v2/unstable/nym-nodes/skimmed", + responses( + (status = 200, content( + (PaginatedCachedNodesResponseSchema = "application/json"), + (PaginatedCachedNodesResponseSchema = "application/yaml"), + (PaginatedCachedNodesResponseSchema = "application/bincode") + )) + ), +)] +pub(crate) async fn exit_gateways_basic_all( + state: State, + query_params: Query, +) -> PaginatedSkimmedNodes { + exit_gateways_basic(state, query_params, false).await +} diff --git a/nym-api/src/unstable_routes/v2/nym_nodes/skimmed/helpers.rs b/nym-api/src/unstable_routes/v2/nym_nodes/skimmed/helpers.rs new file mode 100644 index 00000000000..1f1fb392036 --- /dev/null +++ b/nym-api/src/unstable_routes/v2/nym_nodes/skimmed/helpers.rs @@ -0,0 +1,365 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node_describe_cache::cache::DescribedNodes; +use crate::node_status_api::models::AxumErrorResponse; +use crate::support::caching::Cache; +use crate::support::http::state::AppState; +use crate::unstable_routes::helpers::{refreshed_at, LegacyAnnotation}; +use crate::unstable_routes::v2::nym_nodes::helpers::NodesParams; +use crate::unstable_routes::v2::nym_nodes::skimmed::PaginatedSkimmedNodes; +use axum::extract::{Query, State}; +use nym_api_requests::models::{ + NodeAnnotation, NymNodeDescription, OffsetDateTimeJsonSchemaWrapper, +}; +use nym_api_requests::nym_nodes::{NodeRole, PaginatedCachedNodesResponseV2, SkimmedNode}; +use nym_http_api_common::Output; +use nym_mixnet_contract_common::{Interval, NodeId}; +use nym_topology::CachedEpochRewardedSet; +use std::collections::HashMap; +use std::future::Future; +use std::time::Duration; +use tokio::sync::RwLockReadGuard; +use tracing::trace; + +/// Given all relevant caches, build part of response for JUST Nym Nodes +fn build_nym_nodes_response<'a, NI>( + rewarded_set: 
&CachedEpochRewardedSet, + nym_nodes_subset: NI, + annotations: &HashMap, + current_key_rotation: u32, + active_only: bool, +) -> Vec +where + NI: Iterator + 'a, +{ + let mut nodes = Vec::new(); + for nym_node in nym_nodes_subset { + let node_id = nym_node.node_id; + + let role: NodeRole = rewarded_set.role(node_id).into(); + + // if the role is inactive, see if our filter allows it + if active_only && role.is_inactive() { + continue; + } + + // honestly, not sure under what exact circumstances this value could be missing, + // but in that case just use 0 performance + let annotation = annotations.get(&node_id).copied().unwrap_or_default(); + + nodes.push(nym_node.to_skimmed_node( + current_key_rotation, + role, + annotation.last_24h_performance, + )); + } + nodes +} + +/// Given all relevant caches, add appropriate legacy nodes to the part of the response +fn add_legacy( + nodes: &mut Vec, + rewarded_set: &CachedEpochRewardedSet, + describe_cache: &DescribedNodes, + annotated_legacy_nodes: &HashMap, + current_key_rotation: u32, + active_only: bool, +) where + LN: LegacyAnnotation, +{ + for (node_id, legacy) in annotated_legacy_nodes.iter() { + let role: NodeRole = rewarded_set.role(*node_id).into(); + + // if the role is inactive, see if our filter allows it + if active_only && role.is_inactive() { + continue; + } + + // if we have self-described info, prefer it over contract data + if let Some(described) = describe_cache.get_node(node_id) { + // legacy nodes don't support key rotation + nodes.push(described.to_skimmed_node(current_key_rotation, role, legacy.performance())) + } else { + match legacy.try_to_skimmed_node(role) { + Ok(node) => nodes.push(node), + Err(err) => { + let id = legacy.identity(); + trace!("node {id} is malformed: {err}") + } + } + } + } +} + +fn maybe_add_expires_header( + output: Output, + interval: Interval, + current_key_rotation: u32, + refreshed_at: OffsetDateTimeJsonSchemaWrapper, + nodes: Vec, + active_only: bool, +) -> 
PaginatedSkimmedNodes { + let base_response = output.to_response( + PaginatedCachedNodesResponseV2::new_full( + interval.current_epoch_absolute_id(), + current_key_rotation, + refreshed_at, + nodes, + ) + .fresh(interval), + ); + + if !active_only { + return Ok(base_response); + } + + // if caller requested only active nodes, the response is valid until the epoch changes + // (but add 2 minutes due to epoch transition not being instantaneous + let epoch_end = interval.current_epoch_end(); + let expiration = epoch_end + Duration::from_secs(120); + Ok(base_response.with_expires_header(expiration)) +} + +// hehe, what an abomination, but it's used in multiple different places and I hate copy-pasting code, +// especially if it has multiple loops, etc +pub(crate) async fn build_skimmed_nodes_response<'a, NI, LG, Fut, LN>( + state: &'a AppState, + Query(query_params): Query, + nym_nodes_subset: NI, + annotated_legacy_nodes_getter: LG, + active_only: bool, + output: Output, +) -> PaginatedSkimmedNodes +where + // iterator returning relevant subset of nym-nodes (like mixing nym-nodes, entries, etc.) + NI: Iterator + 'a, + + // async function that returns cache of appropriate legacy nodes (mixnodes or gateways) + LG: Fn(&'a AppState) -> Fut, + Fut: + Future>>, AxumErrorResponse>>, + + // the legacy node (MixNodeBondAnnotated or GatewayBondAnnotated) + LN: LegacyAnnotation + 'a, +{ + // TODO: implement it + let _ = query_params.per_page; + let _ = query_params.page; + + // 1. get the rewarded set + let rewarded_set = state.rewarded_set().await?; + + // 2. grab all annotations so that we could attach scores to the [nym] nodes + let annotations = state.node_annotations().await?; + + // 3. 
implicitly grab the relevant described nodes + // (ideally it'd be tied directly to the NI iterator, but I couldn't defeat the compiler) + let describe_cache = state.describe_nodes_cache_data().await?; + + let contract_cache = state.nym_contract_cache(); + + let interval = contract_cache.current_interval().await?; + let current_key_rotation = contract_cache.current_key_rotation_id().await?; + + // 4.0 If the client indicates that they already know about the current topology send empty response + if let Some(client_known_epoch) = query_params.epoch_id { + if client_known_epoch == interval.current_epoch_id() { + return Ok( + output.to_response(PaginatedCachedNodesResponseV2::no_updates( + interval.current_epoch_absolute_id(), + current_key_rotation, + )), + ); + } + } + + // 4. start building the response + let mut nodes = build_nym_nodes_response( + &rewarded_set, + nym_nodes_subset, + &annotations, + current_key_rotation, + active_only, + ); + + // 5. if we allow legacy nodes, repeat the procedure for them, otherwise return just nym-nodes + if let Some(true) = query_params.no_legacy { + // min of all caches + let refreshed_at = refreshed_at([ + rewarded_set.timestamp(), + annotations.timestamp(), + describe_cache.timestamp(), + ]); + + return maybe_add_expires_header( + output, + interval, + current_key_rotation, + refreshed_at, + nodes, + active_only, + ); + } + + // 6. 
grab relevant legacy nodes + // (due to the existence of the legacy endpoints, we already have fully annotated data on them) + let annotated_legacy_nodes = annotated_legacy_nodes_getter(state).await?; + add_legacy( + &mut nodes, + &rewarded_set, + &describe_cache, + &annotated_legacy_nodes, + current_key_rotation, + active_only, + ); + + // min of all caches + let refreshed_at = refreshed_at([ + rewarded_set.timestamp(), + annotations.timestamp(), + describe_cache.timestamp(), + annotated_legacy_nodes.timestamp(), + ]); + + maybe_add_expires_header( + output, + interval, + current_key_rotation, + refreshed_at, + nodes, + active_only, + ) +} + +pub(crate) async fn nodes_basic( + state: State, + Query(query_params): Query, + active_only: bool, +) -> PaginatedSkimmedNodes { + let output = query_params.output.unwrap_or_default(); + + // unfortunately we have to build the response semi-manually here as we need to add two sources of legacy nodes + + // 1. grab all relevant described nym-nodes + let rewarded_set = state.rewarded_set().await?; + + let describe_cache = state.describe_nodes_cache_data().await?; + let all_nym_nodes = describe_cache.all_nym_nodes(); + let annotations = state.node_annotations().await?; + let legacy_mixnodes = state.legacy_mixnode_annotations().await?; + let legacy_gateways = state.legacy_gateways_annotations().await?; + + let interval = state.nym_contract_cache().current_interval().await?; + let current_key_rotation = state.nym_contract_cache().current_key_rotation_id().await?; + + let mut nodes = build_nym_nodes_response( + &rewarded_set, + all_nym_nodes, + &annotations, + current_key_rotation, + active_only, + ); + + // add legacy gateways to the response + add_legacy( + &mut nodes, + &rewarded_set, + &describe_cache, + &legacy_gateways, + current_key_rotation, + active_only, + ); + + // add legacy mixnodes to the response + add_legacy( + &mut nodes, + &rewarded_set, + &describe_cache, + &legacy_mixnodes, + current_key_rotation, + 
active_only, + ); + + // min of all caches + let refreshed_at = refreshed_at([ + rewarded_set.timestamp(), + annotations.timestamp(), + describe_cache.timestamp(), + legacy_mixnodes.timestamp(), + legacy_gateways.timestamp(), + ]); + + Ok(output.to_response(PaginatedCachedNodesResponseV2::new_full( + interval.current_epoch_absolute_id(), + current_key_rotation, + refreshed_at, + nodes, + ))) +} + +pub(crate) async fn mixnodes_basic( + state: State, + query_params: Query, + active_only: bool, +) -> PaginatedSkimmedNodes { + let output = query_params.output.unwrap_or_default(); + + // 1. grab all relevant described nym-nodes + let describe_cache = state.describe_nodes_cache_data().await?; + let mixing_nym_nodes = describe_cache.mixing_nym_nodes(); + + build_skimmed_nodes_response( + &state.0, + query_params, + mixing_nym_nodes, + |state| state.legacy_mixnode_annotations(), + active_only, + output, + ) + .await +} + +pub(crate) async fn entry_gateways_basic( + state: State, + query_params: Query, + active_only: bool, +) -> PaginatedSkimmedNodes { + let output = query_params.output.unwrap_or_default(); + + // 1. grab all relevant described nym-nodes + let describe_cache = state.describe_nodes_cache_data().await?; + let mixing_nym_nodes = describe_cache.entry_capable_nym_nodes(); + + build_skimmed_nodes_response( + &state.0, + query_params, + mixing_nym_nodes, + |state| state.legacy_gateways_annotations(), + active_only, + output, + ) + .await +} + +pub(crate) async fn exit_gateways_basic( + state: State, + query_params: Query, + active_only: bool, +) -> PaginatedSkimmedNodes { + let output = query_params.output.unwrap_or_default(); + + // 1. 
grab all relevant described nym-nodes + let describe_cache = state.describe_nodes_cache_data().await?; + let mixing_nym_nodes = describe_cache.exit_capable_nym_nodes(); + + build_skimmed_nodes_response( + &state.0, + query_params, + mixing_nym_nodes, + |state| state.legacy_gateways_annotations(), + active_only, + output, + ) + .await +} diff --git a/nym-api/src/unstable_routes/v2/nym_nodes/skimmed/mod.rs b/nym-api/src/unstable_routes/v2/nym_nodes/skimmed/mod.rs new file mode 100644 index 00000000000..bda0975342d --- /dev/null +++ b/nym-api/src/unstable_routes/v2/nym_nodes/skimmed/mod.rs @@ -0,0 +1,26 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node_status_api::models::AxumResult; +use nym_api_requests::models::OffsetDateTimeJsonSchemaWrapper; +use nym_api_requests::nym_nodes::{PaginatedCachedNodesResponseV2, SkimmedNode}; +use nym_api_requests::pagination::PaginatedResponse; +use nym_http_api_common::FormattedResponse; +use utoipa::ToSchema; + +pub(crate) mod handlers; +pub(crate) mod helpers; + +pub type PaginatedSkimmedNodes = + AxumResult>>; + +pub(crate) use handlers::*; + +#[allow(dead_code)] // not dead, used in OpenAPI docs +#[derive(ToSchema)] +#[schema(title = "PaginatedCachedNodesResponse")] +pub struct PaginatedCachedNodesResponseSchema { + pub refreshed_at: OffsetDateTimeJsonSchemaWrapper, + #[schema(value_type = SkimmedNode)] + pub nodes: PaginatedResponse, +} diff --git a/nym-network-monitor/src/accounting.rs b/nym-network-monitor/src/accounting.rs index 157bbc7c17f..9132e582c0b 100644 --- a/nym-network-monitor/src/accounting.rs +++ b/nym-network-monitor/src/accounting.rs @@ -9,7 +9,7 @@ use log::{debug, error, info}; use nym_sphinx::chunking::{monitoring, SentFragment}; use nym_topology::{NymRouteProvider, RoutingNode}; use nym_types::monitoring::{MonitorMessage, NodeResult}; -use nym_validator_client::nym_api::routes::{API_VERSION, STATUS, SUBMIT_GATEWAY, SUBMIT_NODE}; +use 
nym_validator_client::nym_api::routes::{STATUS, SUBMIT_GATEWAY, SUBMIT_NODE, V1_API_VERSION}; use rand::SeedableRng; use rand_chacha::ChaCha8Rng; use serde::{Deserialize, Serialize}; @@ -497,9 +497,11 @@ pub async fn submit_metrics(database_url: Option<&String>) -> anyhow::Result<()> info!("Submitting metrics to {}", *NYM_API_URL); let client = reqwest::Client::new(); - let node_submit_url = format!("{}/{API_VERSION}/{STATUS}/{SUBMIT_NODE}", &*NYM_API_URL); - let gateway_submit_url = - format!("{}/{API_VERSION}/{STATUS}/{SUBMIT_GATEWAY}", &*NYM_API_URL); + let node_submit_url = format!("{}/{V1_API_VERSION}/{STATUS}/{SUBMIT_NODE}", &*NYM_API_URL); + let gateway_submit_url = format!( + "{}/{V1_API_VERSION}/{STATUS}/{SUBMIT_GATEWAY}", + &*NYM_API_URL + ); info!("Submitting {} mixnode measurements", node_stats.len()); diff --git a/nym-node/Cargo.toml b/nym-node/Cargo.toml index 72eb5924eb0..c64d22c4088 100644 --- a/nym-node/Cargo.toml +++ b/nym-node/Cargo.toml @@ -50,7 +50,7 @@ nym-bin-common = { path = "../common/bin-common", features = [ "basic_tracing", "output_format", ] } -nym-client-core-config-types = { path = "../common/client-core/config-types" } +nym-client-core-config-types = { path = "../common/client-core/config-types", features = ["disk-persistence"] } nym-config = { path = "../common/config" } nym-crypto = { path = "../common/crypto", features = ["asymmetric", "rand"] } nym-nonexhaustive-delayqueue = { path = "../common/nonexhaustive-delayqueue" } @@ -115,6 +115,7 @@ cargo_metadata = { workspace = true } [dev-dependencies] criterion = { workspace = true, features = ["async_tokio"] } +rand_chacha = { workspace = true } [lints] diff --git a/nym-node/nym-node-requests/src/api/mod.rs b/nym-node/nym-node-requests/src/api/mod.rs index c2ae5badb4c..5dac9e83045 100644 --- a/nym-node/nym-node-requests/src/api/mod.rs +++ b/nym-node/nym-node-requests/src/api/mod.rs @@ -1,14 +1,15 @@ // Copyright 2023 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 -use 
crate::api::v1::node::models::{LegacyHostInformation, LegacyHostInformationV2}; +use crate::api::v1::node::models::{ + LegacyHostInformationV1, LegacyHostInformationV2, LegacyHostInformationV3, +}; use crate::error::Error; use nym_crypto::asymmetric::ed25519; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; use std::ops::Deref; -use utoipa::ToSchema; #[cfg(feature = "client")] pub mod client; @@ -20,7 +21,7 @@ pub use client::Client; // create the type alias manually if openapi is not enabled pub type SignedHostInformation = SignedData; -#[derive(ToSchema)] +#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] pub struct SignedDataHostInfo { // #[serde(flatten)] pub data: crate::api::v1::node::models::HostInformation, @@ -67,8 +68,17 @@ impl SignedHostInformation { } // attempt to verify legacy signatures + let legacy_v3 = SignedData { + data: LegacyHostInformationV3::from(self.data.clone()), + signature: self.signature.clone(), + }; + + if legacy_v3.verify(&self.keys.ed25519_identity) { + return true; + } + let legacy_v2 = SignedData { - data: LegacyHostInformationV2::from(self.data.clone()), + data: LegacyHostInformationV2::from(legacy_v3.data), signature: self.signature.clone(), }; @@ -77,7 +87,7 @@ impl SignedHostInformation { } SignedData { - data: LegacyHostInformation::from(legacy_v2.data), + data: LegacyHostInformationV1::from(legacy_v2.data), signature: self.signature.clone(), } .verify(&self.keys.ed25519_identity) @@ -103,9 +113,11 @@ impl Display for ErrorResponse { } } +#[allow(deprecated)] #[cfg(test)] mod tests { use super::*; + use crate::api::v1::node::models::{HostKeys, SphinxKey}; use nym_crypto::asymmetric::{ed25519, x25519}; use rand_chacha::rand_core::SeedableRng; @@ -114,14 +126,66 @@ mod tests { let mut rng = rand_chacha::ChaCha20Rng::from_seed([0u8; 32]); let ed22519 = ed25519::KeyPair::new(&mut rng); let x25519_sphinx = x25519::KeyPair::new(&mut rng); + let x25519_sphinx2 = 
x25519::KeyPair::new(&mut rng); let x25519_noise = x25519::KeyPair::new(&mut rng); + let current_rotation_id = 1234; + + // no pre-announced keys + let host_info = crate::api::v1::node::models::HostInformation { + ip_address: vec!["1.1.1.1".parse().unwrap()], + hostname: Some("foomp.com".to_string()), + keys: crate::api::v1::node::models::HostKeys { + ed25519_identity: *ed22519.public_key(), + x25519_sphinx: *x25519_sphinx.public_key(), + primary_x25519_sphinx_key: SphinxKey { + rotation_id: current_rotation_id, + public_key: *x25519_sphinx.public_key(), + }, + x25519_noise: None, + pre_announced_x25519_sphinx_key: None, + }, + }; + + let signed_info = SignedHostInformation::new(host_info, ed22519.private_key()).unwrap(); + assert!(signed_info.verify(ed22519.public_key())); + assert!(signed_info.verify_host_information()); + + let host_info_with_noise = crate::api::v1::node::models::HostInformation { + ip_address: vec!["1.1.1.1".parse().unwrap()], + hostname: Some("foomp.com".to_string()), + keys: crate::api::v1::node::models::HostKeys { + ed25519_identity: *ed22519.public_key(), + x25519_sphinx: *x25519_sphinx.public_key(), + primary_x25519_sphinx_key: SphinxKey { + rotation_id: current_rotation_id, + public_key: *x25519_sphinx.public_key(), + }, + pre_announced_x25519_sphinx_key: None, + x25519_noise: Some(*x25519_noise.public_key()), + }, + }; + + let signed_info = + SignedHostInformation::new(host_info_with_noise, ed22519.private_key()).unwrap(); + assert!(signed_info.verify(ed22519.public_key())); + assert!(signed_info.verify_host_information()); + + // with pre-announced keys let host_info = crate::api::v1::node::models::HostInformation { ip_address: vec!["1.1.1.1".parse().unwrap()], hostname: Some("foomp.com".to_string()), keys: crate::api::v1::node::models::HostKeys { ed25519_identity: *ed22519.public_key(), x25519_sphinx: *x25519_sphinx.public_key(), + primary_x25519_sphinx_key: SphinxKey { + rotation_id: current_rotation_id, + public_key: 
*x25519_sphinx.public_key(), + }, + pre_announced_x25519_sphinx_key: Some(SphinxKey { + rotation_id: current_rotation_id + 1, + public_key: *x25519_sphinx2.public_key(), + }), x25519_noise: None, }, }; @@ -136,6 +200,14 @@ mod tests { keys: crate::api::v1::node::models::HostKeys { ed25519_identity: *ed22519.public_key(), x25519_sphinx: *x25519_sphinx.public_key(), + primary_x25519_sphinx_key: SphinxKey { + rotation_id: current_rotation_id, + public_key: *x25519_sphinx.public_key(), + }, + pre_announced_x25519_sphinx_key: Some(SphinxKey { + rotation_id: current_rotation_id + 1, + public_key: *x25519_sphinx2.public_key(), + }), x25519_noise: Some(*x25519_noise.public_key()), }, }; @@ -146,6 +218,54 @@ mod tests { assert!(signed_info.verify_host_information()); } + #[test] + fn dummy_legacy_v3_signed_host_verification() { + let mut rng = rand_chacha::ChaCha20Rng::from_seed([0u8; 32]); + let ed22519 = ed25519::KeyPair::new(&mut rng); + let x25519_sphinx = x25519::KeyPair::new(&mut rng); + let x25519_noise = x25519::KeyPair::new(&mut rng); + + let legacy_info = crate::api::v1::node::models::LegacyHostInformationV3 { + ip_address: vec!["1.1.1.1".parse().unwrap()], + hostname: Some("foomp.com".to_string()), + keys: crate::api::v1::node::models::LegacyHostKeysV3 { + ed25519_identity: *ed22519.public_key(), + x25519_sphinx: *x25519_sphinx.public_key(), + x25519_noise: Some(*x25519_noise.public_key()), + }, + }; + + // note the usage of u32::max rotation id (as that's what the legacy data would be deserialised into) + let current_struct = crate::api::v1::node::models::HostInformation { + ip_address: vec!["1.1.1.1".parse().unwrap()], + hostname: Some("foomp.com".to_string()), + keys: HostKeys { + ed25519_identity: *ed22519.public_key(), + x25519_sphinx: *x25519_sphinx.public_key(), + primary_x25519_sphinx_key: SphinxKey { + rotation_id: u32::MAX, + public_key: *x25519_sphinx.public_key(), + }, + pre_announced_x25519_sphinx_key: None, + x25519_noise: 
Some(*x25519_noise.public_key()), + }, + }; + + // signature on legacy data + let signature = SignedData::new(legacy_info, ed22519.private_key()) + .unwrap() + .signature; + + // signed blob with the 'current' structure + let current_struct = SignedData { + data: current_struct, + signature, + }; + + assert!(!current_struct.verify(ed22519.public_key())); + assert!(current_struct.verify_host_information()) + } + #[test] fn dummy_legacy_v2_signed_host_verification() { let mut rng = rand_chacha::ChaCha20Rng::from_seed([0u8; 32]); @@ -173,22 +293,34 @@ mod tests { }, }; + // note the usage of u32::max rotation id (as that's what the legacy data would be deserialised into) let host_info_no_noise = crate::api::v1::node::models::HostInformation { ip_address: legacy_info_no_noise.ip_address.clone(), hostname: legacy_info_no_noise.hostname.clone(), keys: crate::api::v1::node::models::HostKeys { ed25519_identity: legacy_info_no_noise.keys.ed25519_identity.parse().unwrap(), - x25519_sphinx: legacy_info_no_noise.keys.x25519_sphinx.parse().unwrap(), + x25519_sphinx: *x25519_sphinx.public_key(), + primary_x25519_sphinx_key: SphinxKey { + rotation_id: u32::MAX, + public_key: *x25519_sphinx.public_key(), + }, + pre_announced_x25519_sphinx_key: None, x25519_noise: None, }, }; + // note the usage of u32::max rotation id (as that's what the legacy data would be deserialised into) let host_info_noise = crate::api::v1::node::models::HostInformation { ip_address: legacy_info_noise.ip_address.clone(), hostname: legacy_info_noise.hostname.clone(), keys: crate::api::v1::node::models::HostKeys { ed25519_identity: legacy_info_noise.keys.ed25519_identity.parse().unwrap(), - x25519_sphinx: legacy_info_noise.keys.x25519_sphinx.parse().unwrap(), + x25519_sphinx: *x25519_sphinx.public_key(), + primary_x25519_sphinx_key: SphinxKey { + rotation_id: u32::MAX, + public_key: *x25519_sphinx.public_key(), + }, + pre_announced_x25519_sphinx_key: None, x25519_noise: 
Some(legacy_info_noise.keys.x25519_noise.parse().unwrap()), }, }; @@ -216,32 +348,37 @@ mod tests { assert!(!current_struct_no_noise.verify(ed22519.public_key())); assert!(current_struct_no_noise.verify_host_information()); - // if noise key is present, the signature is actually valid - assert!(current_struct_noise.verify(ed22519.public_key())); + assert!(!current_struct_noise.verify(ed22519.public_key())); assert!(current_struct_noise.verify_host_information()) } #[test] - fn dummy_legacy_signed_host_verification() { + fn dummy_legacy_v1_signed_host_verification() { let mut rng = rand_chacha::ChaCha20Rng::from_seed([0u8; 32]); let ed22519 = ed25519::KeyPair::new(&mut rng); let x25519_sphinx = x25519::KeyPair::new(&mut rng); - let legacy_info = crate::api::v1::node::models::LegacyHostInformation { + let legacy_info = crate::api::v1::node::models::LegacyHostInformationV1 { ip_address: vec!["1.1.1.1".parse().unwrap()], hostname: Some("foomp.com".to_string()), - keys: crate::api::v1::node::models::LegacyHostKeys { + keys: crate::api::v1::node::models::LegacyHostKeysV1 { ed25519: ed22519.public_key().to_base58_string(), x25519: x25519_sphinx.public_key().to_base58_string(), }, }; + // note the usage of u32::max rotation id (as that's what the legacy data would be deserialised into) let host_info = crate::api::v1::node::models::HostInformation { ip_address: legacy_info.ip_address.clone(), hostname: legacy_info.hostname.clone(), keys: crate::api::v1::node::models::HostKeys { ed25519_identity: legacy_info.keys.ed25519.parse().unwrap(), - x25519_sphinx: legacy_info.keys.x25519.parse().unwrap(), + x25519_sphinx: *x25519_sphinx.public_key(), + primary_x25519_sphinx_key: SphinxKey { + rotation_id: u32::MAX, + public_key: *x25519_sphinx.public_key(), + }, + pre_announced_x25519_sphinx_key: None, x25519_noise: None, }, }; diff --git a/nym-node/nym-node-requests/src/api/v1/node/models.rs b/nym-node/nym-node-requests/src/api/v1/node/models.rs index 5f047912faa..9d63a10380e 100644 
--- a/nym-node/nym-node-requests/src/api/v1/node/models.rs +++ b/nym-node/nym-node-requests/src/api/v1/node/models.rs @@ -70,6 +70,13 @@ impl HostInformation { } } +#[derive(Serialize)] +pub struct LegacyHostInformationV3 { + pub ip_address: Vec, + pub hostname: Option, + pub keys: LegacyHostKeysV3, +} + #[derive(Serialize)] pub struct LegacyHostInformationV2 { pub ip_address: Vec, @@ -78,14 +85,24 @@ pub struct LegacyHostInformationV2 { } #[derive(Serialize)] -pub struct LegacyHostInformation { +pub struct LegacyHostInformationV1 { pub ip_address: Vec, pub hostname: Option, - pub keys: LegacyHostKeys, + pub keys: LegacyHostKeysV1, } -impl From for LegacyHostInformationV2 { +impl From for LegacyHostInformationV3 { fn from(value: HostInformation) -> Self { + LegacyHostInformationV3 { + ip_address: value.ip_address, + hostname: value.hostname, + keys: value.keys.into(), + } + } +} + +impl From for LegacyHostInformationV2 { + fn from(value: LegacyHostInformationV3) -> Self { LegacyHostInformationV2 { ip_address: value.ip_address, hostname: value.hostname, @@ -94,9 +111,9 @@ impl From for LegacyHostInformationV2 { } } -impl From for LegacyHostInformation { +impl From for LegacyHostInformationV1 { fn from(value: LegacyHostInformationV2) -> Self { - LegacyHostInformation { + LegacyHostInformationV1 { ip_address: value.ip_address, hostname: value.hostname, keys: value.keys.into(), @@ -106,27 +123,121 @@ impl From for LegacyHostInformation { #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] +#[serde(from = "HostKeysDeHelper")] pub struct HostKeys { + /// Base58-encoded ed25519 public key of this node. Currently, it corresponds to either mixnode's or gateway's identity. 
+ #[schemars(with = "String")] + #[cfg_attr(feature = "openapi", schema(value_type = String))] + #[serde(with = "bs58_ed25519_pubkey")] + pub ed25519_identity: ed25519::PublicKey, + + #[deprecated(note = "use explicit primary_x25519_sphinx_key instead")] + #[schemars(with = "String")] + #[cfg_attr(feature = "openapi", schema(value_type = String))] + #[serde(with = "bs58_x25519_pubkey")] + pub x25519_sphinx: x25519::PublicKey, + + /// Current, active, x25519 sphinx key clients are expected to use when constructing packets + /// with this node in the route. + pub primary_x25519_sphinx_key: SphinxKey, + + /// Pre-announced x25519 sphinx key clients will use during the following key rotation + pub pre_announced_x25519_sphinx_key: Option, + + /// Base58-encoded x25519 public key of this node used for the noise protocol. + #[schemars(with = "Option")] + #[cfg_attr(feature = "openapi", schema(value_type = Option))] + pub x25519_noise: Option, +} + +// we need the intermediate struct to help us with the new explicit sphinx key fields +#[allow(deprecated)] +impl From for HostKeys { + fn from(value: HostKeysDeHelper) -> Self { + let primary_x25519_sphinx_key = match value.primary_x25519_sphinx_key { + None => { + // legacy + SphinxKey::new_legacy(value.x25519_sphinx) + } + Some(primary_x25519_sphinx_key) => primary_x25519_sphinx_key, + }; + + HostKeys { + ed25519_identity: value.ed25519_identity, + x25519_sphinx: value.x25519_sphinx, + primary_x25519_sphinx_key, + pre_announced_x25519_sphinx_key: value.pre_announced_x25519_sphinx_key, + x25519_noise: value.x25519_noise, + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +struct HostKeysDeHelper { /// Base58-encoded ed25519 public key of this node. Currently, it corresponds to either mixnode's or gateway's identity. 
#[serde(alias = "ed25519")] #[serde(with = "bs58_ed25519_pubkey")] + pub ed25519_identity: ed25519::PublicKey, + + #[deprecated(note = "use explicit primary_x25519_sphinx_key instead")] + #[serde(alias = "x25519")] + #[serde(with = "bs58_x25519_pubkey")] + pub x25519_sphinx: x25519::PublicKey, + + /// Current, active, x25519 sphinx key clients are expected to use when constructing packets + /// with this node in the route. + pub primary_x25519_sphinx_key: Option, + + /// Pre-announced x25519 sphinx key clients will use during the following key rotation + #[serde(default)] + pub pre_announced_x25519_sphinx_key: Option, + + /// Base58-encoded x25519 public key of this node used for the noise protocol. + #[serde(default)] + #[serde(with = "option_bs58_x25519_pubkey")] + pub x25519_noise: Option, +} + +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] +pub struct SphinxKey { + pub rotation_id: u32, + + #[serde(with = "bs58_x25519_pubkey")] #[schemars(with = "String")] #[cfg_attr(feature = "openapi", schema(value_type = String))] + pub public_key: x25519::PublicKey, +} + +impl SphinxKey { + pub fn new_legacy(public_key: x25519::PublicKey) -> SphinxKey { + SphinxKey { + rotation_id: u32::MAX, + public_key, + } + } + + pub fn is_legacy(&self) -> bool { + self.rotation_id == u32::MAX + } +} + +#[derive(Serialize)] +pub struct LegacyHostKeysV3 { + /// Base58-encoded ed25519 public key of this node. Currently, it corresponds to either mixnode's or gateway's identity. + #[serde(alias = "ed25519")] + #[serde(with = "bs58_ed25519_pubkey")] pub ed25519_identity: ed25519::PublicKey, /// Base58-encoded x25519 public key of this node used for sphinx/outfox packet creation. /// Currently, it corresponds to either mixnode's or gateway's key. 
#[serde(alias = "x25519")] #[serde(with = "bs58_x25519_pubkey")] - #[schemars(with = "String")] - #[cfg_attr(feature = "openapi", schema(value_type = String))] pub x25519_sphinx: x25519::PublicKey, /// Base58-encoded x25519 public key of this node used for the noise protocol. #[serde(default)] #[serde(with = "option_bs58_x25519_pubkey")] - #[schemars(with = "Option")] - #[cfg_attr(feature = "openapi", schema(value_type = Option))] pub x25519_noise: Option, } @@ -138,13 +249,23 @@ pub struct LegacyHostKeysV2 { } #[derive(Serialize)] -pub struct LegacyHostKeys { +pub struct LegacyHostKeysV1 { pub ed25519: String, pub x25519: String, } -impl From for LegacyHostKeysV2 { +impl From for LegacyHostKeysV3 { fn from(value: HostKeys) -> Self { + LegacyHostKeysV3 { + ed25519_identity: value.ed25519_identity, + x25519_sphinx: value.primary_x25519_sphinx_key.public_key, + x25519_noise: value.x25519_noise, + } + } +} + +impl From for LegacyHostKeysV2 { + fn from(value: LegacyHostKeysV3) -> Self { LegacyHostKeysV2 { ed25519_identity: value.ed25519_identity.to_base58_string(), x25519_sphinx: value.x25519_sphinx.to_base58_string(), @@ -156,9 +277,9 @@ impl From for LegacyHostKeysV2 { } } -impl From for LegacyHostKeys { +impl From for LegacyHostKeysV1 { fn from(value: LegacyHostKeysV2) -> Self { - LegacyHostKeys { + LegacyHostKeysV1 { ed25519: value.ed25519_identity, x25519: value.x25519_sphinx, } @@ -267,3 +388,31 @@ pub struct AuxiliaryDetails { #[serde(default)] pub accepted_operator_terms_and_conditions: bool, } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn legacy_host_information_deserialisation() { + let legacy_raw = r#" + { + "data": { + "ip_address": [ + "194.182.184.55" + ], + "hostname": null, + "keys": { + "ed25519_identity": "2RMWm7PoadaoWpk3KhT2tcFFfA4oKUyC44KwmVvjxNDS", + "x25519_sphinx": "Awn4R2AHX91tYeiMJMxW3mFfoePuHWzZYUFdDQnydZCD", + "x25519_noise": null + } + }, + "signature": 
"5JcXh766JANhz3bu2hMBS8onTLihQn6vnGgduJg1qd8JAcPGPbXBwBTKmmQPYCVGeZYFHW4CMGhfHVBu2A1rE5f7" + } + "#; + + let res = serde_json::from_str::(legacy_raw); + assert!(res.is_ok()); + } +} diff --git a/nym-node/src/cli/commands/mod.rs b/nym-node/src/cli/commands/mod.rs index dea762daf91..345a2143e79 100644 --- a/nym-node/src/cli/commands/mod.rs +++ b/nym-node/src/cli/commands/mod.rs @@ -5,6 +5,7 @@ pub(crate) mod bonding_information; pub(super) mod build_info; pub(super) mod migrate; pub(crate) mod node_details; +pub(crate) mod reset_sphinx_keys; pub(super) mod run; pub(super) mod sign; pub(crate) mod test_throughput; diff --git a/nym-node/src/cli/commands/reset_sphinx_keys.rs b/nym-node/src/cli/commands/reset_sphinx_keys.rs new file mode 100644 index 00000000000..379196b9343 --- /dev/null +++ b/nym-node/src/cli/commands/reset_sphinx_keys.rs @@ -0,0 +1,97 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::cli::helpers::ConfigArgs; +use crate::config::persistence::{ + DEFAULT_RD_BLOOMFILTER_FILE_EXT, DEFAULT_RD_BLOOMFILTER_FLUSH_FILE_EXT, +}; +use crate::config::upgrade_helpers::try_load_current_config; +use crate::node::helpers::get_current_rotation_id; +use crate::node::key_rotation::manager::SphinxKeyManager; +use nym_crypto::aes::cipher::crypto_common::rand_core::OsRng; +use std::fs; +use std::fs::read_dir; +use std::path::PathBuf; +use tracing::{info, warn}; + +#[derive(Debug, clap::Args)] +pub(crate) struct Args { + #[clap(flatten)] + pub(crate) config: ConfigArgs, +} + +fn clear_bloomfilters(dir: &PathBuf) -> anyhow::Result<()> { + let read_dir = read_dir(dir)?; + for entry in read_dir { + let entry = entry?; + let path = entry.path(); + let Some(extension) = path.extension() else { + continue; + }; + if extension == DEFAULT_RD_BLOOMFILTER_FILE_EXT + || extension == DEFAULT_RD_BLOOMFILTER_FLUSH_FILE_EXT + { + { + fs::remove_file(path)?; + } + } + } + + Ok(()) +} + +pub async fn execute(args: Args) -> 
anyhow::Result<()> { + let config = try_load_current_config(args.config.config_path()).await?; + + warn!("RESETTING SPHINX KEYS OF NODE {}", config.id); + + // 1. attempt to retrieve current rotation id + let current_rotation_id = + get_current_rotation_id(&config.mixnet.nym_api_urls, &config.mixnet.nyxd_urls).await?; + + // 2. remove all bloomfilters + info!("clearing old replay protection bloomfilters..."); + clear_bloomfilters( + &config + .mixnet + .replay_protection + .storage_paths + .current_bloomfilters_directory, + )?; + + // 3. remove primary and secondary keys. also a temporary key if it existed + info!("removing old keys..."); + let tmp_location = config + .storage_paths + .keys + .primary_x25519_sphinx_key_file + .with_extension("tmp"); + if tmp_location.exists() { + fs::remove_file(tmp_location)?; + } + + if config + .storage_paths + .keys + .secondary_x25519_sphinx_key_file + .exists() + { + fs::remove_file(&config.storage_paths.keys.secondary_x25519_sphinx_key_file)?; + } + + // no need to explicitly remove primary key as the file will be overwritten + + // 4. 
recreate primary key according to current rotation id + let mut rng = OsRng; + + info!("generating new key for rotation {current_rotation_id}..."); + let _ = SphinxKeyManager::initialise_new( + &mut rng, + current_rotation_id, + &config.storage_paths.keys.primary_x25519_sphinx_key_file, + &config.storage_paths.keys.secondary_x25519_sphinx_key_file, + )?; + + info!("done!"); + Ok(()) +} diff --git a/nym-node/src/cli/commands/sign.rs b/nym-node/src/cli/commands/sign.rs index 22ad977756f..a05db78c854 100644 --- a/nym-node/src/cli/commands/sign.rs +++ b/nym-node/src/cli/commands/sign.rs @@ -72,7 +72,7 @@ fn print_signed_contract_msg( pub async fn execute(args: Args) -> anyhow::Result<()> { let config = try_load_current_config(args.config.config_path()).await?; let identity_keypair = - load_ed25519_identity_keypair(config.storage_paths.keys.ed25519_identity_storage_paths())?; + load_ed25519_identity_keypair(&config.storage_paths.keys.ed25519_identity_storage_paths())?; // note: due to clap's ArgGroup, one (and only one) of those branches will be called if let Some(text) = args.text { diff --git a/nym-node/src/cli/mod.rs b/nym-node/src/cli/mod.rs index 40b05b4a840..37e79bcd8f6 100644 --- a/nym-node/src/cli/mod.rs +++ b/nym-node/src/cli/mod.rs @@ -2,7 +2,8 @@ // SPDX-License-Identifier: GPL-3.0-only use crate::cli::commands::{ - bonding_information, build_info, migrate, node_details, run, sign, test_throughput, + bonding_information, build_info, migrate, node_details, reset_sphinx_keys, run, sign, + test_throughput, }; use crate::env::vars::{NYMNODE_CONFIG_ENV_FILE_ARG, NYMNODE_NO_BANNER_ARG}; use crate::logging::setup_tracing_logger; @@ -68,6 +69,9 @@ impl Cli { Commands::Migrate(args) => migrate::execute(*args)?, Commands::Sign(args) => { Self::execute_async(sign::execute(args))? }?, Commands::TestThroughput(args) => test_throughput::execute(args)?, + Commands::UnsafeResetSphinxKeys(args) => { + { Self::execute_async(reset_sphinx_keys::execute(args))? }? 
+ } } Ok(()) } @@ -93,6 +97,9 @@ pub(crate) enum Commands { /// Use identity key of this node to sign provided message. Sign(sign::Args), + /// UNSAFE: reset existing sphinx keys and attempt to generate fresh one for the current network state + UnsafeResetSphinxKeys(reset_sphinx_keys::Args), + /// Attempt to approximate the maximum mixnet throughput if nym-node /// was running on this machine in mixnet mode #[clap(hide = true)] diff --git a/nym-node/src/config/mod.rs b/nym-node/src/config/mod.rs index cc7fd218866..9074f5697bd 100644 --- a/nym-node/src/config/mod.rs +++ b/nym-node/src/config/mod.rs @@ -529,6 +529,9 @@ pub struct Mixnet { /// Settings for controlling replay detection pub replay_protection: ReplayProtection, + #[serde(default)] + pub key_rotation: KeyRotation, + #[serde(default)] pub debug: MixnetDebug, } @@ -639,12 +642,6 @@ pub struct ReplayProtectionDebug { /// It's performed in case the traffic rates increase before the next bloomfilter update. pub bloomfilter_size_multiplier: f64, - // NOTE: this field is temporary until replay detection bloomfilter rotation is tied - // to key rotation - /// Specifies how often the bloomfilter is cleared - #[serde(with = "humantime_serde")] - pub bloomfilter_reset_rate: Duration, - /// Specifies how often the bloomfilter is flushed to disk for recovery in case of a crash #[serde(with = "humantime_serde")] pub bloomfilter_disk_flushing_rate: Duration, @@ -661,9 +658,6 @@ impl ReplayProtectionDebug { // 10^-5 pub const DEFAULT_REPLAY_DETECTION_FALSE_POSITIVE_RATE: f64 = 1e-5; - // 25h (key rotation will be happening every 24h + 1h of overlap) - pub const DEFAULT_REPLAY_DETECTION_BF_RESET_RATE: Duration = Duration::from_secs(25 * 60 * 60); - // we must have some reasonable balance between losing values and trashing the disk. 
// since on average HDD it would take ~30s to save a 2GB bloomfilter pub const DEFAULT_BF_DISK_FLUSHING_RATE: Duration = Duration::from_secs(10 * 60); @@ -680,8 +674,12 @@ impl ReplayProtectionDebug { )); } + // ideally we would have pulled the exact information from the network, + // but making async calls really doesn't play around with this method + // so we do second best: assume 24h rotation with 1h overlap (which realistically won't ever change) + let items_in_filter = items_in_bloomfilter( - self.bloomfilter_reset_rate, + Duration::from_secs(25 * 60 * 60), self.initial_expected_packets_per_second, ); let bitmap_size = bitmap_size(self.false_positive_rate, items_in_filter); @@ -717,12 +715,37 @@ impl Default for ReplayProtectionDebug { bloomfilter_minimum_packets_per_second_size: Self::DEFAULT_BLOOMFILTER_MINIMUM_PACKETS_PER_SECOND_SIZE, bloomfilter_size_multiplier: Self::DEFAULT_BLOOMFILTER_SIZE_MULTIPLIER, - bloomfilter_reset_rate: Self::DEFAULT_REPLAY_DETECTION_BF_RESET_RATE, bloomfilter_disk_flushing_rate: Self::DEFAULT_BF_DISK_FLUSHING_RATE, } } } +#[derive(Debug, Default, Copy, Clone, Deserialize, PartialEq, Serialize)] +#[serde(default)] +pub struct KeyRotation { + pub debug: KeyRotationDebug, +} + +#[derive(Debug, Copy, Clone, Deserialize, PartialEq, Serialize)] +#[serde(default)] +pub struct KeyRotationDebug { + /// Specifies how often the node should poll for any changes in the key rotation global state. 
+ #[serde(with = "humantime_serde")] + pub rotation_state_poling_interval: Duration, +} + +impl KeyRotationDebug { + pub const DEFAULT_ROTATION_STATE_POLLING_INTERVAL: Duration = Duration::from_secs(4 * 60 * 60); +} + +impl Default for KeyRotationDebug { + fn default() -> Self { + KeyRotationDebug { + rotation_state_poling_interval: Self::DEFAULT_ROTATION_STATE_POLLING_INTERVAL, + } + } +} + impl MixnetDebug { // given that genuine clients are using mean delay of 50ms, // the probability of them delaying for over 10s is 10^-87 @@ -773,6 +796,7 @@ impl Mixnet { nym_api_urls, nyxd_urls, replay_protection: ReplayProtection::new_default(data_dir), + key_rotation: Default::default(), debug: Default::default(), } } diff --git a/nym-node/src/config/old_configs/mod.rs b/nym-node/src/config/old_configs/mod.rs index 5842ab31464..48c1ede17b1 100644 --- a/nym-node/src/config/old_configs/mod.rs +++ b/nym-node/src/config/old_configs/mod.rs @@ -9,6 +9,7 @@ mod old_config_v5; mod old_config_v6; mod old_config_v7; mod old_config_v8; +mod old_config_v9; pub use old_config_v1::try_upgrade_config_v1; pub use old_config_v2::try_upgrade_config_v2; @@ -18,3 +19,4 @@ pub use old_config_v5::try_upgrade_config_v5; pub use old_config_v6::try_upgrade_config_v6; pub use old_config_v7::try_upgrade_config_v7; pub use old_config_v8::try_upgrade_config_v8; +pub use old_config_v9::try_upgrade_config_v9; diff --git a/nym-node/src/config/old_configs/old_config_v3.rs b/nym-node/src/config/old_configs/old_config_v3.rs index 6796f1aa8cf..8df7a5a96ab 100644 --- a/nym-node/src/config/old_configs/old_config_v3.rs +++ b/nym-node/src/config/old_configs/old_config_v3.rs @@ -203,22 +203,6 @@ pub struct KeysPathsV3 { } impl KeysPathsV3 { - pub fn new>(data_dir: P) -> Self { - let data_dir = data_dir.as_ref(); - - KeysPathsV3 { - private_ed25519_identity_key_file: data_dir - .join(DEFAULT_ED25519_PRIVATE_IDENTITY_KEY_FILENAME), - public_ed25519_identity_key_file: data_dir - 
.join(DEFAULT_ED25519_PUBLIC_IDENTITY_KEY_FILENAME), - private_x25519_sphinx_key_file: data_dir - .join(DEFAULT_X25519_PRIVATE_SPHINX_KEY_FILENAME), - public_x25519_sphinx_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_SPHINX_KEY_FILENAME), - private_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PRIVATE_NOISE_KEY_FILENAME), - public_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_NOISE_KEY_FILENAME), - } - } - pub fn ed25519_identity_storage_paths(&self) -> nym_pemstore::KeyPairPath { nym_pemstore::KeyPairPath::new( &self.private_ed25519_identity_key_file, diff --git a/nym-node/src/config/old_configs/old_config_v4.rs b/nym-node/src/config/old_configs/old_config_v4.rs index fb6d3aaa302..98912cbb9ef 100644 --- a/nym-node/src/config/old_configs/old_config_v4.rs +++ b/nym-node/src/config/old_configs/old_config_v4.rs @@ -212,45 +212,6 @@ pub struct KeysPathsV4 { pub public_x25519_noise_key_file: PathBuf, } -impl KeysPathsV4 { - pub fn new>(data_dir: P) -> Self { - let data_dir = data_dir.as_ref(); - - KeysPathsV4 { - private_ed25519_identity_key_file: data_dir - .join(DEFAULT_ED25519_PRIVATE_IDENTITY_KEY_FILENAME), - public_ed25519_identity_key_file: data_dir - .join(DEFAULT_ED25519_PUBLIC_IDENTITY_KEY_FILENAME), - private_x25519_sphinx_key_file: data_dir - .join(DEFAULT_X25519_PRIVATE_SPHINX_KEY_FILENAME), - public_x25519_sphinx_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_SPHINX_KEY_FILENAME), - private_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PRIVATE_NOISE_KEY_FILENAME), - public_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_NOISE_KEY_FILENAME), - } - } - - pub fn ed25519_identity_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_ed25519_identity_key_file, - &self.public_ed25519_identity_key_file, - ) - } - - pub fn x25519_sphinx_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_x25519_sphinx_key_file, - 
&self.public_x25519_sphinx_key_file, - ) - } - - pub fn x25519_noise_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_x25519_noise_key_file, - &self.public_x25519_noise_key_file, - ) - } -} - #[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] #[serde(deny_unknown_fields)] pub struct NymNodePathsV4 { diff --git a/nym-node/src/config/old_configs/old_config_v5.rs b/nym-node/src/config/old_configs/old_config_v5.rs index 09848023583..5dbacdaedb7 100644 --- a/nym-node/src/config/old_configs/old_config_v5.rs +++ b/nym-node/src/config/old_configs/old_config_v5.rs @@ -212,45 +212,6 @@ pub struct KeysPathsV5 { pub public_x25519_noise_key_file: PathBuf, } -impl KeysPathsV5 { - pub fn new>(data_dir: P) -> Self { - let data_dir = data_dir.as_ref(); - - KeysPathsV5 { - private_ed25519_identity_key_file: data_dir - .join(DEFAULT_ED25519_PRIVATE_IDENTITY_KEY_FILENAME), - public_ed25519_identity_key_file: data_dir - .join(DEFAULT_ED25519_PUBLIC_IDENTITY_KEY_FILENAME), - private_x25519_sphinx_key_file: data_dir - .join(DEFAULT_X25519_PRIVATE_SPHINX_KEY_FILENAME), - public_x25519_sphinx_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_SPHINX_KEY_FILENAME), - private_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PRIVATE_NOISE_KEY_FILENAME), - public_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_NOISE_KEY_FILENAME), - } - } - - pub fn ed25519_identity_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_ed25519_identity_key_file, - &self.public_ed25519_identity_key_file, - ) - } - - pub fn x25519_sphinx_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_x25519_sphinx_key_file, - &self.public_x25519_sphinx_key_file, - ) - } - - pub fn x25519_noise_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_x25519_noise_key_file, - &self.public_x25519_noise_key_file, - 
) - } -} - #[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] #[serde(deny_unknown_fields)] pub struct NymNodePathsV5 { diff --git a/nym-node/src/config/old_configs/old_config_v6.rs b/nym-node/src/config/old_configs/old_config_v6.rs index 7a3eb97295e..ebf3cc5b44f 100644 --- a/nym-node/src/config/old_configs/old_config_v6.rs +++ b/nym-node/src/config/old_configs/old_config_v6.rs @@ -232,45 +232,6 @@ pub struct KeysPathsV6 { pub public_x25519_noise_key_file: PathBuf, } -impl KeysPathsV6 { - pub fn new>(data_dir: P) -> Self { - let data_dir = data_dir.as_ref(); - - KeysPathsV6 { - private_ed25519_identity_key_file: data_dir - .join(DEFAULT_ED25519_PRIVATE_IDENTITY_KEY_FILENAME), - public_ed25519_identity_key_file: data_dir - .join(DEFAULT_ED25519_PUBLIC_IDENTITY_KEY_FILENAME), - private_x25519_sphinx_key_file: data_dir - .join(DEFAULT_X25519_PRIVATE_SPHINX_KEY_FILENAME), - public_x25519_sphinx_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_SPHINX_KEY_FILENAME), - private_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PRIVATE_NOISE_KEY_FILENAME), - public_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_NOISE_KEY_FILENAME), - } - } - - pub fn ed25519_identity_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_ed25519_identity_key_file, - &self.public_ed25519_identity_key_file, - ) - } - - pub fn x25519_sphinx_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_x25519_sphinx_key_file, - &self.public_x25519_sphinx_key_file, - ) - } - - pub fn x25519_noise_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_x25519_noise_key_file, - &self.public_x25519_noise_key_file, - ) - } -} - #[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] #[serde(deny_unknown_fields)] pub struct NymNodePathsV6 { diff --git a/nym-node/src/config/old_configs/old_config_v8.rs 
b/nym-node/src/config/old_configs/old_config_v8.rs index f4fc89503e9..64f8f057bbd 100644 --- a/nym-node/src/config/old_configs/old_config_v8.rs +++ b/nym-node/src/config/old_configs/old_config_v8.rs @@ -1,12 +1,14 @@ // Copyright 2025 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use crate::config::authenticator::{Authenticator, AuthenticatorDebug}; -use crate::config::gateway_tasks::{ - ClientBandwidthDebug, StaleMessageDebug, ZkNymTicketHandlerDebug, -}; -use crate::config::service_providers::{ - IpPacketRouter, IpPacketRouterDebug, NetworkRequester, NetworkRequesterDebug, +use crate::config::old_configs::old_config_v9::{ + AuthenticatorDebugV9, AuthenticatorPathsV9, AuthenticatorV9, ClientBandwidthDebugV9, ConfigV9, + GatewayTasksConfigDebugV9, GatewayTasksConfigV9, GatewayTasksPathsV9, HostV9, HttpV9, + IpPacketRouterDebugV9, IpPacketRouterPathsV9, IpPacketRouterV9, KeysPathsV9, LoggingSettingsV9, + MixnetDebugV9, MixnetV9, NetworkRequesterDebugV9, NetworkRequesterPathsV9, NetworkRequesterV9, + NodeModesV9, NymNodePathsV9, ReplayProtectionV9, ServiceProvidersConfigDebugV9, + ServiceProvidersConfigV9, ServiceProvidersPathsV9, StaleMessageDebugV9, VerlocDebugV9, + VerlocV9, WireguardPathsV9, WireguardV9, ZkNymTicketHandlerDebugV9, }; use crate::config::*; use crate::error::NymNodeError; @@ -20,7 +22,6 @@ use nym_config::{ serde_helpers::{de_maybe_port, de_maybe_stringified}, }; use nym_config::{parse_urls, read_config_from_toml_file}; -use persistence::*; use serde::{Deserialize, Serialize}; use std::env; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; @@ -1189,7 +1190,7 @@ impl ConfigV8 { pub async fn try_upgrade_config_v8>( path: P, prev_config: Option, -) -> Result { +) -> Result { debug!("attempting to load v8 config..."); let old_cfg = if let Some(prev_config) = prev_config { @@ -1205,26 +1206,26 @@ pub async fn try_upgrade_config_v8>( .parent() .ok_or(NymNodeError::DataDirDerivationFailure)?; - let cfg = Config { + let cfg = 
ConfigV9 { save_path: old_cfg.save_path, id: old_cfg.id, - modes: NodeModes { + modes: NodeModesV9 { mixnode: old_cfg.modes.mixnode, entry: old_cfg.modes.entry, exit: old_cfg.modes.exit, }, - host: Host { + host: HostV9 { public_ips: old_cfg.host.public_ips, hostname: old_cfg.host.hostname, location: old_cfg.host.location, }, - mixnet: Mixnet { + mixnet: MixnetV9 { bind_address: old_cfg.mixnet.bind_address, announce_port: old_cfg.mixnet.announce_port, nym_api_urls: old_cfg.mixnet.nym_api_urls, nyxd_urls: old_cfg.mixnet.nyxd_urls, - replay_protection: ReplayProtection::new_default(data_dir), - debug: MixnetDebug { + replay_protection: ReplayProtectionV9::new_default(data_dir), + debug: MixnetDebugV9 { maximum_forward_packet_delay: old_cfg.mixnet.debug.maximum_forward_packet_delay, packet_forwarding_initial_backoff: old_cfg .mixnet @@ -1239,8 +1240,8 @@ pub async fn try_upgrade_config_v8>( unsafe_disable_noise: old_cfg.mixnet.debug.unsafe_disable_noise, }, }, - storage_paths: NymNodePaths { - keys: KeysPaths { + storage_paths: NymNodePathsV9 { + keys: KeysPathsV9 { private_ed25519_identity_key_file: old_cfg .storage_paths .keys @@ -1268,7 +1269,7 @@ pub async fn try_upgrade_config_v8>( }, description: old_cfg.storage_paths.description, }, - http: Http { + http: HttpV9 { bind_address: old_cfg.http.bind_address, landing_page_assets_path: old_cfg.http.landing_page_assets_path, access_token: old_cfg.http.access_token, @@ -1277,10 +1278,10 @@ pub async fn try_upgrade_config_v8>( expose_crypto_hardware: old_cfg.http.expose_crypto_hardware, node_load_cache_ttl: old_cfg.http.node_load_cache_ttl, }, - verloc: Verloc { + verloc: VerlocV9 { bind_address: old_cfg.verloc.bind_address, announce_port: old_cfg.verloc.announce_port, - debug: VerlocDebug { + debug: VerlocDebugV9 { packets_per_node: old_cfg.verloc.debug.packets_per_node, connection_timeout: old_cfg.verloc.debug.connection_timeout, packet_timeout: old_cfg.verloc.debug.packet_timeout, @@ -1290,7 +1291,7 @@ pub async fn 
try_upgrade_config_v8>( retry_timeout: old_cfg.verloc.debug.retry_timeout, }, }, - wireguard: Wireguard { + wireguard: WireguardV9 { enabled: old_cfg.wireguard.enabled, bind_address: old_cfg.wireguard.bind_address, private_ipv4: old_cfg.wireguard.private_ipv4, @@ -1298,7 +1299,7 @@ pub async fn try_upgrade_config_v8>( announced_port: old_cfg.wireguard.announced_port, private_network_prefix_v4: old_cfg.wireguard.private_network_prefix_v4, private_network_prefix_v6: old_cfg.wireguard.private_network_prefix_v6, - storage_paths: WireguardPaths { + storage_paths: WireguardPathsV9 { private_diffie_hellman_key_file: old_cfg .wireguard .storage_paths @@ -1309,8 +1310,8 @@ pub async fn try_upgrade_config_v8>( .public_diffie_hellman_key_file, }, }, - gateway_tasks: GatewayTasksConfig { - storage_paths: GatewayTasksPaths { + gateway_tasks: GatewayTasksConfigV9 { + storage_paths: GatewayTasksPathsV9 { clients_storage: old_cfg.gateway_tasks.storage_paths.clients_storage, stats_storage: old_cfg.gateway_tasks.storage_paths.stats_storage, cosmos_mnemonic: old_cfg.gateway_tasks.storage_paths.cosmos_mnemonic, @@ -1319,12 +1320,12 @@ pub async fn try_upgrade_config_v8>( ws_bind_address: old_cfg.gateway_tasks.ws_bind_address, announce_ws_port: old_cfg.gateway_tasks.announce_ws_port, announce_wss_port: old_cfg.gateway_tasks.announce_wss_port, - debug: gateway_tasks::Debug { + debug: GatewayTasksConfigDebugV9 { message_retrieval_limit: old_cfg.gateway_tasks.debug.message_retrieval_limit, maximum_open_connections: old_cfg.gateway_tasks.debug.maximum_open_connections, minimum_mix_performance: old_cfg.gateway_tasks.debug.minimum_mix_performance, max_request_timestamp_skew: old_cfg.gateway_tasks.debug.max_request_timestamp_skew, - stale_messages: StaleMessageDebug { + stale_messages: StaleMessageDebugV9 { cleaner_run_interval: old_cfg .gateway_tasks .debug @@ -1332,7 +1333,7 @@ pub async fn try_upgrade_config_v8>( .cleaner_run_interval, max_age: 
old_cfg.gateway_tasks.debug.stale_messages.max_age, }, - client_bandwidth: ClientBandwidthDebug { + client_bandwidth: ClientBandwidthDebugV9 { max_flushing_rate: old_cfg .gateway_tasks .debug @@ -1344,7 +1345,7 @@ pub async fn try_upgrade_config_v8>( .client_bandwidth .max_delta_flushing_amount, }, - zk_nym_tickets: ZkNymTicketHandlerDebug { + zk_nym_tickets: ZkNymTicketHandlerDebugV9 { revocation_bandwidth_penalty: old_cfg .gateway_tasks .debug @@ -1369,11 +1370,11 @@ pub async fn try_upgrade_config_v8>( }, }, }, - service_providers: ServiceProvidersConfig { - storage_paths: ServiceProvidersPaths { + service_providers: ServiceProvidersConfigV9 { + storage_paths: ServiceProvidersPathsV9 { clients_storage: old_cfg.service_providers.storage_paths.clients_storage, stats_storage: old_cfg.service_providers.storage_paths.stats_storage, - network_requester: NetworkRequesterPaths { + network_requester: NetworkRequesterPathsV9 { private_ed25519_identity_key_file: old_cfg .service_providers .storage_paths @@ -1410,7 +1411,7 @@ pub async fn try_upgrade_config_v8>( .network_requester .gateway_registrations, }, - ip_packet_router: IpPacketRouterPaths { + ip_packet_router: IpPacketRouterPathsV9 { private_ed25519_identity_key_file: old_cfg .service_providers .storage_paths @@ -1447,7 +1448,7 @@ pub async fn try_upgrade_config_v8>( .ip_packet_router .gateway_registrations, }, - authenticator: AuthenticatorPaths { + authenticator: AuthenticatorPathsV9 { private_ed25519_identity_key_file: old_cfg .service_providers .storage_paths @@ -1487,8 +1488,8 @@ pub async fn try_upgrade_config_v8>( }, open_proxy: old_cfg.service_providers.open_proxy, upstream_exit_policy_url: old_cfg.service_providers.upstream_exit_policy_url, - network_requester: NetworkRequester { - debug: NetworkRequesterDebug { + network_requester: NetworkRequesterV9 { + debug: NetworkRequesterDebugV9 { enabled: old_cfg.service_providers.network_requester.debug.enabled, disable_poisson_rate: old_cfg .service_providers @@ 
-1502,8 +1503,8 @@ pub async fn try_upgrade_config_v8>( .client_debug, }, }, - ip_packet_router: IpPacketRouter { - debug: IpPacketRouterDebug { + ip_packet_router: IpPacketRouterV9 { + debug: IpPacketRouterDebugV9 { enabled: old_cfg.service_providers.ip_packet_router.debug.enabled, disable_poisson_rate: old_cfg .service_providers @@ -1517,8 +1518,8 @@ pub async fn try_upgrade_config_v8>( .client_debug, }, }, - authenticator: Authenticator { - debug: AuthenticatorDebug { + authenticator: AuthenticatorV9 { + debug: AuthenticatorDebugV9 { enabled: old_cfg.service_providers.authenticator.debug.enabled, disable_poisson_rate: old_cfg .service_providers @@ -1528,12 +1529,12 @@ pub async fn try_upgrade_config_v8>( client_debug: old_cfg.service_providers.authenticator.debug.client_debug, }, }, - debug: service_providers::Debug { + debug: ServiceProvidersConfigDebugV9 { message_retrieval_limit: old_cfg.service_providers.debug.message_retrieval_limit, }, }, metrics: Default::default(), - logging: LoggingSettings {}, + logging: LoggingSettingsV9 {}, debug: Default::default(), }; Ok(cfg) diff --git a/nym-node/src/config/old_configs/old_config_v9.rs b/nym-node/src/config/old_configs/old_config_v9.rs new file mode 100644 index 00000000000..058edfc61e6 --- /dev/null +++ b/nym-node/src/config/old_configs/old_config_v9.rs @@ -0,0 +1,1714 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::config::authenticator::{Authenticator, AuthenticatorDebug}; +use crate::config::gateway_tasks::{ + ClientBandwidthDebug, StaleMessageDebug, ZkNymTicketHandlerDebug, +}; +use crate::config::persistence::{ + AuthenticatorPaths, GatewayTasksPaths, IpPacketRouterPaths, KeysPaths, NetworkRequesterPaths, + NymNodePaths, ReplayProtectionPaths, ServiceProvidersPaths, WireguardPaths, + DEFAULT_PRIMARY_X25519_SPHINX_KEY_FILENAME, DEFAULT_SECONDARY_X25519_SPHINX_KEY_FILENAME, +}; +use crate::config::service_providers::{ + IpPacketRouter, IpPacketRouterDebug, 
NetworkRequester, NetworkRequesterDebug, +}; +use crate::config::{ + gateway_tasks, service_providers, Config, GatewayTasksConfig, Host, Http, Mixnet, MixnetDebug, + NodeModes, ReplayProtection, ReplayProtectionDebug, ServiceProvidersConfig, Verloc, + VerlocDebug, Wireguard, DEFAULT_HTTP_PORT, +}; +use crate::error::{KeyIOFailure, NymNodeError}; +use crate::node::helpers::{get_current_rotation_id, load_key, store_key}; +use crate::node::key_rotation::key::SphinxPrivateKey; +use celes::Country; +use clap::ValueEnum; +use nym_bin_common::logging::LoggingSettings; +use nym_client_core_config_types::DebugConfig as ClientDebugConfig; +use nym_config::defaults::DEFAULT_VERLOC_LISTENING_PORT; +use nym_config::helpers::{in6addr_any_init, inaddr_any}; +use nym_config::{ + defaults::TICKETBOOK_VALIDITY_DAYS, + read_config_from_toml_file, + serde_helpers::{de_maybe_port, de_maybe_stringified}, +}; +use nym_crypto::asymmetric::x25519; +use serde::{Deserialize, Serialize}; +use std::fs; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::path::{Path, PathBuf}; +use std::time::Duration; +use tracing::{debug, instrument}; +use url::Url; + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct WireguardPathsV9 { + pub private_diffie_hellman_key_file: PathBuf, + pub public_diffie_hellman_key_file: PathBuf, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct WireguardV9 { + /// Specifies whether the wireguard service is enabled on this node. + pub enabled: bool, + + /// Socket address this node will use for binding its wireguard interface. + /// default: `[::]:51822` + pub bind_address: SocketAddr, + + /// Private IPv4 address of the wireguard gateway. + /// default: `10.1.0.1` + pub private_ipv4: Ipv4Addr, + + /// Private IPv6 address of the wireguard gateway. 
+ /// default: `fc01::1` + pub private_ipv6: Ipv6Addr, + + /// Port announced to external clients wishing to connect to the wireguard interface. + /// Useful in the instances where the node is behind a proxy. + pub announced_port: u16, + + /// The prefix denoting the maximum number of the clients that can be connected via Wireguard using IPv4. + /// The maximum value for IPv4 is 32 + pub private_network_prefix_v4: u8, + + /// The prefix denoting the maximum number of the clients that can be connected via Wireguard using IPv6. + /// The maximum value for IPv6 is 128 + pub private_network_prefix_v6: u8, + + /// Paths for wireguard keys, client registries, etc. + pub storage_paths: WireguardPathsV9, +} + +// a temporary solution until all "types" are run at the same time +#[derive(Debug, Default, Serialize, Deserialize, ValueEnum, Clone, Copy)] +#[serde(rename_all = "snake_case")] +pub enum NodeModeV9 { + #[default] + #[clap(alias = "mix")] + Mixnode, + + #[clap(alias = "entry", alias = "gateway")] + EntryGateway, + + // to not break existing behaviour, this means exit capabilities AND entry capabilities + #[clap(alias = "exit")] + ExitGateway, + + // will start only SP needed for exit capabilities WITHOUT entry routing + ExitProvidersOnly, +} + +impl From for NodeModes { + fn from(config: NodeModeV9) -> Self { + match config { + NodeModeV9::Mixnode => *NodeModes::default().with_mixnode(), + NodeModeV9::EntryGateway => *NodeModes::default().with_entry(), + // in old version exit implied entry + NodeModeV9::ExitGateway => *NodeModes::default().with_entry().with_exit(), + NodeModeV9::ExitProvidersOnly => *NodeModes::default().with_exit(), + } + } +} + +#[derive(Debug, Default, Serialize, Deserialize, Clone, Copy)] +pub struct NodeModesV9 { + /// Specifies whether this node can operate in a mixnode mode. + pub mixnode: bool, + + /// Specifies whether this node can operate in an entry mode. 
+ pub entry: bool, + + /// Specifies whether this node can operate in an exit mode. + pub exit: bool, + // TODO: would it make sense to also put WG here for completion? +} + +// TODO: this is very much a WIP. we need proper ssl certificate support here +#[derive(Debug, Clone, Default, Deserialize, PartialEq, Serialize)] +#[serde(default)] +#[serde(deny_unknown_fields)] +pub struct HostV9 { + /// Ip address(es) of this host, such as 1.1.1.1 that external clients will use for connections. + /// If no values are provided, when this node gets included in the network, + /// its ip addresses will be populated by whatever value is resolved by associated nym-api. + pub public_ips: Vec, + + /// Optional hostname of this node, for example nymtech.net. + // TODO: this is temporary. to be replaced by pulling the data directly from the certs. + #[serde(deserialize_with = "de_maybe_stringified")] + pub hostname: Option, + + /// Optional ISO 3166 alpha-2 two-letter country code of the node's **physical** location + #[serde(deserialize_with = "de_maybe_stringified")] + pub location: Option, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] +#[serde(default)] +#[serde(deny_unknown_fields)] +pub struct MixnetDebugV9 { + /// Specifies the duration of time this node is willing to delay a forward packet for. + #[serde(with = "humantime_serde")] + pub maximum_forward_packet_delay: Duration, + + /// Initial value of an exponential backoff to reconnect to dropped TCP connection when + /// forwarding sphinx packets. + #[serde(with = "humantime_serde")] + pub packet_forwarding_initial_backoff: Duration, + + /// Maximum value of an exponential backoff to reconnect to dropped TCP connection when + /// forwarding sphinx packets. + #[serde(with = "humantime_serde")] + pub packet_forwarding_maximum_backoff: Duration, + + /// Timeout for establishing initial connection when trying to forward a sphinx packet. 
+ #[serde(with = "humantime_serde")] + pub initial_connection_timeout: Duration, + + /// Maximum number of packets that can be stored waiting to get sent to a particular connection. + pub maximum_connection_buffer_size: usize, + + /// Specifies whether this node should **NOT** use noise protocol in the connections (currently not implemented) + pub unsafe_disable_noise: bool, +} + +impl MixnetDebugV9 { + // given that genuine clients are using mean delay of 50ms, + // the probability of them delaying for over 10s is 10^-87 + // which for all intents and purposes will never happen + pub(crate) const DEFAULT_MAXIMUM_FORWARD_PACKET_DELAY: Duration = Duration::from_secs(10); + pub(crate) const DEFAULT_PACKET_FORWARDING_INITIAL_BACKOFF: Duration = + Duration::from_millis(10_000); + pub(crate) const DEFAULT_PACKET_FORWARDING_MAXIMUM_BACKOFF: Duration = + Duration::from_millis(300_000); + pub(crate) const DEFAULT_INITIAL_CONNECTION_TIMEOUT: Duration = Duration::from_millis(1_500); + pub(crate) const DEFAULT_MAXIMUM_CONNECTION_BUFFER_SIZE: usize = 2000; +} + +impl Default for MixnetDebugV9 { + fn default() -> Self { + MixnetDebugV9 { + maximum_forward_packet_delay: Self::DEFAULT_MAXIMUM_FORWARD_PACKET_DELAY, + packet_forwarding_initial_backoff: Self::DEFAULT_PACKET_FORWARDING_INITIAL_BACKOFF, + packet_forwarding_maximum_backoff: Self::DEFAULT_PACKET_FORWARDING_MAXIMUM_BACKOFF, + initial_connection_timeout: Self::DEFAULT_INITIAL_CONNECTION_TIMEOUT, + maximum_connection_buffer_size: Self::DEFAULT_MAXIMUM_CONNECTION_BUFFER_SIZE, + // to be changed by @SW once the implementation is there + unsafe_disable_noise: true, + } + } +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct MixnetV9 { + /// Address this node will bind to for listening for mixnet packets + /// default: `[::]:1789` + pub bind_address: SocketAddr, + + /// If applicable, custom port announced in the self-described API that other clients and nodes + /// will 
use. + /// Useful when the node is behind a proxy. + #[serde(deserialize_with = "de_maybe_port")] + pub announce_port: Option, + + /// Addresses to nym APIs from which the node gets the view of the network. + pub nym_api_urls: Vec, + + /// Addresses to nyxd which the node uses to interact with the nyx chain. + pub nyxd_urls: Vec, + + /// Settings for controlling replay detection + pub replay_protection: ReplayProtectionV9, + + #[serde(default)] + pub debug: MixnetDebugV9, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] +pub struct ReplayProtectionV9 { + /// Paths for current bloomfilters + pub storage_paths: ReplayProtectionPathsV9, + + #[serde(default)] + pub debug: ReplayProtectionDebugV9, +} + +impl ReplayProtectionV9 { + pub fn new_default>(data_dir: P) -> Self { + ReplayProtectionV9 { + storage_paths: ReplayProtectionPathsV9::new(data_dir), + debug: Default::default(), + } + } +} + +pub const DEFAULT_RD_BLOOMFILTER_SUBDIR: &str = "replay-detection"; + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct ReplayProtectionPathsV9 { + /// Path to the directory storing currently used bloomfilter(s). 
+ pub current_bloomfilters_directory: PathBuf, +} + +impl ReplayProtectionPathsV9 { + pub fn new>(data_dir: P) -> Self { + ReplayProtectionPathsV9 { + current_bloomfilters_directory: data_dir.as_ref().join(DEFAULT_RD_BLOOMFILTER_SUBDIR), + } + } +} + +#[derive(Debug, Copy, Clone, Deserialize, PartialEq, Serialize)] +#[serde(default)] +pub struct ReplayProtectionDebugV9 { + /// Specifies whether this node should **NOT** use replay protection + pub unsafe_disabled: bool, + + /// How long the processing task is willing to skip mutex acquisition before it will block the thread + /// until it actually obtains it + pub maximum_replay_detection_deferral: Duration, + + /// How many packets the processing task is willing to queue before it will block the thread + /// until it obtains the mutex + pub maximum_replay_detection_pending_packets: usize, + + /// Probability of false positives, fraction between 0 and 1 or a number indicating 1-in-p + pub false_positive_rate: f64, + + /// Defines initial expected number of packets this node will process a second, + /// so that an initial bloomfilter could be established. + /// As the node is running and BF are cleared, the value will be adjusted dynamically + pub initial_expected_packets_per_second: usize, + + /// Defines minimum expected number of packets this node will process a second + /// when used for calculating the BF size after reset. + /// This is to avoid degenerate cases where node receives 0 packets (because say it's misconfigured) + /// and it constructs an empty bloomfilter. + pub bloomfilter_minimum_packets_per_second_size: usize, + + /// Specifies the amount the bloomfilter size is going to get multiplied by after each reset. + /// It's performed in case the traffic rates increase before the next bloomfilter update. 
+ pub bloomfilter_size_multiplier: f64, + + // NOTE: this field is temporary until replay detection bloomfilter rotation is tied + // to key rotation + /// Specifies how often the bloomfilter is cleared + #[serde(with = "humantime_serde")] + pub bloomfilter_reset_rate: Duration, + + /// Specifies how often the bloomfilter is flushed to disk for recovery in case of a crash + #[serde(with = "humantime_serde")] + pub bloomfilter_disk_flushing_rate: Duration, +} + +impl ReplayProtectionDebugV9 { + pub const DEFAULT_MAXIMUM_REPLAY_DETECTION_DEFERRAL: Duration = Duration::from_millis(50); + + pub const DEFAULT_MAXIMUM_REPLAY_DETECTION_PENDING_PACKETS: usize = 100; + + // 12% (completely arbitrary) + pub const DEFAULT_BLOOMFILTER_SIZE_MULTIPLIER: f64 = 1.12; + + // 10^-5 + pub const DEFAULT_REPLAY_DETECTION_FALSE_POSITIVE_RATE: f64 = 1e-5; + + // 25h (key rotation will be happening every 24h + 1h of overlap) + pub const DEFAULT_REPLAY_DETECTION_BF_RESET_RATE: Duration = Duration::from_secs(25 * 60 * 60); + + // we must have some reasonable balance between losing values and thrashing the disk.
+ // since on average HDD it would take ~30s to save a 2GB bloomfilter + pub const DEFAULT_BF_DISK_FLUSHING_RATE: Duration = Duration::from_secs(10 * 60); + + // this value will have to be adjusted in the future + pub const DEFAULT_INITIAL_EXPECTED_PACKETS_PER_SECOND: usize = 2000; + + pub const DEFAULT_BLOOMFILTER_MINIMUM_PACKETS_PER_SECOND_SIZE: usize = 200; +} + +impl Default for ReplayProtectionDebugV9 { + fn default() -> Self { + ReplayProtectionDebugV9 { + unsafe_disabled: false, + maximum_replay_detection_deferral: Self::DEFAULT_MAXIMUM_REPLAY_DETECTION_DEFERRAL, + maximum_replay_detection_pending_packets: + Self::DEFAULT_MAXIMUM_REPLAY_DETECTION_PENDING_PACKETS, + false_positive_rate: Self::DEFAULT_REPLAY_DETECTION_FALSE_POSITIVE_RATE, + initial_expected_packets_per_second: Self::DEFAULT_INITIAL_EXPECTED_PACKETS_PER_SECOND, + bloomfilter_minimum_packets_per_second_size: + Self::DEFAULT_BLOOMFILTER_MINIMUM_PACKETS_PER_SECOND_SIZE, + bloomfilter_size_multiplier: Self::DEFAULT_BLOOMFILTER_SIZE_MULTIPLIER, + bloomfilter_reset_rate: Self::DEFAULT_REPLAY_DETECTION_BF_RESET_RATE, + bloomfilter_disk_flushing_rate: Self::DEFAULT_BF_DISK_FLUSHING_RATE, + } + } +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct KeysPathsV9 { + /// Path to file containing ed25519 identity private key. + pub private_ed25519_identity_key_file: PathBuf, + + /// Path to file containing ed25519 identity public key. + pub public_ed25519_identity_key_file: PathBuf, + + /// Path to file containing x25519 sphinx private key. + pub private_x25519_sphinx_key_file: PathBuf, + + /// Path to file containing x25519 sphinx public key. + pub public_x25519_sphinx_key_file: PathBuf, + + /// Path to file containing x25519 noise private key. + pub private_x25519_noise_key_file: PathBuf, + + /// Path to file containing x25519 noise public key. 
+ pub public_x25519_noise_key_file: PathBuf, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct NymNodePathsV9 { + pub keys: KeysPathsV9, + + /// Path to a file containing basic node description: human-readable name, website, details, etc. + pub description: PathBuf, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] +#[serde(default)] +#[serde(deny_unknown_fields)] +pub struct HttpV9 { + /// Socket address this node will use for binding its http API. + /// default: `[::]:8080` + pub bind_address: SocketAddr, + + /// Path to assets directory of custom landing page of this node. + #[serde(deserialize_with = "de_maybe_stringified")] + pub landing_page_assets_path: Option, + + /// An optional bearer token for accessing certain http endpoints. + /// Currently only used for obtaining mixnode's stats. + #[serde(default)] + pub access_token: Option, + + /// Specify whether basic system information should be exposed. + /// default: true + pub expose_system_info: bool, + + /// Specify whether basic system hardware information should be exposed. + /// This option is superseded by `expose_system_info` + /// default: true + pub expose_system_hardware: bool, + + /// Specify whether detailed system crypto hardware information should be exposed. + /// This option is superseded by `expose_system_hardware` + /// default: true + pub expose_crypto_hardware: bool, + + /// Specify the cache ttl of the node load. 
+ /// default: 30s + #[serde(with = "humantime_serde")] + pub node_load_cache_ttl: Duration, +} + +impl HttpV9 { + pub const DEFAULT_NODE_LOAD_CACHE_TTL: Duration = Duration::from_secs(30); +} + +impl Default for HttpV9 { + fn default() -> Self { + HttpV9 { + bind_address: SocketAddr::new(inaddr_any(), DEFAULT_HTTP_PORT), + landing_page_assets_path: None, + access_token: None, + expose_system_info: true, + expose_system_hardware: true, + expose_crypto_hardware: true, + node_load_cache_ttl: Self::DEFAULT_NODE_LOAD_CACHE_TTL, + } + } +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct MixnodePathsV9 {} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct DebugV9 { + /// Delay between each subsequent node statistics being logged to the console + #[serde(with = "humantime_serde")] + pub node_stats_logging_delay: Duration, + + /// Delay between each subsequent node statistics being updated + #[serde(with = "humantime_serde")] + pub node_stats_updating_delay: Duration, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct VerlocDebugV9 { + /// Specifies number of echo packets sent to each node during a measurement run. + pub packets_per_node: usize, + + /// Specifies maximum amount of time to wait for the connection to get established. + #[serde(with = "humantime_serde")] + pub connection_timeout: Duration, + + /// Specifies maximum amount of time to wait for the reply packet to arrive before abandoning the test. + #[serde(with = "humantime_serde")] + pub packet_timeout: Duration, + + /// Specifies delay between subsequent test packets being sent (after receiving a reply). + #[serde(with = "humantime_serde")] + pub delay_between_packets: Duration, + + /// Specifies number of nodes being tested at once. + pub tested_nodes_batch_size: usize, + + /// Specifies delay between subsequent test runs. 
+ #[serde(with = "humantime_serde")] + pub testing_interval: Duration, + + /// Specifies delay between attempting to run the measurement again if the previous run failed + /// due to being unable to get the list of nodes. + #[serde(with = "humantime_serde")] + pub retry_timeout: Duration, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct VerlocV9 { + /// Socket address this node will use for binding its verloc API. + /// default: `[::]:1790` + pub bind_address: SocketAddr, + + /// If applicable, custom port announced in the self-described API that other clients and nodes + /// will use. + /// Useful when the node is behind a proxy. + #[serde(deserialize_with = "de_maybe_port")] + #[serde(default)] + pub announce_port: Option, + + #[serde(default)] + pub debug: VerlocDebugV9, +} + +impl VerlocV9 { + pub const DEFAULT_VERLOC_PORT: u16 = DEFAULT_VERLOC_LISTENING_PORT; +} + +impl Default for VerlocV9 { + fn default() -> Self { + VerlocV9 { + bind_address: SocketAddr::new(in6addr_any_init(), Self::DEFAULT_VERLOC_PORT), + announce_port: None, + debug: Default::default(), + } + } +} + +impl VerlocDebugV9 { + const DEFAULT_PACKETS_PER_NODE: usize = 100; + const DEFAULT_CONNECTION_TIMEOUT: Duration = Duration::from_millis(5000); + const DEFAULT_PACKET_TIMEOUT: Duration = Duration::from_millis(1500); + const DEFAULT_DELAY_BETWEEN_PACKETS: Duration = Duration::from_millis(50); + const DEFAULT_BATCH_SIZE: usize = 50; + const DEFAULT_TESTING_INTERVAL: Duration = Duration::from_secs(60 * 60 * 12); + const DEFAULT_RETRY_TIMEOUT: Duration = Duration::from_secs(60 * 30); +} + +impl Default for VerlocDebugV9 { + fn default() -> Self { + VerlocDebugV9 { + packets_per_node: Self::DEFAULT_PACKETS_PER_NODE, + connection_timeout: Self::DEFAULT_CONNECTION_TIMEOUT, + packet_timeout: Self::DEFAULT_PACKET_TIMEOUT, + delay_between_packets: Self::DEFAULT_DELAY_BETWEEN_PACKETS, + tested_nodes_batch_size: Self::DEFAULT_BATCH_SIZE, + 
testing_interval: Self::DEFAULT_TESTING_INTERVAL, + retry_timeout: Self::DEFAULT_RETRY_TIMEOUT, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct MixnodeConfigV9 { + pub storage_paths: MixnodePathsV9, + + pub verloc: VerlocV9, + + #[serde(default)] + pub debug: DebugV9, +} + +impl DebugV9 { + const DEFAULT_NODE_STATS_LOGGING_DELAY: Duration = Duration::from_millis(60_000); + const DEFAULT_NODE_STATS_UPDATING_DELAY: Duration = Duration::from_millis(30_000); +} + +impl Default for DebugV9 { + fn default() -> Self { + DebugV9 { + node_stats_logging_delay: Self::DEFAULT_NODE_STATS_LOGGING_DELAY, + node_stats_updating_delay: Self::DEFAULT_NODE_STATS_UPDATING_DELAY, + } + } +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct EntryGatewayPathsV9 { + /// Path to sqlite database containing all persistent data: messages for offline clients, + /// derived shared keys and available client bandwidths. + pub clients_storage: PathBuf, + + pub stats_storage: PathBuf, + + /// Path to file containing cosmos account mnemonic used for zk-nym redemption. + pub cosmos_mnemonic: PathBuf, + + pub authenticator: AuthenticatorPathsV9, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(default)] +pub struct ZkNymTicketHandlerDebugV9 { + /// Specifies the multiplier for revoking a malformed/double-spent ticket + /// (if it has to go all the way to the nym-api for verification) + /// e.g. if one ticket grants 100Mb and `revocation_bandwidth_penalty` is set to 1.5, + /// the client will lose 150Mb + pub revocation_bandwidth_penalty: f32, + + /// Specifies the interval for attempting to resolve any failed, pending operations, + /// such as ticket verification or redemption. + #[serde(with = "humantime_serde")] + pub pending_poller: Duration, + + pub minimum_api_quorum: f32, + + /// Specifies the minimum number of tickets this gateway will attempt to redeem. 
+ pub minimum_redemption_tickets: usize, + + /// Specifies the maximum time between two subsequent tickets redemptions. + /// That's required as nym-apis will purge all ticket information for tickets older than maximum validity. + #[serde(with = "humantime_serde")] + pub maximum_time_between_redemption: Duration, +} + +impl ZkNymTicketHandlerDebugV9 { + pub const DEFAULT_REVOCATION_BANDWIDTH_PENALTY: f32 = 10.0; + pub const DEFAULT_PENDING_POLLER: Duration = Duration::from_secs(300); + pub const DEFAULT_MINIMUM_API_QUORUM: f32 = 0.8; + pub const DEFAULT_MINIMUM_REDEMPTION_TICKETS: usize = 100; + + // use min(4/5 of max validity, validity - 1), but making sure it's no greater than 1 day + // ASSUMPTION: our validity period is AT LEAST 2 days + // + // this could have been a constant, but it's more readable as a function + pub const fn default_maximum_time_between_redemption() -> Duration { + let desired_secs = TICKETBOOK_VALIDITY_DAYS * (86400 * 4) / 5; + let desired_secs_alt = (TICKETBOOK_VALIDITY_DAYS - 1) * 86400; + + // can't use `min` in const context + let target_secs = if desired_secs < desired_secs_alt { + desired_secs + } else { + desired_secs_alt + }; + + assert!( + target_secs > 86400, + "the maximum time between redemption can't be lower than 1 day!" + ); + Duration::from_secs(target_secs as u64) + } +} + +impl Default for ZkNymTicketHandlerDebugV9 { + fn default() -> Self { + ZkNymTicketHandlerDebugV9 { + revocation_bandwidth_penalty: Self::DEFAULT_REVOCATION_BANDWIDTH_PENALTY, + pending_poller: Self::DEFAULT_PENDING_POLLER, + minimum_api_quorum: Self::DEFAULT_MINIMUM_API_QUORUM, + minimum_redemption_tickets: Self::DEFAULT_MINIMUM_REDEMPTION_TICKETS, + maximum_time_between_redemption: Self::default_maximum_time_between_redemption(), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct EntryGatewayConfigDebugV9 { + /// Number of messages from offline client that can be pulled at once (i.e. 
with a single SQL query) from the storage. + pub message_retrieval_limit: i64, + pub zk_nym_tickets: ZkNymTicketHandlerDebugV9, +} + +impl EntryGatewayConfigDebugV9 { + const DEFAULT_MESSAGE_RETRIEVAL_LIMIT: i64 = 100; +} + +impl Default for EntryGatewayConfigDebugV9 { + fn default() -> Self { + EntryGatewayConfigDebugV9 { + message_retrieval_limit: Self::DEFAULT_MESSAGE_RETRIEVAL_LIMIT, + zk_nym_tickets: Default::default(), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct EntryGatewayConfigV9 { + pub storage_paths: EntryGatewayPathsV9, + + /// Indicates whether this gateway is accepting only coconut credentials for accessing the mixnet + /// or if it also accepts non-paying clients + pub enforce_zk_nyms: bool, + + /// Socket address this node will use for binding its client websocket API. + /// default: `[::]:9000` + pub bind_address: SocketAddr, + + /// Custom announced port for listening for websocket client traffic. + /// If unspecified, the value from the `bind_address` will be used instead + /// default: None + #[serde(deserialize_with = "de_maybe_port")] + pub announce_ws_port: Option, + + /// If applicable, announced port for listening for secure websocket client traffic. + /// (default: None) + #[serde(deserialize_with = "de_maybe_port")] + pub announce_wss_port: Option, + + #[serde(default)] + pub debug: EntryGatewayConfigDebugV9, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct NetworkRequesterPathsV9 { + /// Path to file containing network requester ed25519 identity private key. + pub private_ed25519_identity_key_file: PathBuf, + + /// Path to file containing network requester ed25519 identity public key. + pub public_ed25519_identity_key_file: PathBuf, + + /// Path to file containing network requester x25519 diffie hellman private key. 
+ pub private_x25519_diffie_hellman_key_file: PathBuf, + + /// Path to file containing network requester x25519 diffie hellman public key. + pub public_x25519_diffie_hellman_key_file: PathBuf, + + /// Path to file containing key used for encrypting and decrypting the content of an + /// acknowledgement so that nobody besides the client knows which packet it refers to. + pub ack_key_file: PathBuf, + + /// Path to the persistent store for received reply surbs, unused encryption keys and used sender tags. + pub reply_surb_database: PathBuf, + + /// Normally this is a path to the file containing information about gateways used by this client, + /// i.e. details such as their public keys, owner addresses or the network information. + /// but in this case it just has the basic information of "we're using custom gateway". + /// Due to how clients are started up, this file has to exist. + pub gateway_registrations: PathBuf, + // it's possible we might have to add credential storage here for return tickets +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct IpPacketRouterPathsV9 { + /// Path to file containing ip packet router ed25519 identity private key. + pub private_ed25519_identity_key_file: PathBuf, + + /// Path to file containing ip packet router ed25519 identity public key. + pub public_ed25519_identity_key_file: PathBuf, + + /// Path to file containing ip packet router x25519 diffie hellman private key. + pub private_x25519_diffie_hellman_key_file: PathBuf, + + /// Path to file containing ip packet router x25519 diffie hellman public key. + pub public_x25519_diffie_hellman_key_file: PathBuf, + + /// Path to file containing key used for encrypting and decrypting the content of an + /// acknowledgement so that nobody besides the client knows which packet it refers to. + pub ack_key_file: PathBuf, + + /// Path to the persistent store for received reply surbs, unused encryption keys and used sender tags. 
+ pub reply_surb_database: PathBuf, + + /// Normally this is a path to the file containing information about gateways used by this client, + /// i.e. details such as their public keys, owner addresses or the network information. + /// but in this case it just has the basic information of "we're using custom gateway". + /// Due to how clients are started up, this file has to exist. + pub gateway_registrations: PathBuf, + // it's possible we might have to add credential storage here for return tickets +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct AuthenticatorPathsV9 { + /// Path to file containing authenticator ed25519 identity private key. + pub private_ed25519_identity_key_file: PathBuf, + + /// Path to file containing authenticator ed25519 identity public key. + pub public_ed25519_identity_key_file: PathBuf, + + /// Path to file containing authenticator x25519 diffie hellman private key. + pub private_x25519_diffie_hellman_key_file: PathBuf, + + /// Path to file containing authenticator x25519 diffie hellman public key. + pub public_x25519_diffie_hellman_key_file: PathBuf, + + /// Path to file containing key used for encrypting and decrypting the content of an + /// acknowledgement so that nobody besides the client knows which packet it refers to. + pub ack_key_file: PathBuf, + + /// Path to the persistent store for received reply surbs, unused encryption keys and used sender tags. + pub reply_surb_database: PathBuf, + + /// Normally this is a path to the file containing information about gateways used by this client, + /// i.e. details such as their public keys, owner addresses or the network information. + /// but in this case it just has the basic information of "we're using custom gateway". + /// Due to how clients are started up, this file has to exist. 
+ pub gateway_registrations: PathBuf, + // it's possible we might have to add credential storage here for return tickets +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct ExitGatewayPathsV9 { + pub clients_storage: PathBuf, + + pub stats_storage: PathBuf, + + pub network_requester: NetworkRequesterPathsV9, + + pub ip_packet_router: IpPacketRouterPathsV9, + + pub authenticator: AuthenticatorPathsV9, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] +pub struct AuthenticatorV9 { + #[serde(default)] + pub debug: AuthenticatorDebugV9, +} + +#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] +#[serde(default)] +pub struct AuthenticatorDebugV9 { + /// Specifies whether authenticator service is enabled in this process. + /// This is only here for debugging purposes as exit gateway should always run + /// the authenticator. + pub enabled: bool, + + /// Disable Poisson sending rate. + /// This is equivalent to setting client_debug.traffic.disable_main_poisson_packet_distribution = true + /// (or is it (?)) + pub disable_poisson_rate: bool, + + /// Shared detailed client configuration options + #[serde(flatten)] + pub client_debug: ClientDebugConfig, +} + +impl Default for AuthenticatorDebugV9 { + fn default() -> Self { + AuthenticatorDebugV9 { + enabled: true, + disable_poisson_rate: true, + client_debug: Default::default(), + } + } +} + +#[allow(clippy::derivable_impls)] +impl Default for AuthenticatorV9 { + fn default() -> Self { + AuthenticatorV9 { + debug: Default::default(), + } + } +} + +#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] +#[serde(default)] +pub struct IpPacketRouterDebugV9 { + /// Specifies whether ip packet routing service is enabled in this process. + /// This is only here for debugging purposes as exit gateway should always run **both** + /// network requester and an ip packet router. + pub enabled: bool, + + /// Disable Poisson sending rate. 
+ /// This is equivalent to setting client_debug.traffic.disable_main_poisson_packet_distribution = true + /// (or is it (?)) + pub disable_poisson_rate: bool, + + /// Shared detailed client configuration options + #[serde(flatten)] + pub client_debug: ClientDebugConfig, +} + +impl Default for IpPacketRouterDebugV9 { + fn default() -> Self { + IpPacketRouterDebugV9 { + enabled: true, + disable_poisson_rate: true, + client_debug: Default::default(), + } + } +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Serialize)] +pub struct IpPacketRouterV9 { + #[serde(default)] + pub debug: IpPacketRouterDebugV9, +} + +#[allow(clippy::derivable_impls)] +impl Default for IpPacketRouterV9 { + fn default() -> Self { + IpPacketRouterV9 { + debug: Default::default(), + } + } +} + +#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] +pub struct NetworkRequesterDebugV9 { + /// Specifies whether network requester service is enabled in this process. + /// This is only here for debugging purposes as exit gateway should always run **both** + /// network requester and an ip packet router. + pub enabled: bool, + + /// Disable Poisson sending rate. 
+ /// This is equivalent to setting client_debug.traffic.disable_main_poisson_packet_distribution = true + /// (or is it (?)) + pub disable_poisson_rate: bool, + + /// Shared detailed client configuration options + #[serde(flatten)] + pub client_debug: ClientDebugConfig, +} + +impl Default for NetworkRequesterDebugV9 { + fn default() -> Self { + NetworkRequesterDebugV9 { + enabled: true, + disable_poisson_rate: true, + client_debug: Default::default(), + } + } +} + +#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] +pub struct NetworkRequesterV9 { + #[serde(default)] + pub debug: NetworkRequesterDebugV9, +} + +#[allow(clippy::derivable_impls)] +impl Default for NetworkRequesterV9 { + fn default() -> Self { + NetworkRequesterV9 { + debug: Default::default(), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ExitGatewayDebugV9 { + /// Number of messages from offline client that can be pulled at once (i.e. with a single SQL query) from the storage. + pub message_retrieval_limit: i64, +} + +impl ExitGatewayDebugV9 { + const DEFAULT_MESSAGE_RETRIEVAL_LIMIT: i64 = 100; +} + +impl Default for ExitGatewayDebugV9 { + fn default() -> Self { + ExitGatewayDebugV9 { + message_retrieval_limit: Self::DEFAULT_MESSAGE_RETRIEVAL_LIMIT, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ExitGatewayConfigV9 { + pub storage_paths: ExitGatewayPathsV9, + + /// specifies whether this exit node should run in 'open-proxy' mode + /// and thus would attempt to resolve **ANY** request it receives. + pub open_proxy: bool, + + /// Specifies the url for an upstream source of the exit policy used by this node. 
+ pub upstream_exit_policy_url: Url, + + pub network_requester: NetworkRequesterV9, + + pub ip_packet_router: IpPacketRouterV9, + + #[serde(default)] + pub debug: ExitGatewayDebugV9, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct GatewayTasksPathsV9 { + /// Path to sqlite database containing all persistent data: messages for offline clients, + /// derived shared keys, available client bandwidths and wireguard peers. + pub clients_storage: PathBuf, + + /// Path to sqlite database containing all persistent stats data. + pub stats_storage: PathBuf, + + /// Path to file containing cosmos account mnemonic used for zk-nym redemption. + pub cosmos_mnemonic: PathBuf, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct StaleMessageDebugV9 { + /// Specifies how often the clean-up task should check for stale data. + #[serde(with = "humantime_serde")] + pub cleaner_run_interval: Duration, + + /// Specifies maximum age of stored messages before they are removed from the storage + #[serde(with = "humantime_serde")] + pub max_age: Duration, +} + +impl StaleMessageDebugV9 { + const DEFAULT_STALE_MESSAGES_CLEANER_RUN_INTERVAL: Duration = Duration::from_secs(60 * 60); + const DEFAULT_STALE_MESSAGES_MAX_AGE: Duration = Duration::from_secs(24 * 60 * 60); +} + +impl Default for StaleMessageDebugV9 { + fn default() -> Self { + StaleMessageDebugV9 { + cleaner_run_interval: Self::DEFAULT_STALE_MESSAGES_CLEANER_RUN_INTERVAL, + max_age: Self::DEFAULT_STALE_MESSAGES_MAX_AGE, + } + } +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct ClientBandwidthDebugV9 { + /// Defines maximum delay between client bandwidth information being flushed to the persistent storage. + pub max_flushing_rate: Duration, + + /// Defines a maximum change in client bandwidth before it gets flushed to the persistent storage. 
+ pub max_delta_flushing_amount: i64, +} + +impl ClientBandwidthDebugV9 { + const DEFAULT_CLIENT_BANDWIDTH_MAX_FLUSHING_RATE: Duration = Duration::from_millis(5); + const DEFAULT_CLIENT_BANDWIDTH_MAX_DELTA_FLUSHING_AMOUNT: i64 = 512 * 1024; // 512kB +} + +impl Default for ClientBandwidthDebugV9 { + fn default() -> Self { + ClientBandwidthDebugV9 { + max_flushing_rate: Self::DEFAULT_CLIENT_BANDWIDTH_MAX_FLUSHING_RATE, + max_delta_flushing_amount: Self::DEFAULT_CLIENT_BANDWIDTH_MAX_DELTA_FLUSHING_AMOUNT, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +#[serde(default)] +pub struct GatewayTasksConfigDebugV9 { + /// Number of messages from offline client that can be pulled at once (i.e. with a single SQL query) from the storage. + pub message_retrieval_limit: i64, + + /// The maximum number of client connections the gateway will keep open at once. + pub maximum_open_connections: usize, + + /// Specifies the minimum performance of mixnodes in the network that are to be used in internal topologies + /// of the services providers + pub minimum_mix_performance: u8, + + /// Defines the timestamp skew of a signed authentication request before it's deemed too excessive to process. 
+ #[serde(alias = "maximum_auth_request_age")] + pub max_request_timestamp_skew: Duration, + + pub stale_messages: StaleMessageDebugV9, + + pub client_bandwidth: ClientBandwidthDebugV9, + + pub zk_nym_tickets: ZkNymTicketHandlerDebugV9, +} + +impl GatewayTasksConfigDebugV9 { + pub const DEFAULT_MESSAGE_RETRIEVAL_LIMIT: i64 = 100; + pub const DEFAULT_MINIMUM_MIX_PERFORMANCE: u8 = 50; + pub const DEFAULT_MAXIMUM_AUTH_REQUEST_TIMESTAMP_SKEW: Duration = Duration::from_secs(120); + pub const DEFAULT_MAXIMUM_OPEN_CONNECTIONS: usize = 8192; +} + +impl Default for GatewayTasksConfigDebugV9 { + fn default() -> Self { + GatewayTasksConfigDebugV9 { + message_retrieval_limit: Self::DEFAULT_MESSAGE_RETRIEVAL_LIMIT, + maximum_open_connections: Self::DEFAULT_MAXIMUM_OPEN_CONNECTIONS, + max_request_timestamp_skew: Self::DEFAULT_MAXIMUM_AUTH_REQUEST_TIMESTAMP_SKEW, + minimum_mix_performance: Self::DEFAULT_MINIMUM_MIX_PERFORMANCE, + stale_messages: Default::default(), + client_bandwidth: Default::default(), + zk_nym_tickets: Default::default(), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct GatewayTasksConfigV9 { + pub storage_paths: GatewayTasksPathsV9, + + /// Indicates whether this gateway is accepting only zk-nym credentials for accessing the mixnet + /// or if it also accepts non-paying clients + pub enforce_zk_nyms: bool, + + /// Socket address this node will use for binding its client websocket API. + /// default: `[::]:9000` + pub ws_bind_address: SocketAddr, + + /// Custom announced port for listening for websocket client traffic. + /// If unspecified, the value from the `bind_address` will be used instead + /// default: None + #[serde(deserialize_with = "de_maybe_port")] + pub announce_ws_port: Option, + + /// If applicable, announced port for listening for secure websocket client traffic. 
+ /// (default: None) + #[serde(deserialize_with = "de_maybe_port")] + pub announce_wss_port: Option, + + #[serde(default)] + pub debug: GatewayTasksConfigDebugV9, +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct ServiceProvidersPathsV9 { + /// Path to sqlite database containing all persistent data: messages for offline clients, + /// derived shared keys, available client bandwidths and wireguard peers. + pub clients_storage: PathBuf, + + /// Path to sqlite database containing all persistent stats data. + pub stats_storage: PathBuf, + + pub network_requester: NetworkRequesterPathsV9, + + pub ip_packet_router: IpPacketRouterPathsV9, + + pub authenticator: AuthenticatorPathsV9, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ServiceProvidersConfigDebugV9 { + /// Number of messages from offline client that can be pulled at once (i.e. with a single SQL query) from the storage. + pub message_retrieval_limit: i64, +} + +impl ServiceProvidersConfigDebugV9 { + const DEFAULT_MESSAGE_RETRIEVAL_LIMIT: i64 = 100; +} + +impl Default for ServiceProvidersConfigDebugV9 { + fn default() -> Self { + ServiceProvidersConfigDebugV9 { + message_retrieval_limit: Self::DEFAULT_MESSAGE_RETRIEVAL_LIMIT, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ServiceProvidersConfigV9 { + pub storage_paths: ServiceProvidersPathsV9, + + /// specifies whether this exit node should run in 'open-proxy' mode + /// and thus would attempt to resolve **ANY** request it receives. + pub open_proxy: bool, + + /// Specifies the url for an upstream source of the exit policy used by this node. 
+ pub upstream_exit_policy_url: Url, + + pub network_requester: NetworkRequesterV9, + + pub ip_packet_router: IpPacketRouterV9, + + pub authenticator: AuthenticatorV9, + + #[serde(default)] + pub debug: ServiceProvidersConfigDebugV9, +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct MetricsConfigV9 { + #[serde(default)] + pub debug: MetricsDebugV9, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct MetricsDebugV9 { + /// Specify whether running statistics of this node should be logged to the console. + pub log_stats_to_console: bool, + + /// Specify the rate of which the metrics aggregator should call the `on_update` methods of all its registered handlers. + #[serde(with = "humantime_serde")] + pub aggregator_update_rate: Duration, + + /// Specify the target rate of clearing old stale mixnet metrics. + #[serde(with = "humantime_serde")] + pub stale_mixnet_metrics_cleaner_rate: Duration, + + /// Specify the target rate of updating global prometheus counters. + #[serde(with = "humantime_serde")] + pub global_prometheus_counters_update_rate: Duration, + + /// Specify the target rate of updating egress packets pending delivery counter. 
+ #[serde(with = "humantime_serde")] + pub pending_egress_packets_update_rate: Duration, + + /// Specify the rate of updating clients sessions + #[serde(with = "humantime_serde")] + pub clients_sessions_update_rate: Duration, + + /// If console logging is enabled, specify the interval at which that happens + #[serde(with = "humantime_serde")] + pub console_logging_update_interval: Duration, + + /// Specify the update rate of running stats for the legacy `/metrics/mixing` endpoint + #[serde(with = "humantime_serde")] + pub legacy_mixing_metrics_update_rate: Duration, +} + +impl MetricsDebugV9 { + const DEFAULT_CONSOLE_LOGGING_INTERVAL: Duration = Duration::from_millis(60_000); + const DEFAULT_LEGACY_MIXING_UPDATE_RATE: Duration = Duration::from_millis(30_000); + const DEFAULT_AGGREGATOR_UPDATE_RATE: Duration = Duration::from_secs(5); + const DEFAULT_STALE_MIXNET_METRICS_UPDATE_RATE: Duration = Duration::from_secs(3600); + const DEFAULT_CLIENT_SESSIONS_UPDATE_RATE: Duration = Duration::from_secs(3600); + const GLOBAL_PROMETHEUS_COUNTERS_UPDATE_INTERVAL: Duration = Duration::from_secs(30); + const DEFAULT_PENDING_EGRESS_PACKETS_UPDATE_RATE: Duration = Duration::from_secs(30); +} + +impl Default for MetricsDebugV9 { + fn default() -> Self { + MetricsDebugV9 { + log_stats_to_console: true, + console_logging_update_interval: Self::DEFAULT_CONSOLE_LOGGING_INTERVAL, + legacy_mixing_metrics_update_rate: Self::DEFAULT_LEGACY_MIXING_UPDATE_RATE, + aggregator_update_rate: Self::DEFAULT_AGGREGATOR_UPDATE_RATE, + stale_mixnet_metrics_cleaner_rate: Self::DEFAULT_STALE_MIXNET_METRICS_UPDATE_RATE, + global_prometheus_counters_update_rate: + Self::GLOBAL_PROMETHEUS_COUNTERS_UPDATE_INTERVAL, + pending_egress_packets_update_rate: Self::DEFAULT_PENDING_EGRESS_PACKETS_UPDATE_RATE, + clients_sessions_update_rate: Self::DEFAULT_CLIENT_SESSIONS_UPDATE_RATE, + } + } +} + +#[derive(Debug, Default, Copy, Clone, Deserialize, PartialEq, Eq, Serialize)] +#[serde(deny_unknown_fields)] +pub struct 
LoggingSettingsV9 { + // well, we need to implement something here at some point... +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ConfigV9 { + // additional metadata holding on-disk location of this config file + #[serde(skip)] + pub(crate) save_path: Option, + + /// Human-readable ID of this particular node. + pub id: String, + + /// Current modes of this nym-node. + pub modes: NodeModesV9, + + pub host: HostV9, + + pub mixnet: MixnetV9, + + /// Storage paths to persistent nym-node data, such as its long term keys. + pub storage_paths: NymNodePathsV9, + + #[serde(default)] + pub http: HttpV9, + + #[serde(default)] + pub verloc: VerlocV9, + + pub wireguard: WireguardV9, + + #[serde(alias = "entry_gateway")] + pub gateway_tasks: GatewayTasksConfigV9, + + #[serde(alias = "exit_gateway")] + pub service_providers: ServiceProvidersConfigV9, + + #[serde(default)] + pub metrics: MetricsConfigV9, + + #[serde(default)] + pub logging: LoggingSettingsV9, + + #[serde(default)] + pub debug: DebugV9, +} + +impl ConfigV9 { + // simple wrapper that reads config file and assigns path location + fn read_from_path>(path: P) -> Result { + let path = path.as_ref(); + let mut loaded: ConfigV9 = + read_config_from_toml_file(path).map_err(|source| NymNodeError::ConfigLoadFailure { + path: path.to_path_buf(), + source, + })?; + loaded.save_path = Some(path.to_path_buf()); + debug!("loaded config file from {}", path.display()); + Ok(loaded) + } +} + +async fn upgrade_sphinx_key(old_cfg: &ConfigV9) -> Result<(PathBuf, PathBuf), NymNodeError> { + let current_sphinx_key_path = &old_cfg.storage_paths.keys.private_x25519_sphinx_key_file; + let current_pubkey_path = &old_cfg.storage_paths.keys.public_x25519_sphinx_key_file; + + let current_sphinx_key: x25519::PrivateKey = + load_key(current_sphinx_key_path, "sphinx private key")?; + + let keys_dir = current_sphinx_key_path + .parent() + .ok_or(NymNodeError::DataDirDerivationFailure)?; + + let 
primary_key_path = keys_dir.join(DEFAULT_PRIMARY_X25519_SPHINX_KEY_FILENAME); + let secondary_key_path = keys_dir.join(DEFAULT_SECONDARY_X25519_SPHINX_KEY_FILENAME); + + // we mark the current sphinx key as the primary and attach the current rotation id + let rotation_id = + get_current_rotation_id(&old_cfg.mixnet.nym_api_urls, &old_cfg.mixnet.nyxd_urls).await?; + + let primary_key = SphinxPrivateKey::import(current_sphinx_key, rotation_id); + store_key(&primary_key, &primary_key_path, "sphinx private key")?; + + // no point in keeping the old sphinx files + fs::remove_file(current_sphinx_key_path).map_err(|err| KeyIOFailure::KeyRemovalFailure { + key: "sphinx private key".to_string(), + path: current_sphinx_key_path.clone(), + err, + })?; + fs::remove_file(current_pubkey_path).map_err(|err| KeyIOFailure::KeyRemovalFailure { + key: "sphinx public key".to_string(), + path: current_pubkey_path.clone(), + err, + })?; + + Ok((primary_key_path, secondary_key_path)) +} + +#[instrument(skip_all)] +pub async fn try_upgrade_config_v9>( + path: P, + prev_config: Option, +) -> Result { + debug!("attempting to load v9 config..."); + + let old_cfg = if let Some(prev_config) = prev_config { + prev_config + } else { + ConfigV9::read_from_path(&path)? 
+ }; + + let (primary_x25519_sphinx_key_file, secondary_x25519_sphinx_key_file) = + upgrade_sphinx_key(&old_cfg).await?; + + let cfg = Config { + save_path: old_cfg.save_path, + id: old_cfg.id, + modes: NodeModes { + mixnode: old_cfg.modes.mixnode, + entry: old_cfg.modes.entry, + exit: old_cfg.modes.exit, + }, + host: Host { + public_ips: old_cfg.host.public_ips, + hostname: old_cfg.host.hostname, + location: old_cfg.host.location, + }, + mixnet: Mixnet { + bind_address: old_cfg.mixnet.bind_address, + announce_port: old_cfg.mixnet.announce_port, + nym_api_urls: old_cfg.mixnet.nym_api_urls, + nyxd_urls: old_cfg.mixnet.nyxd_urls, + replay_protection: ReplayProtection { + storage_paths: ReplayProtectionPaths { + current_bloomfilters_directory: old_cfg + .mixnet + .replay_protection + .storage_paths + .current_bloomfilters_directory, + }, + debug: ReplayProtectionDebug { + unsafe_disabled: old_cfg.mixnet.replay_protection.debug.unsafe_disabled, + maximum_replay_detection_deferral: old_cfg + .mixnet + .replay_protection + .debug + .maximum_replay_detection_deferral, + maximum_replay_detection_pending_packets: old_cfg + .mixnet + .replay_protection + .debug + .maximum_replay_detection_pending_packets, + false_positive_rate: old_cfg.mixnet.replay_protection.debug.false_positive_rate, + initial_expected_packets_per_second: old_cfg + .mixnet + .replay_protection + .debug + .initial_expected_packets_per_second, + bloomfilter_minimum_packets_per_second_size: old_cfg + .mixnet + .replay_protection + .debug + .bloomfilter_minimum_packets_per_second_size, + bloomfilter_size_multiplier: old_cfg + .mixnet + .replay_protection + .debug + .bloomfilter_size_multiplier, + bloomfilter_disk_flushing_rate: old_cfg + .mixnet + .replay_protection + .debug + .bloomfilter_disk_flushing_rate, + }, + }, + key_rotation: Default::default(), + debug: MixnetDebug { + maximum_forward_packet_delay: old_cfg.mixnet.debug.maximum_forward_packet_delay, + packet_forwarding_initial_backoff: old_cfg + 
.mixnet + .debug + .packet_forwarding_initial_backoff, + packet_forwarding_maximum_backoff: old_cfg + .mixnet + .debug + .packet_forwarding_maximum_backoff, + initial_connection_timeout: old_cfg.mixnet.debug.initial_connection_timeout, + maximum_connection_buffer_size: old_cfg.mixnet.debug.maximum_connection_buffer_size, + unsafe_disable_noise: old_cfg.mixnet.debug.unsafe_disable_noise, + }, + }, + storage_paths: NymNodePaths { + keys: KeysPaths { + private_ed25519_identity_key_file: old_cfg + .storage_paths + .keys + .private_ed25519_identity_key_file, + public_ed25519_identity_key_file: old_cfg + .storage_paths + .keys + .public_ed25519_identity_key_file, + primary_x25519_sphinx_key_file, + private_x25519_noise_key_file: old_cfg + .storage_paths + .keys + .private_x25519_noise_key_file, + public_x25519_noise_key_file: old_cfg + .storage_paths + .keys + .public_x25519_noise_key_file, + secondary_x25519_sphinx_key_file, + }, + description: old_cfg.storage_paths.description, + }, + http: Http { + bind_address: old_cfg.http.bind_address, + landing_page_assets_path: old_cfg.http.landing_page_assets_path, + access_token: old_cfg.http.access_token, + expose_system_info: old_cfg.http.expose_system_info, + expose_system_hardware: old_cfg.http.expose_system_hardware, + expose_crypto_hardware: old_cfg.http.expose_crypto_hardware, + node_load_cache_ttl: old_cfg.http.node_load_cache_ttl, + }, + verloc: Verloc { + bind_address: old_cfg.verloc.bind_address, + announce_port: old_cfg.verloc.announce_port, + debug: VerlocDebug { + packets_per_node: old_cfg.verloc.debug.packets_per_node, + connection_timeout: old_cfg.verloc.debug.connection_timeout, + packet_timeout: old_cfg.verloc.debug.packet_timeout, + delay_between_packets: old_cfg.verloc.debug.delay_between_packets, + tested_nodes_batch_size: old_cfg.verloc.debug.tested_nodes_batch_size, + testing_interval: old_cfg.verloc.debug.testing_interval, + retry_timeout: old_cfg.verloc.debug.retry_timeout, + }, + }, + wireguard: 
Wireguard { + enabled: old_cfg.wireguard.enabled, + bind_address: old_cfg.wireguard.bind_address, + private_ipv4: old_cfg.wireguard.private_ipv4, + private_ipv6: old_cfg.wireguard.private_ipv6, + announced_port: old_cfg.wireguard.announced_port, + private_network_prefix_v4: old_cfg.wireguard.private_network_prefix_v4, + private_network_prefix_v6: old_cfg.wireguard.private_network_prefix_v6, + storage_paths: WireguardPaths { + private_diffie_hellman_key_file: old_cfg + .wireguard + .storage_paths + .private_diffie_hellman_key_file, + public_diffie_hellman_key_file: old_cfg + .wireguard + .storage_paths + .public_diffie_hellman_key_file, + }, + }, + gateway_tasks: GatewayTasksConfig { + storage_paths: GatewayTasksPaths { + clients_storage: old_cfg.gateway_tasks.storage_paths.clients_storage, + stats_storage: old_cfg.gateway_tasks.storage_paths.stats_storage, + cosmos_mnemonic: old_cfg.gateway_tasks.storage_paths.cosmos_mnemonic, + }, + enforce_zk_nyms: old_cfg.gateway_tasks.enforce_zk_nyms, + ws_bind_address: old_cfg.gateway_tasks.ws_bind_address, + announce_ws_port: old_cfg.gateway_tasks.announce_ws_port, + announce_wss_port: old_cfg.gateway_tasks.announce_wss_port, + debug: gateway_tasks::Debug { + message_retrieval_limit: old_cfg.gateway_tasks.debug.message_retrieval_limit, + maximum_open_connections: old_cfg.gateway_tasks.debug.maximum_open_connections, + minimum_mix_performance: old_cfg.gateway_tasks.debug.minimum_mix_performance, + max_request_timestamp_skew: old_cfg.gateway_tasks.debug.max_request_timestamp_skew, + stale_messages: StaleMessageDebug { + cleaner_run_interval: old_cfg + .gateway_tasks + .debug + .stale_messages + .cleaner_run_interval, + max_age: old_cfg.gateway_tasks.debug.stale_messages.max_age, + }, + client_bandwidth: ClientBandwidthDebug { + max_flushing_rate: old_cfg + .gateway_tasks + .debug + .client_bandwidth + .max_flushing_rate, + max_delta_flushing_amount: old_cfg + .gateway_tasks + .debug + .client_bandwidth + 
.max_delta_flushing_amount, + }, + zk_nym_tickets: ZkNymTicketHandlerDebug { + revocation_bandwidth_penalty: old_cfg + .gateway_tasks + .debug + .zk_nym_tickets + .revocation_bandwidth_penalty, + pending_poller: old_cfg.gateway_tasks.debug.zk_nym_tickets.pending_poller, + minimum_api_quorum: old_cfg + .gateway_tasks + .debug + .zk_nym_tickets + .minimum_api_quorum, + minimum_redemption_tickets: old_cfg + .gateway_tasks + .debug + .zk_nym_tickets + .minimum_redemption_tickets, + maximum_time_between_redemption: old_cfg + .gateway_tasks + .debug + .zk_nym_tickets + .maximum_time_between_redemption, + }, + }, + }, + service_providers: ServiceProvidersConfig { + storage_paths: ServiceProvidersPaths { + clients_storage: old_cfg.service_providers.storage_paths.clients_storage, + stats_storage: old_cfg.service_providers.storage_paths.stats_storage, + network_requester: NetworkRequesterPaths { + private_ed25519_identity_key_file: old_cfg + .service_providers + .storage_paths + .network_requester + .private_ed25519_identity_key_file, + public_ed25519_identity_key_file: old_cfg + .service_providers + .storage_paths + .network_requester + .public_ed25519_identity_key_file, + private_x25519_diffie_hellman_key_file: old_cfg + .service_providers + .storage_paths + .network_requester + .private_x25519_diffie_hellman_key_file, + public_x25519_diffie_hellman_key_file: old_cfg + .service_providers + .storage_paths + .network_requester + .public_x25519_diffie_hellman_key_file, + ack_key_file: old_cfg + .service_providers + .storage_paths + .network_requester + .ack_key_file, + reply_surb_database: old_cfg + .service_providers + .storage_paths + .network_requester + .reply_surb_database, + gateway_registrations: old_cfg + .service_providers + .storage_paths + .network_requester + .gateway_registrations, + }, + ip_packet_router: IpPacketRouterPaths { + private_ed25519_identity_key_file: old_cfg + .service_providers + .storage_paths + .ip_packet_router + 
.private_ed25519_identity_key_file, + public_ed25519_identity_key_file: old_cfg + .service_providers + .storage_paths + .ip_packet_router + .public_ed25519_identity_key_file, + private_x25519_diffie_hellman_key_file: old_cfg + .service_providers + .storage_paths + .ip_packet_router + .private_x25519_diffie_hellman_key_file, + public_x25519_diffie_hellman_key_file: old_cfg + .service_providers + .storage_paths + .ip_packet_router + .public_x25519_diffie_hellman_key_file, + ack_key_file: old_cfg + .service_providers + .storage_paths + .ip_packet_router + .ack_key_file, + reply_surb_database: old_cfg + .service_providers + .storage_paths + .ip_packet_router + .reply_surb_database, + gateway_registrations: old_cfg + .service_providers + .storage_paths + .ip_packet_router + .gateway_registrations, + }, + authenticator: AuthenticatorPaths { + private_ed25519_identity_key_file: old_cfg + .service_providers + .storage_paths + .authenticator + .private_ed25519_identity_key_file, + public_ed25519_identity_key_file: old_cfg + .service_providers + .storage_paths + .authenticator + .public_ed25519_identity_key_file, + private_x25519_diffie_hellman_key_file: old_cfg + .service_providers + .storage_paths + .authenticator + .private_x25519_diffie_hellman_key_file, + public_x25519_diffie_hellman_key_file: old_cfg + .service_providers + .storage_paths + .authenticator + .public_x25519_diffie_hellman_key_file, + ack_key_file: old_cfg + .service_providers + .storage_paths + .authenticator + .ack_key_file, + reply_surb_database: old_cfg + .service_providers + .storage_paths + .authenticator + .reply_surb_database, + gateway_registrations: old_cfg + .service_providers + .storage_paths + .authenticator + .gateway_registrations, + }, + }, + open_proxy: old_cfg.service_providers.open_proxy, + upstream_exit_policy_url: old_cfg.service_providers.upstream_exit_policy_url, + network_requester: NetworkRequester { + debug: NetworkRequesterDebug { + enabled: 
old_cfg.service_providers.network_requester.debug.enabled, + disable_poisson_rate: old_cfg + .service_providers + .network_requester + .debug + .disable_poisson_rate, + client_debug: old_cfg + .service_providers + .network_requester + .debug + .client_debug, + }, + }, + ip_packet_router: IpPacketRouter { + debug: IpPacketRouterDebug { + enabled: old_cfg.service_providers.ip_packet_router.debug.enabled, + disable_poisson_rate: old_cfg + .service_providers + .ip_packet_router + .debug + .disable_poisson_rate, + client_debug: old_cfg + .service_providers + .ip_packet_router + .debug + .client_debug, + }, + }, + authenticator: Authenticator { + debug: AuthenticatorDebug { + enabled: old_cfg.service_providers.authenticator.debug.enabled, + disable_poisson_rate: old_cfg + .service_providers + .authenticator + .debug + .disable_poisson_rate, + client_debug: old_cfg.service_providers.authenticator.debug.client_debug, + }, + }, + debug: service_providers::Debug { + message_retrieval_limit: old_cfg.service_providers.debug.message_retrieval_limit, + }, + }, + metrics: Default::default(), + logging: LoggingSettings {}, + debug: Default::default(), + }; + Ok(cfg) +} diff --git a/nym-node/src/config/persistence.rs b/nym-node/src/config/persistence.rs index 9de7f868cc7..087bcd5b760 100644 --- a/nym-node/src/config/persistence.rs +++ b/nym-node/src/config/persistence.rs @@ -13,8 +13,8 @@ use zeroize::Zeroizing; // Global: pub const DEFAULT_ED25519_PRIVATE_IDENTITY_KEY_FILENAME: &str = "ed25519_identity"; pub const DEFAULT_ED25519_PUBLIC_IDENTITY_KEY_FILENAME: &str = "ed25519_identity.pub"; -pub const DEFAULT_X25519_PRIVATE_SPHINX_KEY_FILENAME: &str = "x25519_sphinx"; -pub const DEFAULT_X25519_PUBLIC_SPHINX_KEY_FILENAME: &str = "x25519_sphinx.pub"; +pub const DEFAULT_PRIMARY_X25519_SPHINX_KEY_FILENAME: &str = "x25519_sphinx_primary"; +pub const DEFAULT_SECONDARY_X25519_SPHINX_KEY_FILENAME: &str = "x25519_sphinx_secondary"; pub const DEFAULT_X25519_PRIVATE_NOISE_KEY_FILENAME: &str = 
"x25519_noise"; pub const DEFAULT_X25519_PUBLIC_NOISE_KEY_FILENAME: &str = "x25519_noise.pub"; pub const DEFAULT_NYMNODE_DESCRIPTION_FILENAME: &str = "description.toml"; @@ -59,7 +59,6 @@ pub const DEFAULT_X25519_WG_PUBLIC_DH_KEY_FILENAME: &str = "x25519_wg_dh.pub"; pub const DEFAULT_RD_BLOOMFILTER_SUBDIR: &str = "replay-detection"; pub const DEFAULT_RD_BLOOMFILTER_FILE_EXT: &str = "bloom"; pub const DEFAULT_RD_BLOOMFILTER_FLUSH_FILE_EXT: &str = "flush"; -pub const CURRENT_RD_BLOOMFILTER_FILENAME: &str = "current"; #[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] #[serde(deny_unknown_fields)] @@ -82,7 +81,6 @@ impl NymNodePaths { } #[derive(Debug, Clone, Deserialize, PartialEq, Eq, Serialize)] -#[serde(deny_unknown_fields)] pub struct KeysPaths { /// Path to file containing ed25519 identity private key. pub private_ed25519_identity_key_file: PathBuf, @@ -90,11 +88,11 @@ pub struct KeysPaths { /// Path to file containing ed25519 identity public key. pub public_ed25519_identity_key_file: PathBuf, - /// Path to file containing x25519 sphinx private key. - pub private_x25519_sphinx_key_file: PathBuf, + /// Path to file containing the primary x25519 sphinx private key. + pub primary_x25519_sphinx_key_file: PathBuf, - /// Path to file containing x25519 sphinx public key. - pub public_x25519_sphinx_key_file: PathBuf, + /// Path to file containing the secondary x25519 sphinx private key. + pub secondary_x25519_sphinx_key_file: PathBuf, /// Path to file containing x25519 noise private key. 
pub private_x25519_noise_key_file: PathBuf, @@ -112,9 +110,10 @@ impl KeysPaths { .join(DEFAULT_ED25519_PRIVATE_IDENTITY_KEY_FILENAME), public_ed25519_identity_key_file: data_dir .join(DEFAULT_ED25519_PUBLIC_IDENTITY_KEY_FILENAME), - private_x25519_sphinx_key_file: data_dir - .join(DEFAULT_X25519_PRIVATE_SPHINX_KEY_FILENAME), - public_x25519_sphinx_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_SPHINX_KEY_FILENAME), + primary_x25519_sphinx_key_file: data_dir + .join(DEFAULT_PRIMARY_X25519_SPHINX_KEY_FILENAME), + secondary_x25519_sphinx_key_file: data_dir + .join(DEFAULT_SECONDARY_X25519_SPHINX_KEY_FILENAME), private_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PRIVATE_NOISE_KEY_FILENAME), public_x25519_noise_key_file: data_dir.join(DEFAULT_X25519_PUBLIC_NOISE_KEY_FILENAME), } @@ -127,13 +126,6 @@ impl KeysPaths { ) } - pub fn x25519_sphinx_storage_paths(&self) -> nym_pemstore::KeyPairPath { - nym_pemstore::KeyPairPath::new( - &self.private_x25519_sphinx_key_file, - &self.public_x25519_sphinx_key_file, - ) - } - pub fn x25519_noise_storage_paths(&self) -> nym_pemstore::KeyPairPath { nym_pemstore::KeyPairPath::new( &self.private_x25519_noise_key_file, @@ -504,20 +496,6 @@ pub struct ReplayProtectionPaths { pub current_bloomfilters_directory: PathBuf, } -impl ReplayProtectionPaths { - pub fn current_bloomfilter_filepath(&self) -> PathBuf { - self.current_bloomfilters_directory - .join(CURRENT_RD_BLOOMFILTER_FILENAME) - .with_extension(DEFAULT_RD_BLOOMFILTER_FILE_EXT) - } - - pub fn current_bloomfilter_being_flushed_filepath(&self) -> PathBuf { - self.current_bloomfilters_directory - .join(CURRENT_RD_BLOOMFILTER_FILENAME) - .with_extension(DEFAULT_RD_BLOOMFILTER_FLUSH_FILE_EXT) - } -} - impl ReplayProtectionPaths { pub fn new>(data_dir: P) -> Self { ReplayProtectionPaths { diff --git a/nym-node/src/config/template.rs b/nym-node/src/config/template.rs index 96f66b246be..bfe413a62bc 100644 --- a/nym-node/src/config/template.rs +++ b/nym-node/src/config/template.rs 
@@ -84,11 +84,11 @@ private_ed25519_identity_key_file = '{{ storage_paths.keys.private_ed25519_ident # Path to file containing ed25519 identity public key. public_ed25519_identity_key_file = '{{ storage_paths.keys.public_ed25519_identity_key_file }}' -# Path to file containing x25519 sphinx private key. -private_x25519_sphinx_key_file = '{{ storage_paths.keys.private_x25519_sphinx_key_file }}' +# Path to file containing the primary x25519 sphinx private key. +primary_x25519_sphinx_key_file = '{{ storage_paths.keys.primary_x25519_sphinx_key_file }}' -# Path to file containing x25519 sphinx public key. -public_x25519_sphinx_key_file = '{{ storage_paths.keys.public_x25519_sphinx_key_file }}' +# Path to file containing the secondary x25519 sphinx private key. +secondary_x25519_sphinx_key_file = '{{ storage_paths.keys.secondary_x25519_sphinx_key_file }}' # Path to file containing x25519 noise private key. private_x25519_noise_key_file = '{{ storage_paths.keys.private_x25519_noise_key_file }}' diff --git a/nym-node/src/config/upgrade_helpers.rs b/nym-node/src/config/upgrade_helpers.rs index 11a12afb76d..85e2cced9c0 100644 --- a/nym-node/src/config/upgrade_helpers.rs +++ b/nym-node/src/config/upgrade_helpers.rs @@ -7,7 +7,6 @@ use crate::error::NymNodeError; use std::path::Path; use tracing::debug; -// currently there are no upgrades async fn try_upgrade_config(path: &Path) -> Result<(), NymNodeError> { let cfg = try_upgrade_config_v1(path, None).await.ok(); let cfg = try_upgrade_config_v2(path, cfg).await.ok(); @@ -16,10 +15,11 @@ async fn try_upgrade_config(path: &Path) -> Result<(), NymNodeError> { let cfg = try_upgrade_config_v5(path, cfg).await.ok(); let cfg = try_upgrade_config_v6(path, cfg).await.ok(); let cfg = try_upgrade_config_v7(path, cfg).await.ok(); - match try_upgrade_config_v8(path, cfg).await { + let cfg = try_upgrade_config_v8(path, cfg).await.ok(); + match try_upgrade_config_v9(path, cfg).await { Ok(cfg) => cfg.save(), Err(e) => { - 
tracing::error!("Failed to finish upgrade - {e}"); + tracing::error!("Failed to finish upgrade: {e}"); Err(NymNodeError::FailedUpgrade) } } diff --git a/nym-node/src/error.rs b/nym-node/src/error.rs index 519f927e690..544227c2b36 100644 --- a/nym-node/src/error.rs +++ b/nym-node/src/error.rs @@ -5,6 +5,7 @@ use crate::node::http::error::NymNodeHttpError; use crate::wireguard::error::WireguardError; use nym_http_api_client::HttpClientError; use nym_ip_packet_router::error::ClientCoreError; +use nym_validator_client::nyxd::error::NyxdError; use nym_validator_client::ValidatorClientError; use std::io; use std::net::IpAddr; @@ -45,6 +46,32 @@ pub enum KeyIOFailure { #[source] err: io::Error, }, + + #[error("failed to move {key} key from '{}' to '{}': {err}", source.display(), destination.display())] + KeyMoveFailure { + key: String, + source: PathBuf, + destination: PathBuf, + #[source] + err: io::Error, + }, + + #[error("failed to copy {key} key from '{}' to '{}': {err}", source.display(), destination.display())] + KeyCopyFailure { + key: String, + source: PathBuf, + destination: PathBuf, + #[source] + err: io::Error, + }, + + #[error("failed to remove {key} key from '{}': {err}", path.display())] + KeyRemovalFailure { + key: String, + path: PathBuf, + #[source] + err: io::Error, + }, } #[derive(Debug, Error)] @@ -58,6 +85,9 @@ pub enum NymNodeError { source: io::Error, }, + #[error("received shutdown signal while attempting to complete the action")] + ShutdownReceived, + #[error("could not find an existing config file at '{}' and fresh node initialisation has been disabled", config_path.display())] ForbiddenInitialisation { config_path: PathBuf }, @@ -115,6 +145,23 @@ pub enum NymNodeError { #[error("this node hasn't set any valid public addresses to announce. 
Please modify [host.public_ips] section of your config")] NoPublicIps, + #[error("there are no available nym api endpoints")] + NoNymApiUrls, + + #[error("failed to resolve nym-api query - no nodes returned a valid response")] + NymApisExhausted, + + #[error("the current epoch appears to be stuck")] + StuckEpoch, + + // this should never happen in normal usage, but it's better to throw it than to panic + // in case of bugs + #[error("sphinx keys have already been consumed to spawn the node tasks")] + ConsumedSphinxKeys, + + #[error("failed to resolve chain query: {0}")] + NyxdFailure(#[from] NyxdError), + #[error("this node attempted to announce an invalid public address: {address}. Please modify [host.public_ips] section of your config. Alternatively, if you wanted to use it in the local setting, run the node with the '--local' flag.")] InvalidPublicIp { address: IpAddr }, @@ -157,6 +204,9 @@ pub enum NymNodeError { #[error("failed to save/load the bloomfilter: {source} using path: {}", path.display())] BloomfilterIoFailure { source: io::Error, path: PathBuf }, + #[error("failed to deserialise bloomfilter metadata")] + BloomfilterMetadataDeserialisationFailure, + #[error(transparent)] GatewayFailure(#[from] nym_gateway::GatewayError), diff --git a/nym-node/src/node/helpers.rs b/nym-node/src/node/helpers.rs index aa9e3525e45..bd62bb3740b 100644 --- a/nym-node/src/node/helpers.rs +++ b/nym-node/src/node/helpers.rs @@ -3,13 +3,42 @@ use crate::config::NodeModes; use crate::error::{KeyIOFailure, NymNodeError}; +use crate::node::key_rotation::key::{SphinxPrivateKey, SphinxPublicKey}; +use crate::node::nym_apis_client::NymApisClient; use nym_crypto::asymmetric::{ed25519, x25519}; use nym_node_requests::api::v1::node::models::NodeDescription; use nym_pemstore::traits::{PemStorableKey, PemStorableKeyPair}; use nym_pemstore::KeyPairPath; +use nym_task::ShutdownToken; +use nym_validator_client::nyxd::contract_traits::MixnetQueryClient; +use 
nym_validator_client::QueryHttpRpcNyxdClient; use serde::Serialize; use std::fmt::{Display, Formatter}; use std::path::Path; +use tracing::warn; +use url::Url; + +#[derive(Debug, Serialize)] +pub(crate) struct DisplaySphinxKey { + public_key: String, + rotation_id: u32, +} + +impl From<&SphinxPrivateKey> for DisplaySphinxKey { + fn from(value: &SphinxPrivateKey) -> Self { + let pubkey: SphinxPublicKey = value.into(); + DisplaySphinxKey { + public_key: pubkey.inner.to_base58_string(), + rotation_id: pubkey.rotation_id, + } + } +} + +impl Display for DisplaySphinxKey { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{} (rotation: {})", self.public_key, self.rotation_id) + } +} #[derive(Debug, Serialize)] pub(crate) struct DisplayDetails { @@ -18,7 +47,8 @@ pub(crate) struct DisplayDetails { pub(crate) description: NodeDescription, pub(crate) ed25519_identity_key: String, - pub(crate) x25519_sphinx_key: String, + pub(crate) x25519_primary_sphinx_key: DisplaySphinxKey, + pub(crate) x25519_secondary_sphinx_key: Option, pub(crate) x25519_noise_key: String, pub(crate) x25519_wireguard_key: String, @@ -39,7 +69,14 @@ impl Display for DisplayDetails { )?; writeln!(f, "details: '{}'", self.description.details)?; writeln!(f, "ed25519 identity: {}", self.ed25519_identity_key)?; - writeln!(f, "x25519 sphinx: {}", self.x25519_sphinx_key)?; + writeln!( + f, + "x25519 primary sphinx: {}", + self.x25519_primary_sphinx_key + )?; + if let Some(secondary) = &self.x25519_secondary_sphinx_key { + writeln!(f, "x25519 secondary sphinx: {secondary}")?; + } writeln!(f, "x25519 noise: {}", self.x25519_noise_key)?; writeln!( f, @@ -61,24 +98,24 @@ } pub(crate) fn load_keypair( - paths: KeyPairPath, + paths: &KeyPairPath, name: impl Into, ) -> Result { - nym_pemstore::load_keypair(&paths).map_err(|err| KeyIOFailure::KeyPairLoadFailure { + nym_pemstore::load_keypair(paths).map_err(|err| KeyIOFailure::KeyPairLoadFailure { keys: 
name.into(), - paths, + paths: paths.clone(), err, }) } pub(crate) fn store_keypair( keys: &T, - paths: KeyPairPath, + paths: &KeyPairPath, name: impl Into, ) -> Result<(), KeyIOFailure> { - nym_pemstore::store_keypair(keys, &paths).map_err(|err| KeyIOFailure::KeyPairStoreFailure { + nym_pemstore::store_keypair(keys, paths).map_err(|err| KeyIOFailure::KeyPairStoreFailure { keys: name.into(), - paths, + paths: paths.clone(), err, }) } @@ -108,7 +145,7 @@ where } pub(crate) fn load_ed25519_identity_keypair( - paths: KeyPairPath, + paths: &KeyPairPath, ) -> Result { Ok(load_keypair(paths, "ed25519-identity")?) } @@ -120,41 +157,54 @@ pub(crate) fn load_ed25519_identity_public_key>( Ok(load_key(path, "ed25519-identity-public-key")?) } -pub(crate) fn load_x25519_sphinx_keypair( - paths: KeyPairPath, -) -> Result { - Ok(load_keypair(paths, "x25519-sphinx")?) -} - pub(crate) fn load_x25519_noise_keypair( - paths: KeyPairPath, + paths: &KeyPairPath, ) -> Result { Ok(load_keypair(paths, "x25519-noise")?) } pub(crate) fn load_x25519_wireguard_keypair( - paths: KeyPairPath, + paths: &KeyPairPath, ) -> Result { Ok(load_keypair(paths, "x25519-wireguard")?) } pub(crate) fn store_ed25519_identity_keypair( keys: &ed25519::KeyPair, - paths: KeyPairPath, + paths: &KeyPairPath, ) -> Result<(), NymNodeError> { Ok(store_keypair(keys, paths, "ed25519-identity")?) } -pub(crate) fn store_x25519_sphinx_keypair( - keys: &x25519::KeyPair, - paths: KeyPairPath, -) -> Result<(), NymNodeError> { - Ok(store_keypair(keys, paths, "x25519-sphinx")?) -} - pub(crate) fn store_x25519_noise_keypair( keys: &x25519::KeyPair, - paths: KeyPairPath, + paths: &KeyPairPath, ) -> Result<(), NymNodeError> { Ok(store_keypair(keys, paths, "x25519-noise")?) 
} + +pub(crate) async fn get_current_rotation_id( + nym_apis: &[Url], + fallback_nyxd: &[Url], +) -> Result { + let apis_client = NymApisClient::new(nym_apis, ShutdownToken::ephemeral())?; + if let Ok(rotation_info) = apis_client.get_key_rotation_info().await { + if rotation_info.is_epoch_stuck() { + return Err(NymNodeError::StuckEpoch); + } + let current_epoch = rotation_info.current_absolute_epoch_id; + return Ok(rotation_info + .key_rotation_state + .key_rotation_id(current_epoch)); + } + warn!("failed to retrieve key rotation id from nym apis. falling back to contract query"); + + for nyxd_url in fallback_nyxd { + let client = QueryHttpRpcNyxdClient::connect_to_default_env(nyxd_url.as_str())?; + if let Ok(res) = client.get_key_rotation_id().await { + return Ok(res.rotation_id); + } + } + + Err(NymNodeError::NymApisExhausted) +} diff --git a/nym-node/src/node/http/helpers/mod.rs b/nym-node/src/node/http/helpers/mod.rs index a819bdd062d..6375f304e4f 100644 --- a/nym-node/src/node/http/helpers/mod.rs +++ b/nym-node/src/node/http/helpers/mod.rs @@ -1,38 +1,4 @@ // Copyright 2024 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use crate::config::Config; -use crate::error::NymNodeError; -use crate::node::http::api::api_requests; -use crate::node::http::error::NymNodeHttpError; -use nym_crypto::asymmetric::{ed25519, x25519}; -use nym_node_requests::api::SignedHostInformation; - pub mod system_info; - -pub(crate) fn sign_host_details( - config: &Config, - x22519_sphinx: &x25519::PublicKey, - x25519_noise: &x25519::PublicKey, - ed22519_identity: &ed25519::KeyPair, -) -> Result { - let x25519_noise = if config.mixnet.debug.unsafe_disable_noise { - None - } else { - Some(*x25519_noise) - }; - - let host_info = api_requests::v1::node::models::HostInformation { - ip_address: config.host.public_ips.clone(), - hostname: config.host.hostname.clone(), - keys: api_requests::v1::node::models::HostKeys { - ed25519_identity: *ed22519_identity.public_key(), - 
x25519_sphinx: *x22519_sphinx, - x25519_noise, - }, - }; - - let signed_info = SignedHostInformation::new(host_info, ed22519_identity.private_key()) - .map_err(NymNodeHttpError::from)?; - Ok(signed_info) -} diff --git a/nym-node/src/node/http/router/api/v1/node/host_information.rs b/nym-node/src/node/http/router/api/v1/node/host_information.rs index 76eb5ddfa02..32a8c9ea6ec 100644 --- a/nym-node/src/node/http/router/api/v1/node/host_information.rs +++ b/nym-node/src/node/http/router/api/v1/node/host_information.rs @@ -1,7 +1,9 @@ // Copyright 2023 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use axum::extract::Query; +use crate::node::http::api::api_requests; +use crate::node::http::state::AppState; +use axum::extract::{Query, State}; use nym_http_api_common::{FormattedResponse, OutputParams}; use nym_node_requests::api::{v1::node::models::SignedHostInformation, SignedDataHostInfo}; @@ -20,11 +22,54 @@ use nym_node_requests::api::{v1::node::models::SignedHostInformation, SignedData params(OutputParams) )] pub(crate) async fn host_information( - host_information: SignedHostInformation, Query(output): Query, + State(state): State, ) -> HostInformationResponse { let output = output.output.unwrap_or_default(); - output.to_response(host_information) + + let primary_key = state.x25519_sphinx_keys.primary(); + let pre_announced = match state.x25519_sphinx_keys.secondary() { + None => None, + Some(secondary_key) => { + if secondary_key.rotation_id() == primary_key.rotation_id() + 1 { + Some(api_requests::v1::node::models::SphinxKey { + rotation_id: secondary_key.rotation_id(), + public_key: secondary_key.x25519_pubkey(), + }) + } else { + None + } + } + }; + + let primary_pubkey = primary_key.x25519_pubkey(); + + #[allow(deprecated)] + let host_info = api_requests::v1::node::models::HostInformation { + ip_address: state.static_information.ip_addresses.clone(), + hostname: state.static_information.hostname.clone(), + keys: 
api_requests::v1::node::models::HostKeys { + ed25519_identity: *state.static_information.ed25519_identity_keys.public_key(), + x25519_sphinx: primary_pubkey, + primary_x25519_sphinx_key: api_requests::v1::node::models::SphinxKey { + rotation_id: primary_key.rotation_id(), + public_key: primary_pubkey, + }, + x25519_noise: state.static_information.x25519_noise_key, + pre_announced_x25519_sphinx_key: pre_announced, + }, + }; + + // SAFETY: the only way for this call to fail is if serialisation of HostInformation fails. + // however, that conversion is stable and infallible + #[allow(clippy::unwrap_used)] + let signed_info = SignedHostInformation::new( + host_info, + state.static_information.ed25519_identity_keys.private_key(), + ) + .unwrap(); + + output.to_response(signed_info) } pub type HostInformationResponse = FormattedResponse; diff --git a/nym-node/src/node/http/router/api/v1/node/mod.rs b/nym-node/src/node/http/router/api/v1/node/mod.rs index f62b17ab3b7..3b115602b7d 100644 --- a/nym-node/src/node/http/router/api/v1/node/mod.rs +++ b/nym-node/src/node/http/router/api/v1/node/mod.rs @@ -7,6 +7,7 @@ use crate::node::http::api::v1::node::description::description; use crate::node::http::api::v1::node::hardware::host_system; use crate::node::http::api::v1::node::host_information::host_information; use crate::node::http::api::v1::node::roles::roles; +use crate::node::http::state::AppState; use axum::routing::get; use axum::Router; use nym_node_requests::api::v1::node::models; @@ -22,14 +23,13 @@ pub mod roles; #[derive(Debug, Clone)] pub struct Config { pub build_information: models::BinaryBuildInformationOwned, - pub host_information: models::SignedHostInformation, pub system_info: Option, pub roles: models::NodeRoles, pub description: models::NodeDescription, pub auxiliary_details: models::AuxiliaryDetails, } -pub(super) fn routes(config: Config) -> Router { +pub(super) fn routes(config: Config) -> Router { Router::new() .route( v1::BUILD_INFO, @@ -45,13 +45,7 @@ 
pub(super) fn routes(config: Config) -> Router move |query| roles(node_roles, query) }), ) - .route( - v1::HOST_INFO, - get({ - let host_info = config.host_information; - move |query| host_information(host_info, query) - }), - ) + .route(v1::HOST_INFO, get(host_information)) .route( v1::SYSTEM_INFO, get({ diff --git a/nym-node/src/node/http/router/mod.rs b/nym-node/src/node/http/router/mod.rs index ae9af2371b3..c296afcea95 100644 --- a/nym-node/src/node/http/router/mod.rs +++ b/nym-node/src/node/http/router/mod.rs @@ -16,7 +16,6 @@ use nym_node_requests::api::v1::mixnode::models::Mixnode; use nym_node_requests::api::v1::network_requester::exit_policy::models::UsedExitPolicy; use nym_node_requests::api::v1::network_requester::models::NetworkRequester; use nym_node_requests::api::v1::node::models::{AuxiliaryDetails, HostSystem, NodeDescription}; -use nym_node_requests::api::SignedHostInformation; use nym_node_requests::routes; use std::net::SocketAddr; use std::path::Path; @@ -34,14 +33,13 @@ pub struct HttpServerConfig { } impl HttpServerConfig { - pub fn new(host_information: SignedHostInformation) -> Self { + pub fn new() -> Self { HttpServerConfig { landing: Default::default(), api: api::Config { v1_config: api::v1::Config { node: api::v1::node::Config { build_information: bin_info_owned!(), - host_information, system_info: None, roles: Default::default(), description: Default::default(), @@ -118,6 +116,7 @@ impl HttpServerConfig { self } + #[must_use] pub fn with_prometheus_bearer_token(mut self, bearer_token: Option) -> Self { self.api.v1_config.metrics.bearer_token = bearer_token.map(|b| Arc::new(Zeroizing::new(b))); self diff --git a/nym-node/src/node/http/state/mod.rs b/nym-node/src/node/http/state/mod.rs index 33dcf4ea9fe..7d0100e7ded 100644 --- a/nym-node/src/node/http/state/mod.rs +++ b/nym-node/src/node/http/state/mod.rs @@ -1,20 +1,35 @@ -// Copyright 2023-2024 - Nym Technologies SA +// Copyright 2023-2025 - Nym Technologies SA // 
SPDX-License-Identifier: GPL-3.0-only use crate::node::http::state::load::CachedNodeLoad; use crate::node::http::state::metrics::MetricsAppState; +use crate::node::key_rotation::active_keys::ActiveSphinxKeys; +use nym_crypto::asymmetric::{ed25519, x25519}; use nym_node_metrics::NymNodeMetrics; use nym_verloc::measurements::SharedVerlocStats; +use std::net::IpAddr; +use std::sync::Arc; use std::time::Duration; use tokio::time::Instant; pub mod load; pub mod metrics; +pub(crate) struct StaticNodeInformation { + pub(crate) ed25519_identity_keys: Arc, + pub(crate) x25519_noise_key: Option, + pub(crate) ip_addresses: Vec, + pub(crate) hostname: Option, +} + #[derive(Clone)] -pub struct AppState { +pub(crate) struct AppState { pub(crate) startup_time: Instant, + pub(crate) static_information: Arc, + + pub(crate) x25519_sphinx_keys: ActiveSphinxKeys, + pub(crate) cached_load: CachedNodeLoad, pub(crate) metrics: MetricsAppState, @@ -22,12 +37,17 @@ pub struct AppState { impl AppState { #[allow(clippy::new_without_default)] - pub fn new( + pub(crate) fn new( + static_information: StaticNodeInformation, + x25519_sphinx_keys: ActiveSphinxKeys, metrics: NymNodeMetrics, verloc: SharedVerlocStats, load_cache_ttl: Duration, ) -> Self { AppState { + static_information: Arc::new(static_information), + x25519_sphinx_keys, + // is it 100% accurate? // no. // does it have to be? 
diff --git a/nym-node/src/node/key_rotation/active_keys.rs b/nym-node/src/node/key_rotation/active_keys.rs new file mode 100644 index 00000000000..d018db4e889 --- /dev/null +++ b/nym-node/src/node/key_rotation/active_keys.rs @@ -0,0 +1,160 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node::key_rotation::key::SphinxPrivateKey; +use arc_swap::{ArcSwap, ArcSwapOption, Guard}; +use std::ops::Deref; +use std::sync::Arc; +use tracing::error; + +#[derive(Clone)] +pub(crate) struct ActiveSphinxKeys { + inner: Arc, +} + +struct ActiveSphinxKeysInner { + /// Key that's currently used as the default when processing packets with no explicit rotation information + primary_key: ArcSwap, + + /// Optionally, a secondary key associated with this node. depending on the context it could either be + /// the pre-announced key for the following rotation or a key from the previous rotation for the overlap period + secondary_key: ArcSwapOption, +} + +impl ActiveSphinxKeys { + pub(crate) fn new_fresh(primary: SphinxPrivateKey) -> Self { + ActiveSphinxKeys { + inner: Arc::new(ActiveSphinxKeysInner { + primary_key: ArcSwap::from_pointee(primary), + secondary_key: Default::default(), + }), + } + } + + pub(crate) fn new_loaded( + primary: SphinxPrivateKey, + secondary: Option, + ) -> Self { + ActiveSphinxKeys { + inner: Arc::new(ActiveSphinxKeysInner { + primary_key: ArcSwap::from_pointee(primary), + secondary_key: ArcSwapOption::from_pointee(secondary), + }), + } + } + + pub(crate) fn even(&self) -> Option { + let primary = self.inner.primary_key.load(); + if primary.is_even_rotation() { + return Some(SphinxKeyGuard::Primary(primary)); + } + self.secondary() + } + + pub(crate) fn odd(&self) -> Option { + let primary = self.inner.primary_key.load(); + if !primary.is_even_rotation() { + return Some(SphinxKeyGuard::Primary(primary)); + } + self.secondary() + } + + pub(crate) fn primary(&self) -> SphinxKeyGuard { + 
SphinxKeyGuard::Primary(self.inner.primary_key.load()) + } + + pub(crate) fn secondary(&self) -> Option { + let guard = self.inner.secondary_key.load(); + if guard.is_none() { + return None; + } + + Some(SphinxKeyGuard::Secondary(SecondaryKeyGuard { guard })) + } + + pub(crate) fn set_secondary(&self, new_key: SphinxPrivateKey) { + self.inner.secondary_key.store(Some(Arc::new(new_key))) + } + + pub(crate) fn primary_key_rotation_id(&self) -> u32 { + self.inner.primary_key.load().rotation_id() + } + + pub(crate) fn secondary_key_rotation_id(&self) -> Option { + self.inner + .secondary_key + .load() + .as_ref() + .map(|k| k.rotation_id()) + } + + // set the secondary (pre-announced key) as the primary + // and the current primary as the secondary (for the overlap epoch) + pub(crate) fn rotate(&self, expected_new_rotation: u32) -> bool { + let Some(pre_announced) = self.inner.secondary_key.load_full() else { + error!("sphinx key inconsistency - attempted to perform key rotation without having pre-announced new key"); + return false; + }; + + if pre_announced.rotation_id() != expected_new_rotation { + error!("sphinx key inconsistency - pre-announced key rotation id != primary + 1"); + return false; + } + + let old_primary = self.inner.primary_key.swap(pre_announced); + self.inner.secondary_key.store(Some(old_primary)); + true + } + + pub(crate) fn deactivate_secondary(&self) { + self.inner.secondary_key.store(None); + } +} + +pub(crate) enum SphinxKeyGuard { + // Primary(Guard>), + Primary(Guard>), + Secondary(SecondaryKeyGuard), +} + +impl Deref for SphinxKeyGuard { + type Target = SphinxPrivateKey; + + fn deref(&self) -> &Self::Target { + match self { + SphinxKeyGuard::Primary(g) => g.deref(), + SphinxKeyGuard::Secondary(g) => g.deref(), + } + } +} + +// enum SecondaryKey { +// PreAnnounced(SphinxPrivateKey), +// PreviousOverlap(SphinxPrivateKey), +// } + +// impl Deref for SecondaryKey { +// type Target = SphinxPrivateKey; +// +// fn deref(&self) -> &Self::Target { 
+// match self { +// SecondaryKey::PreAnnounced(key) => &key, +// SecondaryKey::PreviousOverlap(key) => &key, +// } +// } +// } + +pub(crate) struct SecondaryKeyGuard { + guard: Guard>>, + // guard: Guard>>, +} + +impl Deref for SecondaryKeyGuard { + type Target = SphinxPrivateKey; + + fn deref(&self) -> &Self::Target { + // SAFETY: the guard is ONLY constructed when the key is 'Some' + #[allow(clippy::unwrap_used)] + self.guard.as_ref().unwrap() + } +} diff --git a/nym-node/src/node/key_rotation/controller.rs b/nym-node/src/node/key_rotation/controller.rs new file mode 100644 index 00000000000..15fb11d3be4 --- /dev/null +++ b/nym-node/src/node/key_rotation/controller.rs @@ -0,0 +1,381 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::config::Config; +use crate::node::key_rotation::manager::SphinxKeyManager; +use crate::node::nym_apis_client::NymApisClient; +use crate::node::replay_protection::manager::ReplayProtectionBloomfiltersManager; +use futures::pin_mut; +use nym_task::ShutdownToken; +use nym_validator_client::models::{KeyRotationInfoResponse, KeyRotationState}; +use std::time::Duration; +use time::OffsetDateTime; +use tokio::time::{interval, sleep, Instant}; +use tracing::{debug, error, info, trace, warn}; + +pub(crate) struct RotationConfig { + epoch_duration: Duration, + rotation_state: KeyRotationState, +} + +impl RotationConfig { + fn rotation_lifetime(&self) -> Duration { + (self.rotation_state.validity_epochs + 1) * self.epoch_duration + } +} + +impl From for RotationConfig { + fn from(value: KeyRotationInfoResponse) -> Self { + RotationConfig { + epoch_duration: value.epoch_duration, + rotation_state: value.key_rotation_state, + } + } +} + +pub(crate) struct KeyRotationController { + // regular polling rate to catch any changes in the system config. 
they shouldn't happen too often + // so the requests can be sent quite infrequently + regular_polling_interval: Duration, + + rotation_config: RotationConfig, + replay_protection_manager: ReplayProtectionBloomfiltersManager, + client: NymApisClient, + managed_keys: SphinxKeyManager, + shutdown_token: ShutdownToken, +} + +struct NextAction { + typ: KeyRotationActionState, + deadline: OffsetDateTime, +} + +impl NextAction { + fn new(typ: KeyRotationActionState, deadline: OffsetDateTime) -> Self { + NextAction { typ, deadline } + } + + fn until_deadline(&self) -> Duration { + let now = OffsetDateTime::now_utc(); + Duration::try_from(self.deadline - now).unwrap_or_else(|_| { + // deadline is already in the past + Duration::from_nanos(0) + }) + } + + fn wait(duration: Duration) -> NextAction { + NextAction::new( + KeyRotationActionState::Wait, + OffsetDateTime::now_utc() + duration, + ) + } + + fn pre_announce(rotation_id: u32, deadline: OffsetDateTime) -> Self { + NextAction::new( + KeyRotationActionState::PreAnnounce { rotation_id }, + deadline, + ) + } + + fn swap_default(expected_new_rotation: u32, deadline: OffsetDateTime) -> Self { + NextAction::new( + KeyRotationActionState::SwapDefault { + expected_new_rotation, + }, + deadline, + ) + } + + fn purge_secondary(deadline: OffsetDateTime) -> Self { + NextAction::new(KeyRotationActionState::PurgeOld, deadline) + } +} + +#[derive(Debug, Clone, Copy)] +enum KeyRotationActionState { + // generate and pre-announce new key to the nym-api(s) + PreAnnounce { rotation_id: u32 }, + + // perform the following exchange + // primary -> secondary + // pre_announced -> primary + SwapDefault { expected_new_rotation: u32 }, + + // remove the old overlap key and purge associated data like the replay detection bloomfilter + PurgeOld, + + // a no-op action that has only a single purpose - wait (used to handle slight desyncs) + Wait, +} + +impl KeyRotationController { + pub(crate) fn new( + config: &Config, + rotation_config: 
RotationConfig, + client: NymApisClient, + replay_protection_manager: ReplayProtectionBloomfiltersManager, + managed_keys: SphinxKeyManager, + shutdown_token: ShutdownToken, + ) -> Self { + KeyRotationController { + regular_polling_interval: config + .mixnet + .key_rotation + .debug + .rotation_state_poling_interval, + rotation_config, + replay_protection_manager, + client, + managed_keys, + shutdown_token, + } + } + + async fn try_determine_next_action(&self) -> NextAction { + let now = OffsetDateTime::now_utc(); + let Some(key_rotation_info) = self.try_get_key_rotation_info().await else { + warn!("failed to retrieve key rotation information"); + return NextAction::wait(Duration::from_secs(240)); + }; + + // check if we think the epoch is stuck (we're already 20% or more into following epoch with no advancement) + if key_rotation_info.is_epoch_stuck() { + warn!("the epoch is stuck - can't progress with key rotation"); + return NextAction::wait(Duration::from_secs(240)); + } + + // >>>>> START: determine if we called this method pre-maturely due to clock skew + + // current rotation id as determined by the current epoch id + let current_rotation_id = key_rotation_info.current_key_rotation_id(); + + // expected rotation id as determined by the current TIME + // used to determined epoch stalling or clocks being slightly out of sync + let expected_current_rotation_id = key_rotation_info.expected_current_rotation_id(); + + if current_rotation_id != expected_current_rotation_id { + warn!("the current rotation is {current_rotation_id} whilst we expected {expected_current_rotation_id}"); + // if we got here, it means epoch is most likely NOT stuck (we're within the threshold) + // so probably we prematurely called this method before nym-api(s) got to advancing + // the epoch and thus the rotation, so wait a bit instead. 
+ return NextAction::wait(Duration::from_secs(30)); + } + // >>>>> END: determine if we called this method pre-maturely due to clock skew + + // if we're less than 30s until next rotation, we probably started our binary in a rather + // unfortunate time, just wait until the next rotation rather than do all the work only to throw it + // away immediately + let Some(until_next_rotation) = key_rotation_info.until_next_rotation() else { + warn!("failed to determine time remaining until the next key rotation"); + return NextAction::wait(Duration::from_secs(30)); + }; + if until_next_rotation < Duration::from_secs(30) { + debug!("less than 30s until next rotation - waiting until then"); + return NextAction::wait(Duration::from_secs(30)); + } + + let current_epoch = key_rotation_info.current_absolute_epoch_id; + + // epoch id of when the current rotation has started + let current_rotation_start_epoch = key_rotation_info.current_rotation_starting_epoch_id(); + + // epoch id of when the new rotation id is meant to start + let next_rotation_start_epoch = key_rotation_info.next_rotation_starting_epoch_id(); + + let secondary_key_rotation_id = self.managed_keys.keys.secondary_key_rotation_id(); + let primary_key_rotation_id = self.managed_keys.keys.primary_key_rotation_id(); + + debug!( + "current rotation: {current_rotation_id}, primary: {}, secondary: {secondary_key_rotation_id:?}", + self.managed_keys.keys.primary_key_rotation_id() + ); + + let rotates_next_epoch = next_rotation_start_epoch == current_epoch + 1; + let next_rotation_id = current_rotation_id + 1; + + let Some(secondary_key_rotation_id) = secondary_key_rotation_id else { + debug!("we don't have a secondary key"); + // figure out if we already have appropriate key (like we crashed or this is the first time node is running) + // or whether we have to regenerate anything or, which is the most likely case, we're waiting to + // pre-announce new key for the following rotation + + if primary_key_rotation_id != 
current_rotation_id { + warn!("current primary key does not correspond to the current rotation - immediately pre-announcing new key (rotates next epoch: {rotates_next_epoch})"); + // we don't have a secondary key and our current key is already outdated - + // preannounce a key for either this or the next rotation + // (and next time this method is called, it will be promoted to primary) + return if rotates_next_epoch { + NextAction::pre_announce(next_rotation_id, now) + } else { + NextAction::pre_announce(current_rotation_id, now) + }; + } + + // we have a primary key corresponding to the current rotation, so we just have to pre-announce + // a key for the next rotation an epoch before the rotation + let deadline = key_rotation_info.epoch_start_time(next_rotation_start_epoch - 1); + debug!( + "going to pre-announce secondary key for rotation {next_rotation_id} on {deadline}" + ); + return NextAction::pre_announce(next_rotation_id, deadline); + }; + + // the current secondary key corresponds to the next rotation, i.e. this is the pre-announced key + if secondary_key_rotation_id == next_rotation_id { + debug!("secondary key is for the NEXT rotation - we need to swap into it"); + + let deadline = key_rotation_info.epoch_start_time(next_rotation_start_epoch); + return NextAction::swap_default(next_rotation_id, deadline); + } + + if secondary_key_rotation_id == current_rotation_id { + debug!("secondary key is for the CURRENT rotation - we need to swap into it"); + + return NextAction::swap_default(current_rotation_id, now); + } + + if secondary_key_rotation_id < current_rotation_id { + let deadline = if secondary_key_rotation_id == current_rotation_id - 1 { + debug!("secondary key is from the PREVIOUS rotation - we need to purge it"); + // we purge the key after the end of overlap period, i.e. 
during the 2nd epoch of a rotation + key_rotation_info.epoch_start_time(current_rotation_start_epoch + 1) + } else { + debug!("secondary key is from AN OLD rotation - we need to purge it"); + // the key is from some old rotation, we were probably offline for some time - we need to pre-announce new key + // for the upcoming rotation, so start off by purging this key immediately + now + }; + + return NextAction::purge_secondary(deadline); + } + + // at this point all branches should have been covered, i.e. missing secondary key, + // secondary key == next rotation + // secondary key == current rotation + // secondary key < current rotation + // the only, theoretical, branch is if secondary key was from few rotations in the future, + // but this would require some weird chain shenanigans + error!("this code branch should have been unreachable - please report if you see this error with the following information:\ + primary_key_rotation = {primary_key_rotation_id}, + secondary_key_rotation = {secondary_key_rotation_id}, + current_rotation = {current_rotation_id}, + next_rotation = {next_rotation_id}, + raw_response = {key_rotation_info:?}"); + + NextAction::wait(Duration::from_secs(240)) + } + + async fn try_get_key_rotation_info(&self) -> Option { + let Ok(rotation_info) = self.client.get_key_rotation_info().await else { + warn!("failed to retrieve key rotation information from ANY nym-api - we might miss configuration changes"); + return None; + }; + + Some(rotation_info) + } + + async fn pre_announce_new_key(&self, rotation_id: u32) { + info!("pre-announcing new key for rotation {rotation_id}"); + if let Err(err) = self.managed_keys.generate_key_for_new_rotation(rotation_id) { + error!("failed to generate and store new sphinx key: {err}"); + return; + }; + + if self + .replay_protection_manager + .allocate_pre_announced(rotation_id, self.rotation_config.rotation_lifetime()) + .is_err() + { + // mutex poisoning - we have to exit + self.shutdown_token.cancel(); + } + + 
// no need to send the information explicitly to nym-apis, as they're scheduled to refresh + // self-described endpoints of all nodes before the key rotation epoch rolls over + } + + fn swap_default_key(&self, expected_new_rotation: u32) { + info!("attempting to swap the primary key to the previously generated one"); + if let Err(err) = self.managed_keys.rotate_keys(expected_new_rotation) { + error!("failed to perform sphinx key swap: {err}") + }; + if self + .replay_protection_manager + .promote_pre_announced() + .is_err() + { + // mutex poisoning - we have to exit + self.shutdown_token.cancel(); + } + } + + fn purge_old_rotation_data(&self) { + info!("purging data associated with the old sphinx key"); + if let Err(err) = self.managed_keys.remove_overlap_key() { + error!("failed to remove old sphinx key: {err}"); + }; + if self.replay_protection_manager.purge_secondary().is_err() { + // mutex poisoning - we have to exit + self.shutdown_token.cancel(); + } + } + + async fn execute_next_action(&self, action: KeyRotationActionState) { + match action { + KeyRotationActionState::PreAnnounce { rotation_id } => { + self.pre_announce_new_key(rotation_id).await + } + KeyRotationActionState::SwapDefault { + expected_new_rotation, + } => self.swap_default_key(expected_new_rotation), + KeyRotationActionState::PurgeOld => { + self.purge_old_rotation_data(); + } + KeyRotationActionState::Wait => {} + } + } + + pub(crate) async fn run(&self) { + info!("starting sphinx key rotation controller"); + + let mut polling_interval = interval(self.regular_polling_interval); + polling_interval.reset(); + + let mut next_action = self.try_determine_next_action().await; + debug!( + "next key rotation action to take: {:?} at {}", + next_action.typ, next_action.deadline + ); + let state_update_future = sleep(next_action.until_deadline()); + pin_mut!(state_update_future); + + while !self.shutdown_token.is_cancelled() { + tokio::select! 
{ + biased; + _ = self.shutdown_token.cancelled() => { + trace!("KeyRotationController: Received shutdown"); + break; + } + _ = polling_interval.tick() => {} + _ = &mut state_update_future => { + self.execute_next_action(next_action.typ).await + } + } + + next_action = self.try_determine_next_action().await; + debug!( + "next key rotation action to take: {:?} at {}", + next_action.typ, next_action.deadline + ); + state_update_future + .as_mut() + .reset(Instant::now() + next_action.until_deadline()); + } + + trace!("KeyRotationController: exiting") + } + + pub(crate) fn start(self) { + tokio::spawn(async move { self.run().await }); + } +} diff --git a/nym-node/src/node/key_rotation/key.rs b/nym-node/src/node/key_rotation/key.rs new file mode 100644 index 00000000000..7d31455ae51 --- /dev/null +++ b/nym-node/src/node/key_rotation/key.rs @@ -0,0 +1,137 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use nym_crypto::aes::cipher::crypto_common::rand_core::{CryptoRng, RngCore}; +use nym_crypto::asymmetric::x25519; +use nym_pemstore::traits::PemStorableKey; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum MalformedSphinxKey { + #[error("inner x25519 key is malformed: {0}")] + X25519Failure(#[from] x25519::KeyRecoveryError), + + #[error("did not receive sufficient number of bytes to recover the key")] + Incomplete, +} + +pub(crate) struct SphinxPrivateKey { + rotation_id: u32, + inner: x25519::PrivateKey, +} + +impl SphinxPrivateKey { + pub(crate) fn new(rng: &mut R, rotation_id: u32) -> Self { + SphinxPrivateKey { + rotation_id, + inner: x25519::PrivateKey::new(rng), + } + } + + pub(crate) fn import(key: x25519::PrivateKey, rotation_id: u32) -> Self { + SphinxPrivateKey { + rotation_id, + inner: key, + } + } + + pub(crate) fn x25519_pubkey(&self) -> x25519::PublicKey { + self.inner.public_key() + } + + pub(crate) fn inner(&self) -> &x25519::PrivateKey { + &self.inner + } + + pub(crate) fn is_even_rotation(&self) -> bool 
{ + self.rotation_id & 1 == 0 + } + + pub(crate) fn rotation_id(&self) -> u32 { + self.rotation_id + } +} + +impl From<&SphinxPrivateKey> for SphinxPublicKey { + fn from(value: &SphinxPrivateKey) -> Self { + SphinxPublicKey { + rotation_id: value.rotation_id, + inner: (&value.inner).into(), + } + } +} + +impl AsRef for SphinxPrivateKey { + fn as_ref(&self) -> &x25519::PrivateKey { + &self.inner + } +} + +pub(crate) struct SphinxPublicKey { + pub(crate) rotation_id: u32, + pub(crate) inner: x25519::PublicKey, +} + +impl AsRef for SphinxPublicKey { + fn as_ref(&self) -> &x25519::PublicKey { + &self.inner + } +} + +impl PemStorableKey for SphinxPrivateKey { + type Error = MalformedSphinxKey; + + fn pem_type() -> &'static str { + // it's fine (and actually desired) to attach 'SPHINX' here, as this is not a valid X25519 key by itself. + // this is because it also contains the encoded rotation id + "X25519 SPHINX PRIVATE KEY" + } + + fn to_bytes(&self) -> Vec { + self.rotation_id + .to_be_bytes() + .into_iter() + .chain(self.inner.to_bytes()) + .collect() + } + + fn from_bytes(bytes: &[u8]) -> Result { + if bytes.len() != x25519::PRIVATE_KEY_SIZE + 4 { + return Err(MalformedSphinxKey::Incomplete); + } + // SAFETY: we just checked we have sufficient bytes available + #[allow(clippy::unwrap_used)] + let rotation_id = u32::from_be_bytes(bytes[..4].try_into().unwrap()); + + Ok(SphinxPrivateKey { + rotation_id, + inner: x25519::PrivateKey::from_bytes(&bytes[4..])?, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::SeedableRng; + use rand_chacha::ChaCha20Rng; + + #[test] + fn private_key_bytes_convertion() { + // Set up a deterministic RNG. 
+ let seed = [42u8; 32]; + let mut rng = ChaCha20Rng::from_seed(seed); + + let key = SphinxPrivateKey { + rotation_id: 42, + inner: x25519::PrivateKey::new(&mut rng), + }; + + let bytes = key.to_bytes(); + assert_eq!(bytes.len(), 36); // 32 bytes for x25519 key and 4 bytes for rotation id + let recovered_key = SphinxPrivateKey::from_bytes(bytes.as_slice()).unwrap(); + + assert_eq!(recovered_key.rotation_id, 42); + assert_eq!(recovered_key.inner.to_bytes(), key.inner.to_bytes()); + } +} diff --git a/nym-node/src/node/key_rotation/manager.rs b/nym-node/src/node/key_rotation/manager.rs new file mode 100644 index 00000000000..0a4c0df27dc --- /dev/null +++ b/nym-node/src/node/key_rotation/manager.rs @@ -0,0 +1,187 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::error::{KeyIOFailure, NymNodeError}; +use crate::node::helpers::{load_key, store_key}; +use crate::node::key_rotation::active_keys::ActiveSphinxKeys; +use crate::node::key_rotation::key::{SphinxPrivateKey, SphinxPublicKey}; +use rand::rngs::OsRng; +use rand::{CryptoRng, RngCore}; +use std::fs; +use std::path::{Path, PathBuf}; +use tracing::{trace, warn}; + +pub(crate) struct SphinxKeyManager { + pub(crate) keys: ActiveSphinxKeys, + + primary_key_path: PathBuf, + secondary_key_path: PathBuf, +} + +impl SphinxKeyManager { + // only called by newly initialised nym-nodes + pub(crate) fn initialise_new( + rng: &mut R, + current_rotation_id: u32, + primary_key_path: P, + secondary_key_path: P, + ) -> Result + where + R: RngCore + CryptoRng, + P: AsRef, + { + let primary = SphinxPrivateKey::new(rng, current_rotation_id); + trace!("attempting to store primary x25519 sphinx key"); + + let primary_key_path = primary_key_path.as_ref(); + store_key(&primary, primary_key_path, "x25519 sphinx")?; + + Ok(SphinxKeyManager { + keys: ActiveSphinxKeys::new_fresh(primary), + primary_key_path: primary_key_path.to_path_buf(), + secondary_key_path: 
secondary_key_path.as_ref().to_path_buf(), + }) + } + + // moves the primary key to the secondary file + // and vice versa, i.e. secondary to the primary + fn swap_key_files>( + primary_path: P, + secondary_path: P, + ) -> Result<(), NymNodeError> { + let tmp_path = primary_path.as_ref().with_extension("tmp"); + + // 1. COPY: primary -> temp + fs::copy(primary_path.as_ref(), &tmp_path).map_err(|err| KeyIOFailure::KeyCopyFailure { + key: "old x25519 sphinx primary".to_string(), + source: primary_path.as_ref().to_path_buf(), + destination: tmp_path.clone(), + err, + })?; + + // 2. MOVE: secondary -> primary + fs::rename(secondary_path.as_ref(), primary_path.as_ref()).map_err(|err| { + KeyIOFailure::KeyMoveFailure { + key: "x25519 sphinx secondary".to_string(), + source: secondary_path.as_ref().to_path_buf(), + destination: primary_path.as_ref().to_path_buf(), + err, + } + })?; + + // 3. MOVE temp -> secondary + fs::rename(&tmp_path, secondary_path.as_ref()).map_err(|err| { + KeyIOFailure::KeyMoveFailure { + key: "old x25519 sphinx primary".to_string(), + source: tmp_path.clone(), + destination: secondary_path.as_ref().to_path_buf(), + err, + } + })?; + + Ok(()) + } + + pub(crate) fn generate_key_for_new_rotation( + &self, + expected_rotation: u32, + ) -> Result { + let mut rng = OsRng; + let new = SphinxPrivateKey::new(&mut rng, expected_rotation); + let pub_key = (&new).into(); + store_key( + &new, + &self.secondary_key_path, + "x25519 (pre-announced) sphinx", + )?; + + self.keys.set_secondary(new); + Ok(pub_key) + } + + pub(crate) fn rotate_keys(&self, expected_new_rotation: u32) -> Result<(), NymNodeError> { + if !self.keys.rotate(expected_new_rotation) { + self.generate_key_for_new_rotation(expected_new_rotation)?; + self.keys.rotate(expected_new_rotation); + } + Self::swap_key_files(&self.primary_key_path, &self.secondary_key_path) + } + + pub(crate) fn remove_overlap_key(&self) -> Result<(), NymNodeError> { + self.keys.deactivate_secondary(); + 
fs::remove_file(&self.secondary_key_path).map_err(|err| { + KeyIOFailure::KeyRemovalFailure { + key: "old x25519 sphinx secondary".to_string(), + path: self.secondary_key_path.clone(), + err, + } + })?; + Ok(()) + } + + pub(crate) fn try_load_or_regenerate>( + current_rotation_id: u32, + primary_key_path: P, + secondary_key_path: P, + ) -> Result { + // if the temporary key exists, it means we crashed in the middle of rotating the key. + // rather than trying to figure out which exact step failed, just delete it and it will be redone + // (we still have the two keys, they just might be in the wrong order) + let tmp_location = primary_key_path.as_ref().with_extension("tmp"); + if tmp_location.exists() { + warn!("we seem to have crashed in the middle of rotating the sphinx key"); + fs::remove_file(&tmp_location).map_err(|err| KeyIOFailure::KeyRemovalFailure { + key: "old x25519 sphinx (temp location)".to_string(), + path: tmp_location, + err, + })?; + } + + // primary key should always be present + let mut primary: SphinxPrivateKey = + load_key(primary_key_path.as_ref(), "x25519 sphinx primary")?; + + let mut secondary: Option = if secondary_key_path.as_ref().exists() { + Some(load_key( + secondary_key_path.as_ref(), + "x25519 sphinx secondary", + )?) + } else { + None + }; + + let primary_id = primary.rotation_id(); + let secondary_id = secondary.as_ref().map(|k| k.rotation_id()); + + // 1. check for failed (or missed) rotation, i.e. 
secondary > primary AND current_rotation > primary + if let Some(secondary_id) = secondary_id { + if secondary_id > primary_id && current_rotation_id > primary_id { + Self::swap_key_files(primary_key_path.as_ref(), secondary_key_path.as_ref())?; + // SAFETY: we just checked secondary exists + #[allow(clippy::unwrap_used)] + let tmp = secondary.take().unwrap(); + secondary = Some(primary); + primary = tmp; + } + } + + // if upon loading it turns out that the node has been inactive for a long time, + // immediately rotate keys (but leave 1h grace period for current primary, i.e. set it as secondary) + if primary.rotation_id() != current_rotation_id { + warn!("this node has been inactive for more than a key rotation duration. the current primary key was generated for rotation {} while the current rotation is {current_rotation_id}. new key will be generated now.", primary.rotation_id()); + let this = SphinxKeyManager { + keys: ActiveSphinxKeys::new_loaded(primary, None), + primary_key_path: primary_key_path.as_ref().to_path_buf(), + secondary_key_path: secondary_key_path.as_ref().to_path_buf(), + }; + this.generate_key_for_new_rotation(current_rotation_id)?; + return Ok(this); + } + + Ok(SphinxKeyManager { + keys: ActiveSphinxKeys::new_loaded(primary, secondary), + primary_key_path: primary_key_path.as_ref().to_path_buf(), + secondary_key_path: secondary_key_path.as_ref().to_path_buf(), + }) + } +} diff --git a/nym-node/src/node/key_rotation/mod.rs b/nym-node/src/node/key_rotation/mod.rs new file mode 100644 index 00000000000..9e89bc4d28e --- /dev/null +++ b/nym-node/src/node/key_rotation/mod.rs @@ -0,0 +1,7 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +pub(crate) mod active_keys; +pub(crate) mod controller; +pub(crate) mod key; +pub(crate) mod manager; diff --git a/nym-node/src/node/mixnet/handler.rs b/nym-node/src/node/mixnet/handler.rs index 1a6cf87ad35..b355f097c4f 100644 --- a/nym-node/src/node/mixnet/handler.rs +++ 
b/nym-node/src/node/mixnet/handler.rs @@ -8,9 +8,11 @@ use nym_sphinx_framing::codec::NymCodec; use nym_sphinx_framing::packet::FramedNymPacket; use nym_sphinx_framing::processing::{ process_framed_packet, MixProcessingResult, MixProcessingResultData, PacketProcessingError, - PartiallyUnwrappedPacket, ProcessedFinalHop, + PartiallyUnwrappedPacket, PartialyUnwrappedPacketWithKeyRotation, ProcessedFinalHop, }; +use nym_sphinx_params::SphinxKeyRotation; use nym_sphinx_types::{Delay, REPLAY_TAG_SIZE}; +use std::collections::HashMap; use std::mem; use std::net::SocketAddr; use tokio::net::TcpStream; @@ -19,41 +21,50 @@ use tokio_util::codec::Framed; use tracing::{debug, error, instrument, trace, warn}; struct PendingReplayCheckPackets { - packets: Vec, + // map of rotation id used for packet creation to the packets + packets: HashMap>, last_acquired_mutex: Instant, } impl PendingReplayCheckPackets { fn new() -> PendingReplayCheckPackets { PendingReplayCheckPackets { - packets: vec![], + packets: Default::default(), last_acquired_mutex: Instant::now(), } } - fn reset(&mut self, now: Instant) -> Vec { + fn reset(&mut self, now: Instant) -> HashMap> { self.last_acquired_mutex = now; mem::take(&mut self.packets) } - fn push(&mut self, now: Instant, packet: PartiallyUnwrappedPacket) { + fn push(&mut self, now: Instant, packet: PartialyUnwrappedPacketWithKeyRotation) { if self.packets.is_empty() { self.last_acquired_mutex = now; } - self.packets.push(packet); + self.packets + .entry(packet.used_key_rotation) + .or_default() + .push(packet.packet) } - fn replay_tags(&self) -> Vec<&[u8; REPLAY_TAG_SIZE]> { - let mut replay_tags = Vec::with_capacity(self.packets.len()); - for packet in &self.packets { - let Some(replay_tag) = packet.replay_tag() else { - error!( - "corrupted batch of {} packets - replay tag was missing", - self.packets.len() - ); - return Vec::new(); - }; - replay_tags.push(replay_tag); + fn replay_tags(&self) -> HashMap> { + let mut replay_tags = 
HashMap::with_capacity(self.packets.len()); + 'outer: for (rotation_id, packets) in &self.packets { + let mut rotation_replay_tags = Vec::with_capacity(packets.len()); + for packet in packets { + let Some(replay_tag) = packet.replay_tag() else { + error!( + "corrupted batch of {} packets - replay tag was missing", + self.packets.len() + ); + replay_tags.insert(*rotation_id, Vec::new()); + continue 'outer; + }; + rotation_replay_tags.push(replay_tag); + } + replay_tags.insert(*rotation_id, rotation_replay_tags); } replay_tags } @@ -212,6 +223,56 @@ impl ConnectionHandler { time_threshold && count_threshold } + fn try_partially_unwrap_packet( + &self, + packet: FramedNymPacket, + ) -> Result { + // based on the received sphinx key rotation information, + // attempt to choose appropriate key for processing the packet + match packet.header().key_rotation { + SphinxKeyRotation::Unknown => { + let primary = self.shared.sphinx_keys.primary(); + let primary_rotation = primary.rotation_id(); + + // we have to try both keys, start with the primary as it has higher likelihood of being correct + // if let Ok(partially_unwrapped) = PartiallyUnwrappedPacket::new() + match PartiallyUnwrappedPacket::new(packet, primary.inner().as_ref()) { + Ok(unwrapped_packet) => { + Ok(unwrapped_packet.with_key_rotation(primary_rotation)) + } + Err((packet, err)) => { + if let Some(secondary) = self.shared.sphinx_keys.secondary() { + let secondary_rotation = secondary.rotation_id(); + PartiallyUnwrappedPacket::new(packet, secondary.inner().as_ref()) + .map_err(|(_, err)| err) + .map(|p| p.with_key_rotation(secondary_rotation)) + } else { + Err(err) + } + } + } + } + SphinxKeyRotation::OddRotation => { + let Some(odd_key) = self.shared.sphinx_keys.odd() else { + return Err(PacketProcessingError::ExpiredKey); + }; + let odd_rotation = odd_key.rotation_id(); + PartiallyUnwrappedPacket::new(packet, odd_key.inner().as_ref()) + .map_err(|(_, err)| err) + .map(|p| p.with_key_rotation(odd_rotation)) + } 
+ SphinxKeyRotation::EvenRotation => { + let Some(even_key) = self.shared.sphinx_keys.even() else { + return Err(PacketProcessingError::ExpiredKey); + }; + let even_rotation = even_key.rotation_id(); + PartiallyUnwrappedPacket::new(packet, even_key.inner().as_ref()) + .map_err(|(_, err)| err) + .map(|p| p.with_key_rotation(even_rotation)) + } + } + } + async fn handle_received_packet_with_replay_detection( &mut self, now: Instant, @@ -219,10 +280,7 @@ impl ConnectionHandler { ) { // 1. derive and expand shared secret // also check the header integrity - let partially_unwrapped = match PartiallyUnwrappedPacket::new( - packet, - self.shared.sphinx_keys.private_key().as_ref(), - ) { + let partially_unwrapped = match self.try_partially_unwrap_packet(packet) { Ok(unwrapped) => unwrapped, Err(err) => { trace!("failed to process received mix packet: {err}"); @@ -277,17 +335,24 @@ impl ConnectionHandler { async fn handle_post_replay_detection_packets( &self, now: Instant, - packets: Vec, - replay_check_results: Vec, + packets: HashMap>, + replay_check_results: HashMap>, ) { - for (packet, replayed) in packets.into_iter().zip(replay_check_results) { - let unwrapped_packet = if replayed { - Err(PacketProcessingError::PacketReplay) - } else { - packet.finalise_unwrapping() + for (rotation_id, packets) in packets { + let Some(replay_checks) = replay_check_results.get(&rotation_id) else { + // this should never happen, but if we messed up, and it does, don't panic, just drop the packets + error!("inconsistent replay check result - no values for rotation {rotation_id}"); + continue; }; - - self.handle_unwrapped_packet(now, unwrapped_packet).await; + for (packet, &replayed) in packets.into_iter().zip(replay_checks) { + let unwrapped_packet = if replayed { + Err(PacketProcessingError::PacketReplay) + } else { + packet.finalise_unwrapping() + }; + + self.handle_unwrapped_packet(now, unwrapped_packet).await; + } } } @@ -340,6 +405,43 @@ impl ConnectionHandler { .await; } + fn 
try_full_unwrap_packet( + &self, + packet: FramedNymPacket, + ) -> Result { + // based on the received sphinx key rotation information, + // attempt to choose appropriate key for processing the packet + // NOTE: due to the function signatures, outfox packets will **only** attempt primary key + // if no rotation information is available (but that's fine given outfox is not really in use, + // and by the time we need it, the rotation info should be present) + match packet.header().key_rotation { + SphinxKeyRotation::Unknown => { + process_framed_packet(packet, self.shared.sphinx_keys.primary().inner().as_ref()) + } + SphinxKeyRotation::OddRotation => { + let Some(odd_key) = self.shared.sphinx_keys.odd() else { + return Err(PacketProcessingError::ExpiredKey); + }; + process_framed_packet(packet, odd_key.inner().as_ref()) + } + SphinxKeyRotation::EvenRotation => { + let Some(even_key) = self.shared.sphinx_keys.even() else { + return Err(PacketProcessingError::ExpiredKey); + }; + process_framed_packet(packet, even_key.inner().as_ref()) + } + } + } + + async fn handle_received_packet_with_no_replay_detection( + &mut self, + now: Instant, + packet: FramedNymPacket, + ) { + let unwrapped_packet = self.try_full_unwrap_packet(packet); + self.handle_unwrapped_packet(now, unwrapped_packet).await; + } + #[instrument(skip(self, packet), level = "debug")] async fn handle_received_nym_packet(&mut self, packet: FramedNymPacket) { let now = Instant::now(); @@ -352,9 +454,8 @@ impl ConnectionHandler { } else { // otherwise just skip that whole procedure and go straight to payload unwrapping // (assuming the basic framing is valid) - let unwrapped_packet = - process_framed_packet(packet, self.shared.sphinx_keys.private_key().as_ref()); - self.handle_unwrapped_packet(now, unwrapped_packet).await; + self.handle_received_packet_with_no_replay_detection(now, packet) + .await; }; } diff --git a/nym-node/src/node/mixnet/packet_forwarding/mod.rs 
b/nym-node/src/node/mixnet/packet_forwarding/mod.rs index 109398f90c7..e397124ee08 100644 --- a/nym-node/src/node/mixnet/packet_forwarding/mod.rs +++ b/nym-node/src/node/mixnet/packet_forwarding/mod.rs @@ -58,32 +58,20 @@ impl PacketForwarder { C: SendWithoutResponse, F: RoutingFilter, { - let next_hop = packet.next_hop(); + let next_hop = packet.next_hop_address(); - let packet_type = packet.packet_type(); - let packet = packet.into_packet(); - - if let Err(err) = self - .mixnet_client - .send_without_response(next_hop, packet, packet_type) - { + if let Err(err) = self.mixnet_client.send_without_response(packet) { if err.kind() == io::ErrorKind::WouldBlock { // we only know for sure if we dropped a packet if our sending queue was full // in any other case the connection might still be re-established (or created for the first time) // and the packet might get sent, but we won't know about it - self.metrics - .mixnet - .egress_dropped_forward_packet(next_hop.into()) + self.metrics.mixnet.egress_dropped_forward_packet(next_hop) } else if err.kind() == io::ErrorKind::NotConnected { // let's give the benefit of the doubt and assume we manage to establish connection - self.metrics - .mixnet - .egress_sent_forward_packet(next_hop.into()) + self.metrics.mixnet.egress_sent_forward_packet(next_hop) } } else { - self.metrics - .mixnet - .egress_sent_forward_packet(next_hop.into()) + self.metrics.mixnet.egress_sent_forward_packet(next_hop) } } diff --git a/nym-node/src/node/mixnet/shared/mod.rs b/nym-node/src/node/mixnet/shared/mod.rs index 13f6922eaff..37318505a77 100644 --- a/nym-node/src/node/mixnet/shared/mod.rs +++ b/nym-node/src/node/mixnet/shared/mod.rs @@ -2,10 +2,10 @@ // SPDX-License-Identifier: GPL-3.0-only use crate::config::Config; +use crate::node::key_rotation::active_keys::ActiveSphinxKeys; use crate::node::mixnet::handler::ConnectionHandler; use crate::node::mixnet::SharedFinalHopData; -use 
crate::node::replay_protection::bloomfilter::ReplayProtectionBloomfilter; -use nym_crypto::asymmetric::x25519; +use crate::node::replay_protection::bloomfilter::ReplayProtectionBloomfilters; use nym_gateway::node::GatewayStorageError; use nym_mixnet_client::forwarder::{MixForwardingSender, PacketToForward}; use nym_node_metrics::mixnet::PacketKind; @@ -18,7 +18,6 @@ use nym_sphinx_types::DestinationAddressBytes; use nym_task::ShutdownToken; use std::io; use std::net::{IpAddr, SocketAddr}; -use std::sync::Arc; use std::time::Duration; use tokio::net::TcpStream; use tokio::task::JoinHandle; @@ -66,8 +65,8 @@ impl ProcessingConfig { // explicitly do NOT derive clone as we want to manually apply relevant suffixes to the task clients pub(crate) struct SharedData { pub(super) processing_config: ProcessingConfig, - pub(super) sphinx_keys: Arc, - pub(super) replay_protection_filter: ReplayProtectionBloomfilter, + pub(super) sphinx_keys: ActiveSphinxKeys, + pub(super) replay_protection_filter: ReplayProtectionBloomfilters, // used for FORWARD mix packets and FINAL ack packets pub(super) mixnet_forwarder: MixForwardingSender, @@ -89,8 +88,8 @@ fn convert_to_metrics_version(processed: MixPacketVersion) -> PacketKind { impl SharedData { pub(crate) fn new( processing_config: ProcessingConfig, - x25519_keys: Arc, - replay_protection_filter: ReplayProtectionBloomfilter, + sphinx_keys: ActiveSphinxKeys, + replay_protection_filter: ReplayProtectionBloomfilters, mixnet_forwarder: MixForwardingSender, final_hop: SharedFinalHopData, metrics: NymNodeMetrics, @@ -98,7 +97,7 @@ impl SharedData { ) -> Self { SharedData { processing_config, - sphinx_keys: x25519_keys, + sphinx_keys, replay_protection_filter, mixnet_forwarder, final_hop, diff --git a/nym-node/src/node/mod.rs b/nym-node/src/node/mod.rs index f3b3b3b1b92..08b6f413950 100644 --- a/nym-node/src/node/mod.rs +++ b/nym-node/src/node/mod.rs @@ -9,15 +9,17 @@ use crate::config::{ use crate::error::{EntryGatewayError, NymNodeError, 
ServiceProvidersError}; use crate::node::description::{load_node_description, save_node_description}; use crate::node::helpers::{ - load_ed25519_identity_keypair, load_key, load_x25519_noise_keypair, load_x25519_sphinx_keypair, + get_current_rotation_id, load_ed25519_identity_keypair, load_key, load_x25519_noise_keypair, store_ed25519_identity_keypair, store_key, store_keypair, store_x25519_noise_keypair, - store_x25519_sphinx_keypair, DisplayDetails, + DisplayDetails, }; use crate::node::http::api::api_requests; -use crate::node::http::helpers::sign_host_details; use crate::node::http::helpers::system_info::get_system_info; -use crate::node::http::state::AppState; +use crate::node::http::state::{AppState, StaticNodeInformation}; use crate::node::http::{HttpServerConfig, NymNodeHttpServer, NymNodeRouter}; +use crate::node::key_rotation::active_keys::ActiveSphinxKeys; +use crate::node::key_rotation::controller::KeyRotationController; +use crate::node::key_rotation::manager::SphinxKeyManager; use crate::node::metrics::aggregator::MetricsAggregator; use crate::node::metrics::console_logger::ConsoleLogger; use crate::node::metrics::handler::client_sessions::GatewaySessionStatsHandler; @@ -28,10 +30,14 @@ use crate::node::metrics::handler::pending_egress_packets_updater::PendingEgress use crate::node::mixnet::packet_forwarding::PacketForwarder; use crate::node::mixnet::shared::ProcessingConfig; use crate::node::mixnet::SharedFinalHopData; -use crate::node::replay_protection::background_task::ReplayProtectionBackgroundTask; -use crate::node::replay_protection::bloomfilter::ReplayProtectionBloomfilter; +use crate::node::nym_apis_client::NymApisClient; +use crate::node::replay_protection::background_task::ReplayProtectionDiskFlush; +use crate::node::replay_protection::bloomfilter::ReplayProtectionBloomfilters; +use crate::node::replay_protection::manager::ReplayProtectionBloomfiltersManager; use crate::node::routing_filter::{OpenFilter, RoutingFilter}; -use 
crate::node::shared_network::{CachedNetwork, CachedTopologyProvider, NetworkRefresher}; +use crate::node::shared_network::{ + CachedNetwork, CachedTopologyProvider, LocalGatewayNode, NetworkRefresher, +}; use nym_bin_common::bin_info; use nym_crypto::asymmetric::{ed25519, x25519}; use nym_gateway::node::{ActiveClientsStore, GatewayTasksBuilder}; @@ -47,29 +53,28 @@ use nym_node_requests::api::v1::node::models::{AnnouncePorts, NodeDescription}; use nym_sphinx_acknowledgements::AckKey; use nym_sphinx_addressing::Recipient; use nym_task::{ShutdownManager, ShutdownToken, TaskClient}; -use nym_validator_client::client::NymApiClientExt; -use nym_validator_client::models::NodeRefreshBody; -use nym_validator_client::{NymApiClient, UserAgent}; +use nym_validator_client::UserAgent; use nym_verloc::measurements::SharedVerlocStats; use nym_verloc::{self, measurements::VerlocMeasurer}; use nym_wireguard::{peer_controller::PeerControlRequest, WireguardGatewayData}; use rand::rngs::OsRng; use rand::{CryptoRng, RngCore}; use std::net::SocketAddr; +use std::ops::Deref; use std::path::Path; use std::sync::Arc; -use std::time::Duration; use tokio::sync::mpsc; -use tokio::time::timeout; -use tracing::{debug, info, trace, warn}; +use tracing::{debug, info, trace}; use zeroize::Zeroizing; pub mod bonding_information; pub mod description; pub mod helpers; pub(crate) mod http; +pub(crate) mod key_rotation; pub(crate) mod metrics; pub(crate) mod mixnet; +mod nym_apis_client; pub(crate) mod replay_protection; mod routing_filter; mod shared_network; @@ -148,10 +153,10 @@ impl ServiceProvidersData { store_keypair( &ed25519_keys, - ed25519_paths, + &ed25519_paths, format!("{typ}-ed25519-identity"), )?; - store_keypair(&x25519_keys, x25519_paths, format!("{typ}-x25519-dh"))?; + store_keypair(&x25519_keys, &x25519_paths, format!("{typ}-x25519-dh"))?; store_key(&aes128ctr_key, ack_key_path, format!("{typ}-ack-key"))?; Ok(()) @@ -324,7 +329,7 @@ impl WireguardData { let (inner, peer_rx) = 
WireguardGatewayData::new( config.clone().into(), Arc::new(load_x25519_wireguard_keypair( - config.storage_paths.x25519_wireguard_storage_paths(), + &config.storage_paths.x25519_wireguard_storage_paths(), )?), ); Ok(WireguardData { inner, peer_rx }) @@ -336,7 +341,7 @@ impl WireguardData { store_keypair( &x25519_keys, - config.storage_paths.x25519_wireguard_storage_paths(), + &config.storage_paths.x25519_wireguard_storage_paths(), "wg-x25519-dh", )?; @@ -372,7 +377,7 @@ pub(crate) struct NymNode { wireguard: Option, ed25519_identity_keys: Arc, - x25519_sphinx_keys: Arc, + sphinx_key_manager: Option, // to be used when noise is integrated #[allow(dead_code)] @@ -389,25 +394,26 @@ impl NymNode { // global initialisation let ed25519_identity_keys = ed25519::KeyPair::new(&mut rng); - let x25519_sphinx_keys = x25519::KeyPair::new(&mut rng); let x25519_noise_keys = x25519::KeyPair::new(&mut rng); + let current_rotation_id = + get_current_rotation_id(&config.mixnet.nym_api_urls, &config.mixnet.nyxd_urls).await?; + let _ = SphinxKeyManager::initialise_new( + &mut rng, + current_rotation_id, + &config.storage_paths.keys.primary_x25519_sphinx_key_file, + &config.storage_paths.keys.secondary_x25519_sphinx_key_file, + )?; trace!("attempting to store ed25519 identity keypair"); store_ed25519_identity_keypair( &ed25519_identity_keys, - config.storage_paths.keys.ed25519_identity_storage_paths(), - )?; - - trace!("attempting to store x25519 sphinx keypair"); - store_x25519_sphinx_keypair( - &x25519_sphinx_keys, - config.storage_paths.keys.x25519_sphinx_storage_paths(), + &config.storage_paths.keys.ed25519_identity_storage_paths(), )?; trace!("attempting to store x25519 noise keypair"); store_x25519_noise_keypair( &x25519_noise_keys, - config.storage_paths.keys.x25519_noise_storage_paths(), + &config.storage_paths.keys.x25519_noise_storage_paths(), )?; trace!("creating description file"); @@ -434,16 +440,20 @@ impl NymNode { pub(crate) async fn new(config: Config) -> Result { let 
wireguard_data = WireguardData::new(&config.wireguard)?; + let current_rotation_id = + get_current_rotation_id(&config.mixnet.nym_api_urls, &config.mixnet.nyxd_urls).await?; Ok(NymNode { ed25519_identity_keys: Arc::new(load_ed25519_identity_keypair( - config.storage_paths.keys.ed25519_identity_storage_paths(), + &config.storage_paths.keys.ed25519_identity_storage_paths(), )?), - x25519_sphinx_keys: Arc::new(load_x25519_sphinx_keypair( - config.storage_paths.keys.x25519_sphinx_storage_paths(), + sphinx_key_manager: Some(SphinxKeyManager::try_load_or_regenerate( + current_rotation_id, + &config.storage_paths.keys.primary_x25519_sphinx_key_file, + &config.storage_paths.keys.secondary_x25519_sphinx_key_file, )?), x25519_noise_keys: Arc::new(load_x25519_noise_keypair( - config.storage_paths.keys.x25519_noise_storage_paths(), + &config.storage_paths.keys.x25519_noise_storage_paths(), )?), description: load_node_description(&config.storage_paths.description)?, metrics: NymNodeMetrics::new(), @@ -510,11 +520,13 @@ impl NymNode { } pub(crate) fn display_details(&self) -> Result { + let sphinx_keys = self.sphinx_keys()?; Ok(DisplayDetails { current_modes: self.config.modes, description: self.description.clone(), ed25519_identity_key: self.ed25519_identity_key().to_base58_string(), - x25519_sphinx_key: self.x25519_sphinx_key().to_base58_string(), + x25519_primary_sphinx_key: sphinx_keys.keys.primary().deref().into(), + x25519_secondary_sphinx_key: sphinx_keys.keys.secondary().map(|g| g.deref().into()), x25519_noise_key: self.x25519_noise_key().to_base58_string(), x25519_wireguard_key: self.x25519_wireguard_key()?.to_base58_string(), exit_network_requester_address: self.exit_network_requester_address().to_string(), @@ -531,22 +543,19 @@ impl NymNode { self.ed25519_identity_keys.public_key() } - pub(crate) fn x25519_sphinx_key(&self) -> &x25519::PublicKey { - self.x25519_sphinx_keys.public_key() - } - - pub(crate) fn x25519_sphinx_keys(&self) -> Arc { - 
self.x25519_sphinx_keys.clone() - } - pub(crate) fn x25519_noise_key(&self) -> &x25519::PublicKey { self.x25519_noise_keys.public_key() } + #[track_caller] + pub(crate) fn active_sphinx_keys(&self) -> Result { + Ok(self.sphinx_keys()?.keys.clone()) + } + async fn build_network_refresher(&self) -> Result { NetworkRefresher::initialise_new( self.config.debug.testnet, - self.user_agent(), + Self::user_agent(), self.config.mixnet.nym_api_urls.clone(), self.config.debug.topology_cache_ttl, self.config.debug.routing_nodes_check_interval, @@ -555,7 +564,7 @@ impl NymNode { .await } - fn as_gateway_topology_node(&self) -> Result { + fn as_gateway_topology_node(&self) -> Result { let ip_addresses = self.config.host.public_ips.clone(); let Some(ip) = ip_addresses.first() else { @@ -575,21 +584,15 @@ impl NymNode { .announce_ws_port .unwrap_or(self.config.gateway_tasks.ws_bind_address.port()); - Ok(nym_topology::RoutingNode { - node_id: u32::MAX, + Ok(LocalGatewayNode { + active_sphinx_keys: self.active_sphinx_keys()?.clone(), mix_host, - entry: Some(nym_topology::EntryDetails { + identity_key: *self.ed25519_identity_key(), + entry: nym_topology::EntryDetails { ip_addresses, clients_ws_port, hostname: self.config.host.hostname.clone(), clients_wss_port: self.config.gateway_tasks.announce_wss_port, - }), - sphinx_key: *self.x25519_sphinx_key(), - identity_key: *self.ed25519_identity_key(), - supported_roles: nym_topology::SupportedRoles { - mixnode: false, - mixnet_entry: true, - mixnet_exit: true, }, }) } @@ -697,13 +700,6 @@ impl NymNode { } pub(crate) async fn build_http_server(&self) -> Result { - let host_details = sign_host_details( - &self.config, - self.x25519_sphinx_keys.public_key(), - self.x25519_noise_keys.public_key(), - &self.ed25519_identity_keys, - )?; - let auxiliary_details = api_requests::v1::node::models::AuxiliaryDetails { location: self.config.host.location, announce_ports: AnnouncePorts { @@ -773,7 +769,7 @@ impl NymNode { policy: None, }; - let mut 
config = HttpServerConfig::new(host_details) + let mut config = HttpServerConfig::new() .with_landing_page_assets(self.config.http.landing_page_assets_path.as_ref()) .with_mixnode_details(mixnode_details) .with_gateway_details(gateway_details) @@ -804,7 +800,20 @@ impl NymNode { config.api.v1_config.node.roles.ip_packet_router_enabled = true; } + let x25519_noise_key = if self.config.mixnet.debug.unsafe_disable_noise { + None + } else { + Some(*self.x25519_noise_keys.public_key()) + }; + let app_state = AppState::new( + StaticNodeInformation { + ed25519_identity_keys: self.ed25519_identity_keys.clone(), + x25519_noise_key, + ip_addresses: self.config.host.public_ips.clone(), + hostname: self.config.host.hostname.clone(), + }, + self.active_sphinx_keys()?.clone(), self.metrics.clone(), self.verloc_stats.clone(), self.config.http.node_load_cache_ttl, @@ -815,54 +824,20 @@ impl NymNode { .await?) } - fn user_agent(&self) -> UserAgent { + fn user_agent() -> UserAgent { bin_info!().into() } - async fn try_refresh_remote_nym_api_cache(&self) { - info!("attempting to request described cache refresh from nym-api..."); - if self.config.mixnet.nym_api_urls.is_empty() { - warn!("no nym-api urls available"); - return; - } + async fn try_refresh_remote_nym_api_cache( + &self, + client: &NymApisClient, + ) -> Result<(), NymNodeError> { + info!("attempting to request described cache refresh from nym-api(s)..."); - for nym_api_url in &self.config.mixnet.nym_api_urls { - info!("trying {nym_api_url}..."); - - let nym_api = - match nym_http_api_client::ClientBuilder::new_with_url(nym_api_url.clone()) - .no_hickory_dns() - .with_user_agent(self.user_agent()) - .build::<&str>() - { - Ok(b) => b, - Err(e) => { - warn!("failed to build http client for \"{nym_api_url}\": {e}",); - continue; - } - }; - - let client = NymApiClient::from(nym_api); - - // make new request every time in case previous one takes longer and invalidates the signature - let request = 
NodeRefreshBody::new(self.ed25519_identity_keys.private_key()); - match timeout( - Duration::from_secs(10), - client.nym_api.force_refresh_describe_cache(&request), - ) - .await - { - Ok(Ok(_)) => { - info!("managed to refresh own self-described data cache") - } - Ok(Err(request_failure)) => { - warn!("failed to resolve the refresh request: {request_failure}") - } - Err(_timeout) => { - warn!("timed out while attempting to resolve the request. the cache might be stale") - } - }; - } + client + .broadcast_force_refresh(self.ed25519_identity_keys.private_key()) + .await; + Ok(()) } pub(crate) fn start_verloc_measurements(&self) { @@ -871,7 +846,7 @@ impl NymNode { self.config.verloc.bind_address ); - let mut base_agent = self.user_agent(); + let mut base_agent = Self::user_agent(); base_agent.application = format!("{}-verloc", base_agent.application); let config = nym_verloc::measurements::ConfigBuilder::new( self.config.mixnet.nym_api_urls.clone(), @@ -964,7 +939,6 @@ impl NymNode { // >>>> END: register all relevant handlers // console logger to preserve old mixnode functionalities - // if self.config.logging.debug.log_to_console { if self.config.metrics.debug.log_stats_to_console { ConsoleLogger::new( self.config.metrics.debug.console_logging_update_interval, @@ -984,30 +958,78 @@ impl NymNode { pub(crate) async fn setup_replay_detection( &self, - ) -> Result { + ) -> Result { if self.config.mixnet.replay_protection.debug.unsafe_disabled { - return Ok(ReplayProtectionBloomfilter::new_disabled()); + return Ok(ReplayProtectionBloomfiltersManager::new_disabled( + self.metrics.clone(), + )); } // create the background task for the bloomfilter // to reset it and flush it to disk - let mut replay_detection_background = ReplayProtectionBackgroundTask::new( + let sphinx_keys = self.sphinx_keys()?; + let mut replay_detection_background = ReplayProtectionDiskFlush::new( &self.config, + sphinx_keys.keys.primary_key_rotation_id(), + 
sphinx_keys.keys.secondary_key_rotation_id(), self.metrics.clone(), self.shutdown_manager - .clone_token("replay-detection-background"), + .clone_token("replay-detection-background-flush"), ) .await?; - let replay_protection_bloomfilter = replay_detection_background.global_bloomfilter(); + let bloomfilters_manager = replay_detection_background.bloomfilters_manager(); self.shutdown_manager .spawn(async move { replay_detection_background.run().await }); - Ok(replay_protection_bloomfilter) + Ok(bloomfilters_manager) + } + + // I'm assuming this will be needed in other places, so it's explicitly extracted + fn setup_nym_apis_client(&self) -> Result { + NymApisClient::new( + &self.config.mixnet.nym_api_urls, + self.shutdown_manager.clone_token("nym-apis-client"), + ) + } + + #[track_caller] + fn sphinx_keys(&self) -> Result<&SphinxKeyManager, NymNodeError> { + self.sphinx_key_manager + .as_ref() + .ok_or(NymNodeError::ConsumedSphinxKeys) + } + + fn take_managed_sphinx_keys(&mut self) -> Result { + self.sphinx_key_manager + .take() + .ok_or(NymNodeError::ConsumedSphinxKeys) + } + + pub(crate) async fn setup_key_rotation( + &mut self, + nym_apis_client: NymApisClient, + replay_protection_manager: ReplayProtectionBloomfiltersManager, + ) -> Result<(), NymNodeError> { + let managed_keys = self.take_managed_sphinx_keys()?; + let rotation_state = nym_apis_client.get_key_rotation_info().await?; + + let rotation_controller = KeyRotationController::new( + &self.config, + rotation_state.into(), + nym_apis_client, + replay_protection_manager, + managed_keys, + self.shutdown_manager.clone_token("key-rotation-controller"), + ); + + rotation_controller.start(); + Ok(()) } pub(crate) async fn start_mixnet_listener( &self, active_clients_store: &ActiveClientsStore, + replay_protection_bloomfilter: ReplayProtectionBloomfilters, routing_filter: F, shutdown: ShutdownToken, ) -> Result<(MixForwardingSender, ActiveConnections), NymNodeError> @@ -1038,7 +1060,6 @@ impl NymNode { ); let 
active_connections = mixnet_client.active_connections(); - let replay_protection_bloomfilter = self.setup_replay_detection().await?; let mut packet_forwarder = PacketForwarder::new( mixnet_client, routing_filter, @@ -1055,7 +1076,7 @@ impl NymNode { let shared = mixnet::SharedData::new( processing_config, - self.x25519_sphinx_keys.clone(), + self.active_sphinx_keys()?, replay_protection_bloomfilter, mix_packet_sender.clone(), final_hop_data, @@ -1070,6 +1091,7 @@ impl NymNode { pub(crate) async fn run_minimal_mixnet_processing(self) -> Result<(), NymNodeError> { self.start_mixnet_listener( &ActiveClientsStore::new(), + ReplayProtectionBloomfilters::new_disabled(), OpenFilter, self.shutdown_manager.clone_token("mixnet-traffic"), ) @@ -1104,15 +1126,21 @@ impl NymNode { } }); - self.try_refresh_remote_nym_api_cache().await; + let nym_apis_client = self.setup_nym_apis_client()?; + + self.try_refresh_remote_nym_api_cache(&nym_apis_client) + .await?; self.start_verloc_measurements(); let network_refresher = self.build_network_refresher().await?; let active_clients_store = ActiveClientsStore::new(); + let bloomfilters_manager = self.setup_replay_detection().await?; + let (mix_packet_sender, active_egress_mixnet_connections) = self .start_mixnet_listener( &active_clients_store, + bloomfilters_manager.bloomfilters(), network_refresher.routing_filter(), self.shutdown_manager.clone_token("mixnet-traffic"), ) @@ -1133,6 +1161,9 @@ impl NymNode { ) .await?; + self.setup_key_rotation(nym_apis_client, bloomfilters_manager) + .await?; + network_refresher.start(); self.shutdown_manager.close(); diff --git a/nym-node/src/node/nym_apis_client.rs b/nym-node/src/node/nym_apis_client.rs new file mode 100644 index 00000000000..918e3a756cc --- /dev/null +++ b/nym-node/src/node/nym_apis_client.rs @@ -0,0 +1,216 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::error::NymNodeError; +use crate::node::NymNode; +use futures::{stream, 
StreamExt}; +use nym_crypto::asymmetric::ed25519; +use nym_http_api_client::Client; +use nym_task::ShutdownToken; +use nym_validator_client::client::NymApiClientExt; +use nym_validator_client::models::{KeyRotationInfoResponse, NodeRefreshBody}; +use nym_validator_client::nym_api::error::NymAPIError; +use nym_validator_client::NymApiClient; +use rand::prelude::SliceRandom; +use rand::thread_rng; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::RwLock; +use tokio::time::sleep; +use tracing::{debug, warn}; +use url::Url; + +#[derive(Clone)] +pub struct NymApisClient { + inner: Arc>, +} + +struct InnerClient { + active_client: NymApiClient, + available_urls: Vec, + shutdown_token: ShutdownToken, + currently_used_api: usize, +} + +impl NymApisClient { + pub(crate) fn new( + nym_apis: &[Url], + shutdown_token: ShutdownToken, + ) -> Result { + if nym_apis.is_empty() { + return Err(NymNodeError::NoNymApiUrls); + } + + let mut urls = nym_apis.to_vec(); + urls.shuffle(&mut thread_rng()); + + let active_client = nym_http_api_client::Client::builder(urls[0].clone())? 
+ .no_hickory_dns() + .with_user_agent(NymNode::user_agent()) + .with_timeout(Duration::from_secs(5)) + .build()?; + + Ok(NymApisClient { + inner: Arc::new(RwLock::new(InnerClient { + active_client: NymApiClient::from(active_client), + available_urls: urls, + shutdown_token, + currently_used_api: 0, + })), + }) + } + + // async fn use_next_endpoint(&self) { + // let mut guard = self.inner.write().await; + // if guard.available_urls.len() == 1 { + // return; + // } + // + // let next_index = (guard.currently_used_api + 1) % guard.available_urls.len(); + // let next = guard.available_urls[next_index].clone(); + // guard.currently_used_api = next_index; + // guard.active_client.change_nym_api(next) + // } + + pub(crate) async fn query_exhaustively( + &self, + req: R, + timeout_duration: Duration, + ) -> Result + where + R: AsyncFn(Client) -> Result, + { + let guard = self.inner.read().await; + let (res, last_working_endpoint) = guard.query_exhaustively(req, timeout_duration).await?; + + // if we had to use a different api, update our starting point for the future calls + if guard.currently_used_api != last_working_endpoint { + drop(guard); + let mut guard = self.inner.write().await; + let next_url = guard.available_urls[last_working_endpoint].clone(); + guard.currently_used_api = last_working_endpoint; + guard.active_client.change_nym_api(next_url); + } + + Ok(res) + } + + pub(crate) async fn broadcast_force_refresh(&self, private_key: &ed25519::PrivateKey) { + self.inner + .read() + .await + .broadcast_force_refresh(private_key) + .await; + } + + pub(crate) async fn get_key_rotation_info( + &self, + ) -> Result { + self.query_exhaustively( + async |c| c.get_key_rotation_info().await, + Duration::from_secs(5), + ) + .await + } +} + +impl InnerClient { + // currently there are no cases without json body, but for those we'd just need to slightly adjust the signature + async fn broadcast(&self, request_body: &B, req: R, timeout_duration: Duration) + where + R: 
AsyncFn(Client, &B) -> Result<(), NymAPIError>, + { + let broadcast_fut = + stream::iter(self.available_urls.clone()).for_each_concurrent(None, |url| { + let nym_api = self.active_client.nym_api.clone_with_new_url(url.clone()); + let req_fut = req(nym_api, request_body); + async move { + if let Err(err) = req_fut.await { + warn!("broadcast request to {url} failed: {err}") + } + } + }); + + let timeout_fut = sleep(timeout_duration); + + tokio::select! { + _ = broadcast_fut => { + debug!("managed to broadcast data to all nym apis") + } + _ = timeout_fut => { + warn!("timed out while attempting to broadcast data to known nym apis") + + } + _ = self.shutdown_token.cancelled() => { + debug!("received shutdown while attempting to broadcast data to known nym apis") + } + } + } + + async fn query_exhaustively( + &self, + req: R, + timeout_duration: Duration, + ) -> Result<(T, usize), NymNodeError> + where + R: AsyncFn(Client) -> Result, + { + let last_working = self.currently_used_api; + + // start from the last working api and progress from there + // also, note this is DESIGNED to query sequentially (but exhaustively) + // and not to try to send queries to ALL apis at once + // and check which resolves first + for (idx, url) in self + .available_urls + .iter() + .enumerate() + .skip(last_working) + .chain(self.available_urls.iter().enumerate().take(last_working)) + { + let nym_api = self.active_client.nym_api.clone_with_new_url(url.clone()); + + let timeout_fut = sleep(timeout_duration); + let query_fut = req(nym_api); + + tokio::select! 
{ + res = query_fut => { + debug!("managed to broadcast data to all nym apis"); + match res { + Ok(res) => return Ok((res, idx)), + Err(err) => { + warn!("failed to resolve query for {url}: {err}"); + } + } + } + _ = timeout_fut => { + warn!("timed out while attempting to query {url}") + + } + _ = self.shutdown_token.cancelled() => { + debug!("received shutdown while attempting to query {url}"); + return Err(NymNodeError::ShutdownReceived) + } + } + } + + Err(NymNodeError::NymApisExhausted) + } + + async fn broadcast_force_refresh(&self, private_key: &ed25519::PrivateKey) { + let request = NodeRefreshBody::new(private_key); + + self.broadcast( + &request, + async |client, request| client.force_refresh_describe_cache(request).await, + Duration::from_secs(10), + ) + .await; + } +} + +impl AsRef for InnerClient { + fn as_ref(&self) -> &NymApiClient { + &self.active_client + } +} diff --git a/nym-node/src/node/replay_protection/background_task.rs b/nym-node/src/node/replay_protection/background_task.rs index 392c79cd681..68092e02088 100644 --- a/nym-node/src/node/replay_protection/background_task.rs +++ b/nym-node/src/node/replay_protection/background_task.rs @@ -1,204 +1,233 @@ // Copyright 2025 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only +use crate::config::persistence::{ + DEFAULT_RD_BLOOMFILTER_FILE_EXT, DEFAULT_RD_BLOOMFILTER_FLUSH_FILE_EXT, +}; use crate::config::Config; use crate::error::NymNodeError; -use crate::node::replay_protection::bloomfilter::ReplayProtectionBloomfilter; +use crate::node::replay_protection::bloomfilter::RotationFilter; +use crate::node::replay_protection::helpers::parse_rotation_id_from_filename; use crate::node::replay_protection::items_in_bloomfilter; -use human_repr::HumanCount; +use crate::node::replay_protection::manager::ReplayProtectionBloomfiltersManager; +use human_repr::HumanDuration; use nym_node_metrics::NymNodeMetrics; use nym_task::ShutdownToken; -use std::cmp::max; +use std::collections::HashMap; use 
std::fs; use std::path::PathBuf; use std::time::Duration; +use tokio::fs::File; +use tokio::io::AsyncWriteExt; use tokio::time::{interval, Instant}; -use tracing::{error, info, trace, warn}; +use tracing::{debug, error, info, trace, warn}; -struct LastResetData { - packets_received_at_last_reset: usize, - reset_time: Instant, -} - -struct ReplayProtectionBackgroundTaskConfig { - current_bloomfilter_path: PathBuf, - current_bloomfilter_temp_flush_path: PathBuf, - - false_positive_rate: f64, - filter_reset_rate: Duration, +// background task responsible for periodically flushing the bloomfilters to disk +pub struct ReplayProtectionDiskFlush { + bloomfilters_directory: PathBuf, disk_flushing_rate: Duration, - bloomfilter_size_multiplier: f64, - minimum_bloomfilter_packets_per_second: usize, -} -impl From<&Config> for ReplayProtectionBackgroundTaskConfig { - fn from(config: &Config) -> Self { - ReplayProtectionBackgroundTaskConfig { - current_bloomfilter_path: config - .mixnet - .replay_protection - .storage_paths - .current_bloomfilter_filepath(), - current_bloomfilter_temp_flush_path: config - .mixnet - .replay_protection - .storage_paths - .current_bloomfilter_being_flushed_filepath(), - false_positive_rate: config.mixnet.replay_protection.debug.false_positive_rate, - filter_reset_rate: config.mixnet.replay_protection.debug.bloomfilter_reset_rate, - disk_flushing_rate: config - .mixnet - .replay_protection - .debug - .bloomfilter_disk_flushing_rate, - bloomfilter_size_multiplier: config - .mixnet - .replay_protection - .debug - .bloomfilter_size_multiplier, - minimum_bloomfilter_packets_per_second: config - .mixnet - .replay_protection - .debug - .bloomfilter_minimum_packets_per_second_size, - } - } -} - -// background task responsible for periodically flushing the bloomfilter to disk -// as well as clearing it up on the specified timer -// (in the future this will be enforced by key rotation) -pub struct ReplayProtectionBackgroundTask { - config: 
ReplayProtectionBackgroundTaskConfig, - last_reset: LastResetData, - - filter: ReplayProtectionBloomfilter, - metrics: NymNodeMetrics, + filters_manager: ReplayProtectionBloomfiltersManager, shutdown_token: ShutdownToken, } -impl ReplayProtectionBackgroundTask { +impl ReplayProtectionDiskFlush { pub(crate) async fn new( config: &Config, + primary_key_rotation_id: u32, + secondary_key_rotation_id: Option, metrics: NymNodeMetrics, shutdown_token: ShutdownToken, ) -> Result { - let task_config: ReplayProtectionBackgroundTaskConfig = config.into(); - - if task_config.current_bloomfilter_temp_flush_path.exists() { - error!( - "bloomfilter didn't get successfully flushed to disk and its data got corrupted" - ); - fs::remove_file(&task_config.current_bloomfilter_temp_flush_path).map_err(|source| { - NymNodeError::BloomfilterIoFailure { - source, - path: task_config.current_bloomfilter_temp_flush_path.clone(), + let bloomfilters_directory = config + .mixnet + .replay_protection + .storage_paths + .current_bloomfilters_directory + .clone(); + + let dir_read_err = |source| NymNodeError::BloomfilterIoFailure { + source, + path: bloomfilters_directory.clone(), + }; + + if !bloomfilters_directory.exists() { + fs::create_dir_all(&bloomfilters_directory).map_err(dir_read_err)?; + } + + let available_filters_dir = fs::read_dir(&bloomfilters_directory).map_err(dir_read_err)?; + + // figure out what bloomfilters we have available on disk + let mut filter_files = HashMap::new(); + for entry in available_filters_dir.into_iter() { + let entry = entry.map_err(dir_read_err)?; + let path = entry.path(); + + let Some(rotation) = entry + .file_name() + .to_str() + .and_then(parse_rotation_id_from_filename) + else { + warn!("invalid bloomfilter file at '{}'", path.display()); + continue; + }; + + // if any bloomfilter has the temp extension, we can't trust its data as it hasn't completed the flush + if let Some(ext) = entry.path().extension() { + if ext == 
DEFAULT_RD_BLOOMFILTER_FLUSH_FILE_EXT { + error!( + "bloomfilter {rotation} didn't get successfully flushed to disk and its data got corrupted" + ); + fs::remove_file(&path) + .map_err(|source| NymNodeError::BloomfilterIoFailure { source, path })?; + continue; } - })? + } + + filter_files.insert(rotation, path); } - // if there's nothing on disk, we must create a new filter - let bloomfilter = if task_config.current_bloomfilter_path.exists() { - ReplayProtectionBloomfilter::load(&task_config.current_bloomfilter_path).await? - } else { - let bf_items = items_in_bloomfilter( - task_config.filter_reset_rate, - config - .mixnet - .replay_protection - .debug - .initial_expected_packets_per_second, - ); - - ReplayProtectionBloomfilter::new_empty(bf_items, task_config.false_positive_rate)? + let rebuild_items_in_filter = items_in_bloomfilter( + Duration::from_secs(25 * 60 * 60), + config + .mixnet + .replay_protection + .debug + .initial_expected_packets_per_second, + ); + let fp_r = config.mixnet.replay_protection.debug.false_positive_rate; + + // if filters do not exist on disk, we must make new ones + let primary_bloomfilter = match filter_files.get(&primary_key_rotation_id) { + Some(primary_path) => RotationFilter::load(primary_path)?, + None => { + info!("no stored bloomfilter for rotation {primary_key_rotation_id}"); + RotationFilter::new(rebuild_items_in_filter, fp_r, 0, primary_key_rotation_id)? 
+ } }; - Ok(ReplayProtectionBackgroundTask { - config: task_config, - last_reset: LastResetData { - packets_received_at_last_reset: 0, - reset_time: Instant::now(), - }, - filter: bloomfilter, - metrics, + let secondary_bloomfilter = + if let Some(secondary_key_rotation_id) = secondary_key_rotation_id { + match filter_files.get(&secondary_key_rotation_id) { + Some(secondary_path) => Some(RotationFilter::load(secondary_path)?), + None => { + info!("no stored bloomfilter for rotation {secondary_key_rotation_id}"); + Some(RotationFilter::new( + rebuild_items_in_filter, + fp_r, + 0, + secondary_key_rotation_id, + )?) + } + } + } else { + None + }; + + Ok(ReplayProtectionDiskFlush { + bloomfilters_directory, + disk_flushing_rate: config + .mixnet + .replay_protection + .debug + .bloomfilter_disk_flushing_rate, + filters_manager: ReplayProtectionBloomfiltersManager::new( + config, + primary_bloomfilter, + secondary_bloomfilter, + metrics, + ), shutdown_token, }) } - pub(crate) fn global_bloomfilter(&self) -> ReplayProtectionBloomfilter { - self.filter.clone() + fn bloomfilter_filepath(&self, rotation_id: u32) -> PathBuf { + self.bloomfilters_directory + .join(format!("rot-{rotation_id}")) + .with_extension(DEFAULT_RD_BLOOMFILTER_FILE_EXT) } - async fn flush_to_disk(&self) -> Result<(), NymNodeError> { - if let Some(temp_parent) = self.config.current_bloomfilter_temp_flush_path.parent() { - fs::create_dir_all(temp_parent).map_err(|source| { - NymNodeError::BloomfilterIoFailure { - source, - path: temp_parent.to_path_buf(), - } - })? - } - if let Some(current_parent) = self.config.current_bloomfilter_temp_flush_path.parent() { - fs::create_dir_all(current_parent).map_err(|source| { - NymNodeError::BloomfilterIoFailure { - source, - path: current_parent.to_path_buf(), - } - })? 
- } + fn current_bloomfilter_being_flushed_filepath(&self, rotation_id: u32) -> PathBuf { + self.bloomfilters_directory + .join(format!("rot-{rotation_id}")) + .with_extension(DEFAULT_RD_BLOOMFILTER_FLUSH_FILE_EXT) + } + pub(crate) fn bloomfilters_manager(&self) -> ReplayProtectionBloomfiltersManager { + self.filters_manager.clone() + } + + async fn flush(&self, data: Vec, rotation_id: u32) -> Result<(), NymNodeError> { // because it takes a while to actually write the file to disk, // we first write bytes to temporary location, // and then we move it to the correct path - let temp = &self.config.current_bloomfilter_temp_flush_path; - self.filter.flush_to_disk(temp).await?; - fs::rename(temp, &self.config.current_bloomfilter_path).map_err(|source| { + let temp_path = self.current_bloomfilter_being_flushed_filepath(rotation_id); + let final_path = self.bloomfilter_filepath(rotation_id); + debug!("flushing replay protection bloomfilter {rotation_id} to disk..."); + let start = Instant::now(); + + let mut file = File::create(&temp_path).await.map_err(|source| { NymNodeError::BloomfilterIoFailure { source, - path: self.config.current_bloomfilter_path.clone(), + path: temp_path.clone(), } })?; - Ok(()) - } - fn reset_bloomfilter(&mut self) -> Result<(), NymNodeError> { - // 1. 
determine parameters for new bloomfilter - let received = self.metrics.mixnet.ingress.forward_hop_packets_received() - + self.metrics.mixnet.ingress.final_hop_packets_received(); + file.write_all(&data) + .await + .map_err(|source| NymNodeError::BloomfilterIoFailure { + source, + path: temp_path.to_path_buf(), + })?; - let time_delta = self.last_reset.reset_time.elapsed(); - let received_since_last_reset = received - self.last_reset.packets_received_at_last_reset; - let received_per_second = - (received_since_last_reset as f64 / time_delta.as_secs_f64()).round() as usize; + fs::rename(temp_path, &final_path).map_err(|source| { + NymNodeError::BloomfilterIoFailure { + source, + path: final_path, + } + })?; - let bf_received = max( - received_per_second, - self.config.minimum_bloomfilter_packets_per_second, - ); - let items_in_new_filter = items_in_bloomfilter(self.config.filter_reset_rate, bf_received); - let adjusted = - (items_in_new_filter as f64 * self.config.bloomfilter_size_multiplier).round() as usize; + let elapsed = start.elapsed(); info!( - "resetting bloom filter. new expected number of packets: {} that preserve fp rate of {}", - adjusted.human_count_bare(), - self.config.false_positive_rate + "flushed replay protection bloomfilter {rotation_id} to disk. it took: {}", + elapsed.human_duration() ); - // 2. update the filter - self.last_reset.reset_time = Instant::now(); - self.last_reset.packets_received_at_last_reset = received_since_last_reset; + Ok(()) + } - // if this fails with the mutex getting poisoned, the next received packet is going to cause - // a shutdown, so we don't have to propagate it here - self.filter.reset(adjusted, self.config.false_positive_rate) + // average HDD has the write speed of ~80MB/s so a 2GB bloomfilter would take almost 30s to write... 
+ // and this function is explicitly async and using tokio's async operations, because otherwise + // we'd have to go through the whole hassle of using spawn_blocking and awaiting that one instead + async fn flush_primary(&self) -> Result<(), NymNodeError> { + let (bytes, id) = self.filters_manager.primary_bytes_and_id()?; + self.flush(bytes, id).await } - pub(crate) async fn run(&mut self) { - let mut reset_timer = interval(self.config.filter_reset_rate); - reset_timer.reset(); + async fn flush_secondary(&self) -> Result<(), NymNodeError> { + let Some((bytes, id)) = self.filters_manager.secondary_bytes_and_id()? else { + return Ok(()); + }; + self.flush(bytes, id).await + } - let mut flush_timer = interval(self.config.disk_flushing_rate); + async fn flush_filters_to_disk(&self) -> Result<(), NymNodeError> { + if let Some(parent) = self.bloomfilters_directory.parent() { + fs::create_dir_all(parent).map_err(|source| NymNodeError::BloomfilterIoFailure { + source, + path: parent.to_path_buf(), + })? + } + + self.flush_primary().await?; + self.flush_secondary().await?; + + Ok(()) + } + + pub(crate) async fn run(&mut self) { + let mut flush_timer = interval(self.disk_flushing_rate); flush_timer.reset(); loop { @@ -208,13 +237,8 @@ impl ReplayProtectionBackgroundTask { trace!("ReplayProtectionBackgroundTask: Received shutdown"); break; } - _ = reset_timer.tick() => { - if let Err(err) = self.reset_bloomfilter() { - error!("failed to reset the bloomfilter: {err}") - } - } _ = flush_timer.tick() => { - if let Err(err) = self.flush_to_disk().await { + if let Err(err) = self.flush_filters_to_disk().await { error!("failed to flush bloomfilter to disk: {err}") } } @@ -222,8 +246,8 @@ impl ReplayProtectionBackgroundTask { } info!("SHUTDOWN: flushing replay detection bloomfilter to disk. this might take a while. 
DO NOT INTERRUPT THIS PROCESS"); - if let Err(err) = self.flush_to_disk().await { - warn!("failed to flush replay detection bloom filter on shutdown: {err}"); + if let Err(err) = self.flush_filters_to_disk().await { + warn!("failed to flush replay detection bloom filters on shutdown: {err}"); } } } diff --git a/nym-node/src/node/replay_protection/bloomfilter.rs b/nym-node/src/node/replay_protection/bloomfilter.rs index 087b7b5444c..7ae35875cba 100644 --- a/nym-node/src/node/replay_protection/bloomfilter.rs +++ b/nym-node/src/node/replay_protection/bloomfilter.rs @@ -3,43 +3,128 @@ use crate::error::NymNodeError; use bloomfilter::Bloom; -use human_repr::HumanDuration; use nym_sphinx_types::REPLAY_TAG_SIZE; +use std::collections::HashMap; +use std::fs::File; +use std::io::Read; +use std::mem; use std::path::Path; -use std::sync::{Arc, PoisonError, TryLockError}; -use tokio::fs::File; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; -use tokio::time::Instant; -use tracing::{debug, info}; +use std::sync::{Arc, Mutex, PoisonError, TryLockError}; +use time::OffsetDateTime; +use tracing::{error, info, warn}; + +// auxiliary data associated with the bloomfilter to get some statistics from the time of its creation +// this is needed in order to more accurately resize it upon reset + +#[derive(Copy, Clone)] +pub(crate) struct ReplayProtectionBloomfilterMetadata { + // used in the unlikely case of epoch durations being changed. it doesn't really cost us anything + // to include it, so might as well + pub(crate) creation_time: OffsetDateTime, + + /// Number of packets that this node has received since startup, as recorded when this bloomfilter was created. 
+ /// Used for determining the approximate packet rate and thus number of entries in the bloomfilter + pub(crate) packets_received_at_creation: usize, + + pub(crate) rotation_id: u32, +} + +impl ReplayProtectionBloomfilterMetadata { + const SERIALIZED_LEN: usize = size_of::() + size_of::() + size_of::(); + + // UNIX_TIMESTAMP || PACKETS_RECEIVED || ROTATION_ID + pub(crate) fn bytes(&self) -> Vec { + self.creation_time + .unix_timestamp() + .to_be_bytes() + .into_iter() + .chain((self.packets_received_at_creation as u64).to_be_bytes()) + .chain(self.rotation_id.to_be_bytes()) + .collect() + } + pub(crate) fn try_from_bytes(bytes: &[u8]) -> Result { + if bytes.len() != Self::SERIALIZED_LEN { + return Err(NymNodeError::BloomfilterMetadataDeserialisationFailure); + } + + // SAFETY: we just checked we have correct number of bytes + #[allow(clippy::unwrap_used)] + let creation_timestamp = i64::from_be_bytes(bytes[0..8].try_into().unwrap()); + + #[allow(clippy::unwrap_used)] + let packets_received_at_creation = + u64::from_be_bytes(bytes[8..16].try_into().unwrap()) as usize; + + #[allow(clippy::unwrap_used)] + let rotation_id = u32::from_be_bytes(bytes[16..].try_into().unwrap()); + + Ok(ReplayProtectionBloomfilterMetadata { + creation_time: OffsetDateTime::from_unix_timestamp(creation_timestamp) + .map_err(|_| NymNodeError::BloomfilterMetadataDeserialisationFailure)?, + packets_received_at_creation, + rotation_id, + }) + } +} // it appears that now std Mutex is faster (or comparable) to parking_lot // in high contention situations: https://github.com/rust-lang/rust/pull/95035#issuecomment-1073966631 // (tokio's async Mutex has too much overhead due to the number of access required) #[derive(Clone)] -pub(crate) struct ReplayProtectionBloomfilter { +pub(crate) struct ReplayProtectionBloomfilters { disabled: bool, - inner: Arc>, + inner: Arc>, } -impl ReplayProtectionBloomfilter { - pub(crate) fn new_empty(items_count: usize, fp_p: f64) -> Result { - 
Ok(ReplayProtectionBloomfilter { +impl ReplayProtectionBloomfilters { + pub(crate) fn new(primary: RotationFilter, secondary: Option) -> Self { + // figure out if the secondary filter is the overlap or pre_announced filter + let primary_id = primary.metadata.rotation_id; + + let next = primary_id + 1; + let previous = primary_id.checked_sub(1); + let (overlap, pre_announced) = match secondary { + None => (None, None), + Some(secondary_filter) => { + let secondary_id = secondary_filter.metadata.rotation_id; + if secondary_id == next { + (None, Some(secondary_filter)) + } else if Some(secondary_id) == previous { + (Some(secondary_filter), None) + } else { + warn!("{secondary_id} is not valid for either pre_announced or overlap bloomfilter given primary rotation of {primary_id}"); + (None, None) + } + } + }; + + ReplayProtectionBloomfilters { disabled: false, - inner: Arc::new(std::sync::Mutex::new(ReplayProtectionBloomfilterInner { - current_filter: Bloom::new_for_fp_rate(items_count, fp_p) - .map_err(NymNodeError::bloomfilter_failure)?, + inner: Arc::new(Mutex::new(ReplayProtectionBloomfiltersInner { + primary, + overlap, + pre_announced, })), - }) + } } // SAFETY: the hardcoded values of 1,1 are valid #[allow(clippy::unwrap_used)] pub(crate) fn new_disabled() -> Self { // well, technically it's not fully empty, but the memory footprint is negligible - ReplayProtectionBloomfilter { + ReplayProtectionBloomfilters { disabled: true, - inner: Arc::new(std::sync::Mutex::new(ReplayProtectionBloomfilterInner { - current_filter: Bloom::new(1, 1).unwrap(), + inner: Arc::new(std::sync::Mutex::new(ReplayProtectionBloomfiltersInner { + primary: RotationFilter { + metadata: ReplayProtectionBloomfilterMetadata { + creation_time: OffsetDateTime::now_utc(), + packets_received_at_creation: 0, + rotation_id: u32::MAX, + }, + data: Bloom::new(1, 1).unwrap(), + }, + overlap: None, + pre_announced: None, })), } } @@ -48,14 +133,13 @@ impl ReplayProtectionBloomfilter { self.disabled } - 
pub(crate) fn reset(&self, items_count: usize, fp_p: f64) -> Result<(), NymNodeError> { - // 1. build the new filter - let new_inner = ReplayProtectionBloomfilterInner { - current_filter: Bloom::new_for_fp_rate(items_count, fp_p) - .map_err(NymNodeError::bloomfilter_failure)?, - }; - - // 2. swap it + pub(crate) fn allocate_pre_announced( + &self, + items_count: usize, + fp_p: f64, + packets_received_at_creation: usize, + rotation_id: u32, + ) -> Result<(), NymNodeError> { let mut guard = self .inner .lock() @@ -63,161 +147,256 @@ impl ReplayProtectionBloomfilter { message: "mutex got poisoned", })?; - *guard = new_inner; + guard.pre_announced = Some(RotationFilter::new( + items_count, + fp_p, + packets_received_at_creation, + rotation_id, + )?); Ok(()) } - // NOTE: with key rotations we'll have to check whether the file is still valid and which - // key it corresponds to, but that's a future problem - pub(crate) async fn load>(path: P) -> Result { - info!("attempting to load prior replay detection bloomfilter..."); - let path = path.as_ref(); - let mut file = - File::open(path) - .await - .map_err(|source| NymNodeError::BloomfilterIoFailure { - source, - path: path.to_path_buf(), - })?; - - let mut buf = Vec::new(); - file.read_to_end(&mut buf) - .await - .map_err(|source| NymNodeError::BloomfilterIoFailure { - source, - path: path.to_path_buf(), - })?; - - Ok(ReplayProtectionBloomfilter { - disabled: false, - inner: Arc::new(std::sync::Mutex::new(ReplayProtectionBloomfilterInner { - current_filter: Bloom::from_bytes(buf) - .map_err(NymNodeError::bloomfilter_failure)?, - })), - }) - } - - // average HDD has the write speed of ~80MB/s so a 2GB bloomfilter would take almost 30s to write... 
- // and this function is explicitly async and using tokio's async operations, because otherwise - // we'd have to go through the whole hassle of using spawn_blocking and awaiting that one instead - pub(crate) async fn flush_to_disk>(&self, path: P) -> Result<(), NymNodeError> { - debug!("flushing replay protection bloomfilter to disk..."); - let start = Instant::now(); - let path = path.as_ref(); - - let mut file = - File::create(path) - .await - .map_err(|source| NymNodeError::BloomfilterIoFailure { - source, - path: path.to_path_buf(), - })?; - let data = self.bytes().map_err(|_| NymNodeError::BloomfilterFailure { - message: "mutex got poisoned", - })?; - file.write_all(&data) - .await - .map_err(|source| NymNodeError::BloomfilterIoFailure { - source, - path: path.to_path_buf(), + pub(crate) fn promote_pre_announced(&self) -> Result<(), NymNodeError> { + let mut guard = self + .inner + .lock() + .map_err(|_| NymNodeError::BloomfilterFailure { + message: "mutex got poisoned", })?; - let elapsed = start.elapsed(); + let Some(mut pre_announced) = guard.pre_announced.take() else { + error!("there was no pre-announced bloomfilter to promote"); + return Ok(()); + }; - info!( - "flushed replay protection bloomfilter to disk. it took: {}", - elapsed.human_duration() - ); + // pre_announced -> primary + // primary -> temp (pre_announced) + mem::swap(&mut guard.primary, &mut pre_announced); + // temp (pre_announced) -> secondary + guard.overlap = Some(pre_announced); Ok(()) } -} -struct ReplayProtectionBloomfilterInner { - // metadata to do with epochs, etc. 
- current_filter: Bloom<[u8; REPLAY_TAG_SIZE]>, - // overlap_filter: bloomfilter::Bloom<[u8; REPLAY_TAG_SIZE]>, -} + pub(crate) fn purge_secondary(&self) -> Result<(), NymNodeError> { + let mut guard = self + .inner + .lock() + .map_err(|_| NymNodeError::BloomfilterFailure { + message: "mutex got poisoned", + })?; + guard.overlap = None; + Ok(()) + } -impl ReplayProtectionBloomfilter { - #[allow(dead_code)] - pub(crate) fn check_and_set( + pub(crate) fn primary_metadata( &self, - replay_tag: &[u8; REPLAY_TAG_SIZE], - ) -> Result> { - let Ok(mut guard) = self.inner.lock() else { - return Err(PoisonError::new(())); - }; + ) -> Result { + let metadata = self + .inner + .lock() + .map_err(|_| NymNodeError::BloomfilterFailure { + message: "mutex got poisoned", + })? + .primary + .metadata; - Ok(guard.current_filter.check_and_set(replay_tag)) + Ok(metadata) } - #[allow(dead_code)] - pub(crate) fn try_check_and_set( - &self, - replay_tag: &[u8; REPLAY_TAG_SIZE], - ) -> Option>> { - let mut guard = match self.inner.try_lock() { - Ok(guard) => guard, - Err(TryLockError::Poisoned(_)) => return Some(Err(PoisonError::new(()))), - Err(TryLockError::WouldBlock) => return None, + pub(crate) fn primary_bytes_and_id(&self) -> Result<(Vec, u32), NymNodeError> { + let guard = self + .inner + .lock() + .map_err(|_| NymNodeError::BloomfilterFailure { + message: "mutex got poisoned", + })?; + + let id = guard.primary.metadata.rotation_id; + let bytes = guard.primary.bytes(); + Ok((bytes, id)) + } + + pub(crate) fn secondary_bytes_and_id(&self) -> Result, u32)>, NymNodeError> { + let guard = self + .inner + .lock() + .map_err(|_| NymNodeError::BloomfilterFailure { + message: "mutex got poisoned", + })?; + + let secondary = match guard.overlap.as_ref() { + Some(overlap) => overlap, + None => { + let Some(pre_announced) = guard.pre_announced.as_ref() else { + return Ok(None); + }; + pre_announced + } }; - Some(Ok(guard.current_filter.check_and_set(replay_tag))) + let id = 
secondary.metadata.rotation_id; + let bytes = secondary.bytes(); + Ok(Some((bytes, id))) } +} + +// map from particular rotation id to vector of results, based on the order of requests received +type BatchCheckResult = HashMap>; +impl ReplayProtectionBloomfilters { pub(crate) fn batch_try_check_and_set( &self, - reply_tags: &[&[u8; REPLAY_TAG_SIZE]], - ) -> Option, PoisonError<()>>> { + reply_tags: &HashMap>, + ) -> Option>> { let mut guard = match self.inner.try_lock() { Ok(guard) => guard, Err(TryLockError::Poisoned(_)) => return Some(Err(PoisonError::new(()))), Err(TryLockError::WouldBlock) => return None, }; - let mut result = Vec::with_capacity(reply_tags.len()); - for tag in reply_tags { - result.push(guard.current_filter.check_and_set(tag)); - } - - // for testing throughput without disabling checks: - // return Some(Ok(vec![false; reply_tags.len()])); - - Some(Ok(result)) + Some(Ok(guard.batch_check_and_set(reply_tags))) } pub(crate) fn batch_check_and_set( &self, - reply_tags: &[&[u8; REPLAY_TAG_SIZE]], - ) -> Result, PoisonError<()>> { + reply_tags: &HashMap>, + ) -> Result>, PoisonError<()>> { let Ok(mut guard) = self.inner.lock() else { return Err(PoisonError::new(())); }; - let mut result = Vec::with_capacity(reply_tags.len()); - for tag in reply_tags { - result.push(guard.current_filter.check_and_set(tag)); - } + Ok(guard.batch_check_and_set(reply_tags)) + } +} - // for testing throughput without disabling checks: - // return Ok(vec![false; reply_tags.len()]); +struct ReplayProtectionBloomfiltersInner { + primary: RotationFilter, - Ok(result) + // don't worry, we'll never have 3 active filters at once, + // we will either have a overlap (during the first epoch of a new rotation) + // or a pre_announced (during the last epoch of the current rotation) + // during epoch transition, the following change will happen: + // primary -> overlap + // pre_announced -> primary + // I'm not using an enum because it's easier to reason about those as separate fields 
+ overlap: Option, + pre_announced: Option, +} + +impl ReplayProtectionBloomfiltersInner { + fn batch_check_and_set( + &mut self, + reply_tags: &HashMap>, + ) -> HashMap> { + let mut result = HashMap::with_capacity(reply_tags.len()); + for (&rotation_id, reply_tags) in reply_tags { + // try to 'find' the relevant filter. we might be doing 3 reads here, but realistically it's + // going to be 'primary' most of the time and even if not, it's just few ns of overhead... + let filter = if self.primary.metadata.rotation_id == rotation_id { + Some(&mut self.primary.data) + } else if let Some(secondary) = &mut self.overlap { + // if let chaining won't be stable until 1.88 so we have to do the Option workaround + if secondary.metadata.rotation_id == rotation_id { + Some(&mut secondary.data) + } else { + None + } + } else if let Some(pre_announced) = &mut self.pre_announced { + if pre_announced.metadata.rotation_id == rotation_id { + Some(&mut pre_announced.data) + } else { + None + } + } else { + None + }; + + let Some(filter) = filter else { + // if we've received a packet from an unknown rotation, it most likely means it has been replayed + // from an older rotation, so mark it as such + result.insert(rotation_id, vec![false; reply_tags.len()]); + continue; + }; + + let mut rotation_results = Vec::with_capacity(reply_tags.len()); + for tag in reply_tags { + rotation_results.push(filter.check_and_set(tag)) + } + result.insert(rotation_id, rotation_results); + } + + result } +} - #[allow(dead_code)] - pub(crate) fn clear(&self) -> Result<(), PoisonError<()>> { - let mut guard = self.inner.lock().map_err(|_| PoisonError::new(()))?; - guard.current_filter.clear(); - Ok(()) +pub(crate) struct RotationFilter { + metadata: ReplayProtectionBloomfilterMetadata, + data: Bloom<[u8; REPLAY_TAG_SIZE]>, +} + +impl RotationFilter { + pub(crate) fn new( + items_count: usize, + fp_p: f64, + packets_received_at_creation: usize, + rotation_id: u32, + ) -> Result { + let filter = + 
Bloom::new_for_fp_rate(items_count, fp_p).map_err(NymNodeError::bloomfilter_failure)?; + + Ok(RotationFilter { + metadata: ReplayProtectionBloomfilterMetadata { + creation_time: OffsetDateTime::now_utc(), + packets_received_at_creation, + rotation_id, + }, + data: filter, + }) } // due to the size of the bloomfilter, extra caution has to be applied when using this method // note: we're not getting reference to bytes as this method is used when flushing data to the disk // (which takes ~30s) and we can't block the mutex for that long. - fn bytes(&self) -> Result, PoisonError<()>> { - let guard = self.inner.lock().map_err(|_| PoisonError::new(()))?; - Ok(guard.current_filter.to_bytes()) + fn bytes(&self) -> Vec { + // attach metadata bytes at the end as it would make deserialisation cheaper (as we could avoid + // copying the bloomfilter bytes twice) + let mut bloom_bytes = self.data.to_bytes(); + bloom_bytes.extend_from_slice(&self.metadata.bytes()); + bloom_bytes + } + + pub(crate) fn try_from_bytes(bytes: Vec) -> Result { + let len = bytes.len(); + if bytes.len() < ReplayProtectionBloomfilterMetadata::SERIALIZED_LEN { + return Err(NymNodeError::BloomfilterMetadataDeserialisationFailure); + } + + let mut bloom_bytes = bytes; + let metadata_bytes = + bloom_bytes.split_off(len - ReplayProtectionBloomfilterMetadata::SERIALIZED_LEN); + + Ok(RotationFilter { + metadata: ReplayProtectionBloomfilterMetadata::try_from_bytes(&metadata_bytes)?, + data: Bloom::from_bytes(bloom_bytes).map_err(NymNodeError::bloomfilter_failure)?, + }) + } + + pub(crate) fn load>(path: P) -> Result { + info!("attempting to load prior replay detection bloomfilter..."); + let path = path.as_ref(); + let mut file = File::open(path).map_err(|source| NymNodeError::BloomfilterIoFailure { + source, + path: path.to_path_buf(), + })?; + + let mut buf = Vec::new(); + file.read_to_end(&mut buf) + .map_err(|source| NymNodeError::BloomfilterIoFailure { + source, + path: path.to_path_buf(), + })?; + + 
RotationFilter::try_from_bytes(buf) } } diff --git a/nym-node/src/node/replay_protection/helpers.rs b/nym-node/src/node/replay_protection/helpers.rs new file mode 100644 index 00000000000..d46e9949197 --- /dev/null +++ b/nym-node/src/node/replay_protection/helpers.rs @@ -0,0 +1,34 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +pub(crate) fn parse_rotation_id_from_filename(name: &str) -> Option { + let stripped = name.strip_prefix("rot-")?; + let ext_idx = stripped.rfind(".").unwrap_or(stripped.len()); + let rotation = stripped.chars().take(ext_idx).collect::(); + rotation.parse::().ok() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parsing_rotation_id() { + let test_cases = vec![ + ("rot", None), + ("rot-123", Some(123)), + ("foo-123", None), + ("rot-123.ext", Some(123)), + ("rot-123.different-ext", Some(123)), + ("rot.123.aaa", None), + ]; + + for (raw, expected) in test_cases { + assert_eq!( + parse_rotation_id_from_filename(raw), + expected, + "failed: {raw} to {expected:?}" + ); + } + } +} diff --git a/nym-node/src/node/replay_protection/manager.rs b/nym-node/src/node/replay_protection/manager.rs new file mode 100644 index 00000000000..56b20d4ea5a --- /dev/null +++ b/nym-node/src/node/replay_protection/manager.rs @@ -0,0 +1,115 @@ +// Copyright 2025 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::config::Config; +use crate::error::NymNodeError; +use crate::node::replay_protection::bloomfilter::{ReplayProtectionBloomfilters, RotationFilter}; +use crate::node::replay_protection::items_in_bloomfilter; +use human_repr::HumanCount; +use nym_node_metrics::NymNodeMetrics; +use std::cmp::max; +use std::time::Duration; +use time::OffsetDateTime; +use tracing::info; + +#[derive(Clone)] +pub(crate) struct ReplayProtectionBloomfiltersManager { + target_fp_p: f64, + minimum_bloomfilter_packets_per_second: usize, + bloomfilter_size_multiplier: f64, + + metrics: NymNodeMetrics, + 
filters: ReplayProtectionBloomfilters, +} + +impl ReplayProtectionBloomfiltersManager { + pub(crate) fn new_disabled(metrics: NymNodeMetrics) -> Self { + // the exact config values are irrelevant as the filters will never be recreated + ReplayProtectionBloomfiltersManager { + target_fp_p: 0.001, + minimum_bloomfilter_packets_per_second: 1, + bloomfilter_size_multiplier: 1.0, + metrics, + filters: ReplayProtectionBloomfilters::new_disabled(), + } + } + + pub(crate) fn new( + config: &Config, + primary: RotationFilter, + secondary: Option, + metrics: NymNodeMetrics, + ) -> Self { + ReplayProtectionBloomfiltersManager { + target_fp_p: config.mixnet.replay_protection.debug.false_positive_rate, + minimum_bloomfilter_packets_per_second: config + .mixnet + .replay_protection + .debug + .bloomfilter_minimum_packets_per_second_size, + bloomfilter_size_multiplier: config + .mixnet + .replay_protection + .debug + .bloomfilter_size_multiplier, + metrics, + filters: ReplayProtectionBloomfilters::new(primary, secondary), + } + } + + pub(crate) fn bloomfilters(&self) -> ReplayProtectionBloomfilters { + self.filters.clone() + } + + pub(crate) fn primary_bytes_and_id(&self) -> Result<(Vec, u32), NymNodeError> { + self.filters.primary_bytes_and_id() + } + + pub(crate) fn secondary_bytes_and_id(&self) -> Result, u32)>, NymNodeError> { + self.filters.secondary_bytes_and_id() + } + + pub(crate) fn purge_secondary(&self) -> Result<(), NymNodeError> { + self.filters.purge_secondary() + } + + pub(crate) fn promote_pre_announced(&self) -> Result<(), NymNodeError> { + self.filters.promote_pre_announced() + } + + // TODO: actually do add some metrics + pub(crate) fn allocate_pre_announced( + &self, + rotation_id: u32, + rotation_lifetime: Duration, + ) -> Result<(), NymNodeError> { + // 1. 
estimate the number of items in the filter based on the extrapolated items received
+        // by the primary filter
+        let received = self.metrics.mixnet.ingress.forward_hop_packets_received()
+            + self.metrics.mixnet.ingress.final_hop_packets_received();
+
+        let primary = self.filters.primary_metadata()?;
+        let time_delta = OffsetDateTime::now_utc() - primary.creation_time;
+        let received_since_creation = received.saturating_sub(primary.packets_received_at_creation);
+        let received_per_second =
+            (received_since_creation as f64 / time_delta.as_seconds_f64()).round() as usize;
+
+        let bf_received = max(
+            received_per_second,
+            self.minimum_bloomfilter_packets_per_second,
+        );
+        let items_in_new_filter = items_in_bloomfilter(rotation_lifetime, bf_received);
+        let adjusted =
+            (items_in_new_filter as f64 * self.bloomfilter_size_multiplier).round() as usize;
+
+        info!(
+            "allocating new bloom filter. new expected number of packets: {} that preserve fp rate of {}",
+            adjusted.human_count_bare(),
+            self.target_fp_p
+        );
+
+        // 2. 
allocate the filter + self.filters + .allocate_pre_announced(adjusted, self.target_fp_p, received, rotation_id) + } +} diff --git a/nym-node/src/node/replay_protection/mod.rs b/nym-node/src/node/replay_protection/mod.rs index 5d5c9d57e2b..969e4f5fd04 100644 --- a/nym-node/src/node/replay_protection/mod.rs +++ b/nym-node/src/node/replay_protection/mod.rs @@ -6,6 +6,8 @@ use std::time::Duration; pub(crate) mod background_task; pub(crate) mod bloomfilter; +mod helpers; +pub(crate) mod manager; pub fn bitmap_size(false_positive_rate: f64, items_in_filter: usize) -> usize { /// Equivalent to ln(1 / 2^ln(2)) = −ln^2(2) diff --git a/nym-node/src/node/shared_network.rs b/nym-node/src/node/shared_network.rs index 1b6ace3210e..0197b27018b 100644 --- a/nym-node/src/node/shared_network.rs +++ b/nym-node/src/node/shared_network.rs @@ -2,18 +2,26 @@ // SPDX-License-Identifier: GPL-3.0-only use crate::error::NymNodeError; +use crate::node::key_rotation::active_keys::ActiveSphinxKeys; use crate::node::routing_filter::network_filter::NetworkRoutingFilter; use async_trait::async_trait; +use nym_crypto::asymmetric::ed25519; use nym_gateway::node::UserAgent; use nym_node_metrics::prometheus_wrapper::{PrometheusMetric, PROMETHEUS_METRICS}; use nym_task::ShutdownToken; use nym_topology::node::RoutingNode; -use nym_topology::{EpochRewardedSet, NymTopology, Role, TopologyProvider}; +use nym_topology::{ + EntryDetails, EpochRewardedSet, NodeId, NymTopology, NymTopologyMetadata, Role, + TopologyProvider, +}; use nym_validator_client::nym_api::NymApiClientExt; -use nym_validator_client::nym_nodes::{NodesByAddressesResponse, SkimmedNode}; +use nym_validator_client::nym_nodes::{ + NodesByAddressesResponse, SkimmedNode, SkimmedNodesWithMetadata, +}; use nym_validator_client::{NymApiClient, ValidatorClientError}; use std::collections::HashSet; -use std::net::IpAddr; +use std::net::{IpAddr, SocketAddr}; +use std::ops::Deref; use std::sync::Arc; use std::time::Duration; use tokio::sync::RwLock; @@ 
-22,6 +30,8 @@ use tracing::log::error; use tracing::{debug, trace, warn}; use url::Url; +const LOCAL_NODE_ID: NodeId = 1234567890; + struct NodesQuerier { client: NymApiClient, nym_api_urls: Vec, @@ -53,10 +63,10 @@ impl NodesQuerier { res } - async fn current_nymnodes(&mut self) -> Result, ValidatorClientError> { + async fn current_nymnodes(&mut self) -> Result { let res = self .client - .get_all_basic_nodes() + .get_all_basic_nodes_with_metadata() .await .inspect_err(|err| error!("failed to get network nodes: {err}")); @@ -84,16 +94,40 @@ impl NodesQuerier { } } +pub(crate) struct LocalGatewayNode { + pub(crate) active_sphinx_keys: ActiveSphinxKeys, + pub(crate) mix_host: SocketAddr, + pub(crate) identity_key: ed25519::PublicKey, + pub(crate) entry: EntryDetails, +} + +impl LocalGatewayNode { + pub(crate) fn to_routing_node(&self) -> RoutingNode { + RoutingNode { + node_id: LOCAL_NODE_ID, + mix_host: self.mix_host, + entry: Some(self.entry.clone()), + identity_key: self.identity_key, + sphinx_key: self.active_sphinx_keys.primary().deref().x25519_pubkey(), + supported_roles: nym_topology::SupportedRoles { + mixnode: false, + mixnet_entry: true, + mixnet_exit: true, + }, + } + } +} + #[derive(Clone)] pub struct CachedTopologyProvider { - gateway_node: Arc, + gateway_node: Arc, cached_network: CachedNetwork, min_mix_performance: u8, } impl CachedTopologyProvider { pub(crate) fn new( - gateway_node: RoutingNode, + gateway_node: LocalGatewayNode, cached_network: CachedNetwork, min_mix_performance: u8, ) -> Self { @@ -111,20 +145,24 @@ impl TopologyProvider for CachedTopologyProvider { let network_guard = self.cached_network.inner.read().await; let self_node = self.gateway_node.identity_key; - let mut topology = NymTopology::new_empty(network_guard.rewarded_set.clone()) - .with_additional_nodes(network_guard.network_nodes.iter().filter(|node| { - if node.supported_roles.mixnode { - node.performance.round_to_integer() >= self.min_mix_performance - } else { - true - } - 
})); + let mut topology = NymTopology::new( + network_guard.topology_metadata, + network_guard.rewarded_set.clone(), + Vec::new(), + ) + .with_additional_nodes(network_guard.network_nodes.iter().filter(|node| { + if node.supported_roles.mixnode { + node.performance.round_to_integer() >= self.min_mix_performance + } else { + true + } + })); - if !topology.has_node_details(self.gateway_node.node_id) { + if !topology.has_node(self.gateway_node.identity_key) { debug!("{self_node} didn't exist in topology. inserting it.",); - topology.insert_node_details(self.gateway_node.as_ref().clone()); + topology.insert_node_details(self.gateway_node.to_routing_node()); } - topology.force_set_active(self.gateway_node.node_id, Role::EntryGateway); + topology.force_set_active(LOCAL_NODE_ID, Role::EntryGateway); Some(topology) } @@ -140,6 +178,7 @@ impl CachedNetwork { CachedNetwork { inner: Arc::new(RwLock::new(CachedNetworkInner { rewarded_set: Default::default(), + topology_metadata: Default::default(), network_nodes: vec![], })), } @@ -148,6 +187,7 @@ impl CachedNetwork { struct CachedNetworkInner { rewarded_set: EpochRewardedSet, + topology_metadata: NymTopologyMetadata, network_nodes: Vec, } @@ -235,7 +275,9 @@ impl NetworkRefresher { async fn refresh_network_nodes_inner(&mut self) -> Result<(), ValidatorClientError> { let rewarded_set = self.querier.rewarded_set().await?; - let nodes = self.querier.current_nymnodes().await?; + let res = self.querier.current_nymnodes().await?; + let nodes = res.nodes; + let metadata = res.metadata; // collect all known/allowed nodes information let known_nodes = nodes @@ -264,6 +306,8 @@ impl NetworkRefresher { self.routing_filter.pending.clear().await; let mut network_guard = self.network.inner.write().await; + network_guard.topology_metadata = + NymTopologyMetadata::new(metadata.rotation_id, metadata.absolute_epoch_id); network_guard.network_nodes = nodes; network_guard.rewarded_set = rewarded_set; diff --git 
a/nym-node/src/throughput_tester/client.rs b/nym-node/src/throughput_tester/client.rs index a402511f7b7..3410dd98149 100644 --- a/nym-node/src/throughput_tester/client.rs +++ b/nym-node/src/throughput_tester/client.rs @@ -1,6 +1,7 @@ // Copyright 2025 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only +use crate::node::key_rotation::active_keys::ActiveSphinxKeys; use crate::throughput_tester::stats::ClientStats; use anyhow::bail; use arrayref::array_ref; @@ -14,7 +15,7 @@ use nym_crypto::asymmetric::x25519; use nym_sphinx_addressing::nodes::NymNodeRoutingAddress; use nym_sphinx_framing::codec::{NymCodec, NymCodecError}; use nym_sphinx_framing::packet::FramedNymPacket; -use nym_sphinx_params::PacketSize; +use nym_sphinx_params::{PacketSize, SphinxKeyRotation}; use nym_sphinx_routing::generate_hop_delays; use nym_sphinx_types::constants::{ EXPANDED_SHARED_SECRET_HKDF_INFO, EXPANDED_SHARED_SECRET_HKDF_SALT, @@ -28,6 +29,7 @@ use nym_task::ShutdownToken; use rand::rngs::OsRng; use sha2::Sha256; use std::net::SocketAddr; +use std::ops::Deref; use std::pin::Pin; use std::task::{Context, Poll, Waker}; use std::time::Duration; @@ -99,6 +101,7 @@ pub(crate) struct ThroughputTestingClient { listener: TcpListener, forward_connection: Framed, payload_key: PayloadKey, + key_rotation: SphinxKeyRotation, } fn rederive_lioness_payload_key(shared_secret: &[u8; 32]) -> PayloadKey { @@ -119,7 +122,7 @@ impl ThroughputTestingClient { initial_sending_delay: Duration, initial_batch_size: usize, latency_threshold: Duration, - node_keys: &x25519::KeyPair, + node_keys: ActiveSphinxKeys, node_listener: SocketAddr, stats: ClientStats, cancellation_token: ShutdownToken, @@ -137,10 +140,14 @@ impl ThroughputTestingClient { // keys of this client let ephemeral_keys = x25519::KeyPair::new(&mut rng); + let loaded_private = node_keys.primary(); + let private = loaded_private.deref(); + let public = private.x25519_pubkey(); + let route = [ Node::new( 
NymNodeRoutingAddress::from(node_listener).try_into()?, - (*node_keys.public_key()).into(), + public.into(), ), Node::new( NymNodeRoutingAddress::from(local_address).try_into()?, @@ -168,8 +175,8 @@ impl ThroughputTestingClient { // derive the expanded shared secret for our node so we could tag the payload to figure out latency // by tagging the packet - let shared_secret = node_keys - .private_key() + let shared_secret = private + .as_ref() .as_ref() .diffie_hellman(&header.shared_secret); let payload_key = rederive_lioness_payload_key(shared_secret.as_bytes()); @@ -189,6 +196,12 @@ impl ThroughputTestingClient { } }; + let key_rotation = if loaded_private.is_even_rotation() { + SphinxKeyRotation::EvenRotation + } else { + SphinxKeyRotation::OddRotation + }; + Ok(ThroughputTestingClient { stats, last_received_update: Instant::now(), @@ -204,6 +217,7 @@ impl ThroughputTestingClient { listener, forward_connection: Framed::new(forward_connection, NymCodec), payload_key, + key_rotation, }) } @@ -250,7 +264,13 @@ impl ThroughputTestingClient { packet_bytes.append(&mut payload_bytes); let forward_packet = NymPacket::sphinx_from_bytes(&packet_bytes)?; - Ok(FramedNymPacket::new(forward_packet, Default::default())) + // let key_rotation = if self.s + + Ok(FramedNymPacket::new( + forward_packet, + Default::default(), + self.key_rotation, + )) } async fn send_packets(&mut self) -> anyhow::Result<()> { diff --git a/nym-node/src/throughput_tester/mod.rs b/nym-node/src/throughput_tester/mod.rs index 68a7719fbdb..3371fb759cd 100644 --- a/nym-node/src/throughput_tester/mod.rs +++ b/nym-node/src/throughput_tester/mod.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: GPL-3.0-only use crate::config::upgrade_helpers::try_load_current_config; +use crate::node::key_rotation::active_keys::ActiveSphinxKeys; use crate::node::NymNode; use crate::throughput_tester::client::ThroughputTestingClient; use crate::throughput_tester::global_stats::GlobalStatsUpdater; @@ -9,12 +10,10 @@ use 
crate::throughput_tester::stats::ClientStats; use futures::future::join_all; use human_repr::HumanDuration; use indicatif::{ProgressState, ProgressStyle}; -use nym_crypto::asymmetric::x25519; use nym_task::ShutdownToken; use rand::{thread_rng, Rng}; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; -use std::sync::Arc; use std::time::Duration; use tokio::runtime; use tokio::runtime::Runtime; @@ -72,7 +71,7 @@ impl ThroughputTest { #[allow(clippy::too_many_arguments)] async fn run_testing_client( sender_id: usize, - node_keys: Arc, + node_keys: ActiveSphinxKeys, node_listener: SocketAddr, packet_latency_threshold: Duration, starting_sending_batch_size: usize, @@ -85,7 +84,7 @@ async fn run_testing_client( starting_sending_delay, starting_sending_batch_size, packet_latency_threshold, - &node_keys, + node_keys, node_listener, stats, shutdown_token, @@ -117,7 +116,7 @@ pub(crate) fn test_mixing_throughput( let nym_node = tester.prepare_nymnode(config_path)?; let listener = nym_node.config().mixnet.bind_address; - let sphinx_keys = nym_node.x25519_sphinx_keys(); + let sphinx_keys = nym_node.active_sphinx_keys()?; let mut stats = Vec::with_capacity(senders); for _ in 0..senders { diff --git a/nym-wallet/Cargo.lock b/nym-wallet/Cargo.lock index 211d475f059..d4da9c60982 100644 --- a/nym-wallet/Cargo.lock +++ b/nym-wallet/Cargo.lock @@ -415,11 +415,14 @@ version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" dependencies = [ + "brotli", "flate2", "futures-core", "memchr", "pin-project-lite", "tokio", + "zstd", + "zstd-safe", ] [[package]] @@ -940,6 +943,8 @@ version = "1.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" dependencies = [ + "jobserver", + "libc", "shlex", ] @@ -3529,6 +3534,16 @@ version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" +[[package]] +name = "jobserver" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +dependencies = [ + "getrandom 0.3.2", + "libc", +] + [[package]] name = "jpeg-decoder" version = "0.3.1" @@ -4027,6 +4042,7 @@ dependencies = [ "tendermint-rpc", "thiserror 2.0.12", "time", + "tracing", "utoipa", ] @@ -4293,6 +4309,7 @@ version = "0.3.0" dependencies = [ "pem", "tracing", + "zeroize", ] [[package]] @@ -9270,6 +9287,34 @@ dependencies = [ "memchr", ] +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.15+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +dependencies = [ + "cc", + "pkg-config", +] + [[package]] name = "zvariant" version = "5.4.0" diff --git a/tools/internal/testnet-manager/src/cli/initialise_new_network.rs b/tools/internal/testnet-manager/src/cli/initialise_new_network.rs index 574559bf820..99174e6d518 100644 --- a/tools/internal/testnet-manager/src/cli/initialise_new_network.rs +++ b/tools/internal/testnet-manager/src/cli/initialise_new_network.rs @@ -25,6 +25,10 @@ pub(crate) struct Args { #[clap(long)] custom_epoch_duration_secs: Option, + /// Specifies custom number of epochs sphinx keys are going to be valid for + #[clap(long)] + 
key_validity_in_epochs: Option, + #[clap(short, long, default_value_t = OutputFormat::default())] output: OutputFormat, } @@ -38,6 +42,7 @@ pub(crate) async fn execute(args: Args) -> Result<(), NetworkManagerError> { args.built_contracts, args.network_name, args.custom_epoch_duration_secs.map(Duration::from_secs), + args.key_validity_in_epochs, ) .await? .into_loaded(); diff --git a/tools/internal/testnet-manager/src/cli/initialise_post_dkg_network.rs b/tools/internal/testnet-manager/src/cli/initialise_post_dkg_network.rs index d302177fad8..1e5d24b1bff 100644 --- a/tools/internal/testnet-manager/src/cli/initialise_post_dkg_network.rs +++ b/tools/internal/testnet-manager/src/cli/initialise_post_dkg_network.rs @@ -39,6 +39,10 @@ pub(crate) struct Args { #[clap(long)] custom_epoch_duration_secs: Option, + /// Specifies custom number of epochs sphinx keys are going to be valid for + #[clap(long)] + key_validity_in_epochs: Option, + #[clap(short, long, default_value_t = OutputFormat::default())] output: OutputFormat, } @@ -51,6 +55,7 @@ pub(crate) async fn execute(args: Args) -> Result<(), NetworkManagerError> { args.built_contracts, args.network_name, args.custom_epoch_duration_secs.map(Duration::from_secs), + args.key_validity_in_epochs, ) .await? 
.into(); diff --git a/tools/internal/testnet-manager/src/cli/local_ecash_apis.rs b/tools/internal/testnet-manager/src/cli/local_ecash_apis.rs index f0f9a4475a3..587e5c0375e 100644 --- a/tools/internal/testnet-manager/src/cli/local_ecash_apis.rs +++ b/tools/internal/testnet-manager/src/cli/local_ecash_apis.rs @@ -37,6 +37,10 @@ pub(crate) struct Args { #[clap(long)] custom_epoch_duration_secs: Option, + /// Specifies custom number of epochs sphinx keys are going to be valid for + #[clap(long)] + key_validity_in_epochs: Option, + #[clap(short, long, default_value_t = OutputFormat::default())] output: OutputFormat, } @@ -53,6 +57,7 @@ pub(crate) async fn execute(args: Args) -> Result<(), NetworkManagerError> { args.built_contracts, args.network_name, args.custom_epoch_duration_secs.map(Duration::from_secs), + args.key_validity_in_epochs, ) .await? .into(); diff --git a/tools/internal/testnet-manager/src/manager/dkg_skip.rs b/tools/internal/testnet-manager/src/manager/dkg_skip.rs index ec74a01af35..ea33af03fb3 100644 --- a/tools/internal/testnet-manager/src/manager/dkg_skip.rs +++ b/tools/internal/testnet-manager/src/manager/dkg_skip.rs @@ -130,6 +130,7 @@ impl NetworkManager { &self, ctx: &mut DkgSkipCtx, api_endpoints: Vec, + mut prime_api: Option, ) -> Result<(), NetworkManagerError> { ctx.println(format!( "📝 {}Generating ecash keys for all signers...", @@ -144,12 +145,23 @@ impl NetworkManager { let mut ecash_signers = Vec::new(); let mut rng = OsRng; - for (endpoint, ecash_keypair) in api_endpoints.into_iter().zip(ecash_keys.into_iter()) { + for (i, (endpoint, ecash_keypair)) in api_endpoints + .into_iter() + .zip(ecash_keys.into_iter()) + .enumerate() + { + // if available, use provided account for the first api (so that it would be permitted to do rewarding, etc.) 
+ let cosmos_account = if i == 0 { + prime_api.take().unwrap_or(Account::new()) + } else { + Account::new() + }; + let ed25519_keypair = ed25519::KeyPair::new(&mut rng); let data = EcashSigner { ed25519_keypair, ecash_keypair, - cosmos_account: Account::new(), + cosmos_account, endpoint, }; ctx.println(format!( @@ -451,7 +463,11 @@ impl NetworkManager { let mut ctx = DkgSkipCtx::new(network)?; - self.generate_ecash_signer_data(&mut ctx, api_endpoints)?; + self.generate_ecash_signer_data( + &mut ctx, + api_endpoints, + Some(network.auxiliary_addresses.mixnet_rewarder.clone()), + )?; let current_code_id = self.validate_existing_contracts(&ctx).await?; self.persist_dkg_keys(&mut ctx, data_output_dir).await?; let new_code_id = self diff --git a/tools/internal/testnet-manager/src/manager/local_client.rs b/tools/internal/testnet-manager/src/manager/local_client.rs index c95c3062dfe..9e1d2025e2f 100644 --- a/tools/internal/testnet-manager/src/manager/local_client.rs +++ b/tools/internal/testnet-manager/src/manager/local_client.rs @@ -96,8 +96,8 @@ impl NetworkManager { let wait_fut = async { let inner_fut = async { loop { - let nodes = match api_client.get_all_basic_nodes().await { - Ok(nodes) => nodes, + let nodes = match api_client.get_all_basic_nodes_with_metadata().await { + Ok(nodes) => nodes.nodes, Err(err) => { ctx.println(format!( "❌ {} {err}", diff --git a/tools/internal/testnet-manager/src/manager/local_nodes.rs b/tools/internal/testnet-manager/src/manager/local_nodes.rs index e140ccb20a6..9e5450e4ba5 100644 --- a/tools/internal/testnet-manager/src/manager/local_nodes.rs +++ b/tools/internal/testnet-manager/src/manager/local_nodes.rs @@ -18,6 +18,7 @@ use std::ops::Deref; use std::path::{Path, PathBuf}; use std::process::Stdio; use tokio::process::Command; +use tracing::error; use zeroize::Zeroizing; struct LocalNodesCtx<'a> { @@ -158,8 +159,8 @@ impl NetworkManager { &output_file_path.display().to_string(), ]) .stdout(Stdio::null()) + .stderr(Stdio::piped()) 
.stdin(Stdio::null()) - .stderr(Stdio::null()) .kill_on_drop(true); if is_gateway { @@ -169,10 +170,12 @@ impl NetworkManager { cmd.args(["--mode", "mixnode"]); } - let mut child = cmd.spawn()?; - let child_fut = child.wait(); + let child = cmd.spawn()?; + let child_fut = child.wait_with_output(); let out = ctx.async_with_progress(child_fut).await?; - if !out.success() { + if !out.status.success() { + error!("nym node failure"); + println!("{}", String::from_utf8_lossy(&out.stderr)); return Err(NetworkManagerError::NymNodeExecutionFailure); } @@ -196,14 +199,16 @@ impl NetworkManager { "--output", "json", ]) - .stdin(Stdio::null()) .stdout(Stdio::null()) - .stderr(Stdio::null()) + .stderr(Stdio::piped()) + .stdin(Stdio::null()) .kill_on_drop(true) .output(); let out = ctx.async_with_progress(child).await?; if !out.status.success() { + error!("nym node failure"); + println!("{}", String::from_utf8_lossy(&out.stderr)); return Err(NetworkManagerError::NymNodeExecutionFailure); } let signature: ReducedSignatureOut = serde_json::from_slice(&out.stdout)?; diff --git a/tools/internal/testnet-manager/src/manager/network_init.rs b/tools/internal/testnet-manager/src/manager/network_init.rs index 1a0fc0e41d6..8e978e4fc6a 100644 --- a/tools/internal/testnet-manager/src/manager/network_init.rs +++ b/tools/internal/testnet-manager/src/manager/network_init.rs @@ -127,6 +127,7 @@ impl NetworkManager { &self, ctx: &InitCtx, custom_epoch_duration: Option, + key_validity_in_epochs: Option, ) -> Result { Ok(nym_mixnet_contract_common::InstantiateMsg { rewarding_validator_address: ctx @@ -165,6 +166,7 @@ impl NetworkManager { version_score_params: Default::default(), profit_margin: Default::default(), interval_operating_cost: Default::default(), + key_validity_in_epochs, }) } @@ -408,6 +410,7 @@ impl NetworkManager { &self, ctx: &mut InitCtx, custom_epoch_duration: Option, + key_validity_in_epochs: Option, ) -> Result<(), NetworkManagerError> { ctx.println(format!( "💽 {}Instantiating 
all the contracts...", @@ -422,7 +425,8 @@ impl NetworkManager { let code_id = ctx.network.contracts.mixnet.upload_info()?.code_id; let admin = ctx.network.contracts.mixnet.admin()?.address.clone(); ctx.set_pb_message(format!("attempting to instantiate {name} contract...")); - let init_msg = self.mixnet_init_message(ctx, custom_epoch_duration)?; + let init_msg = + self.mixnet_init_message(ctx, custom_epoch_duration, key_validity_in_epochs)?; let init_fut = ctx.admin.instantiate( code_id, &init_msg, @@ -694,6 +698,7 @@ impl NetworkManager { contracts: P, network_name: Option, custom_epoch_duration: Option, + key_validity_in_epochs: Option, ) -> Result { let network_name = self.get_network_name(network_name); let mut ctx = InitCtx::new(network_name, self.admin.deref().clone(), &self.rpc_endpoint)?; @@ -702,7 +707,7 @@ impl NetworkManager { self.upload_contracts(&mut ctx).await?; self.create_contract_admins_mnemonics(&mut ctx)?; self.transfer_admin_tokens(&ctx).await?; - self.instantiate_contracts(&mut ctx, custom_epoch_duration) + self.instantiate_contracts(&mut ctx, custom_epoch_duration, key_validity_in_epochs) .await?; self.perform_final_migrations(&mut ctx).await?; self.get_build_info(&mut ctx).await?;