diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index 5603c40f..508a6ff8 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -1,22 +1,22 @@ -name: "lint" +name: 'lint' on: push: branches: - main - dev - - "release/**" + - 'release/**' paths-ignore: - - "*.md" - - "LICENSE" + - '*.md' + - 'LICENSE' pull_request: branches: - main - dev - - "release/**" + - 'release/**' paths-ignore: - - "*.md" - - "LICENSE" + - '*.md' + - 'LICENSE' jobs: lint-web: @@ -30,7 +30,7 @@ jobs: - uses: actions/setup-node@v6 with: - node-version: "24" + node-version: '24' - uses: pnpm/action-setup@v5 with: @@ -56,5 +56,6 @@ jobs: - name: Run Biome and Prettier Lint run: pnpm lint - - name: Audit - run: pnpm audit --prod + # TODO: Restore when it works again: https://github.com/pnpm/pnpm/issues/11265 + # - name: Audit + # run: pnpm audit --prod diff --git a/.npmrc b/.npmrc index fa4e0952..5608fccf 100644 --- a/.npmrc +++ b/.npmrc @@ -1 +1,2 @@ -strict-peer-dependencies=false \ No newline at end of file +strict-peer-dependencies=false +audit=false \ No newline at end of file diff --git a/.trivyignore.yaml b/.trivyignore.yaml index f29211f5..67f4f502 100644 --- a/.trivyignore.yaml +++ b/.trivyignore.yaml @@ -1,4 +1,4 @@ vulnerabilities: - id: GHSA-wrw7-89jp-8q8g - expired_at: 2026-04-18 + expired_at: 2026-05-16 statement: 'glib is a transitive dependency of Tauri which we cannot update ourselves. Waiting for tauri to finish migration to gtk4-rs: https://github.com/tauri-apps/tauri/issues/12563' diff --git a/package.json b/package.json index d62d1507..baae2fc0 100644 --- a/package.json +++ b/package.json @@ -85,7 +85,7 @@ "html-react-parser": "^5.2.17", "itertools": "^2.6.0", "js-base64": "^3.7.8", - "lodash-es": "^4.17.23", + "lodash-es": "^4.18.1", "merge-refs": "^2.0.0", "millify": "^6.1.0", "motion": "^12.38.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f53e0d68..b047c2ca 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -114,8 +114,8 @@ importers: specifier: ^3.7.8 version: 3.7.8 lodash-es: - specifier: ^4.17.23 - version: 4.17.23 + specifier: ^4.18.1 + version: 4.18.1 merge-refs: specifier: ^2.0.0 version: 2.0.0(@types/react@19.2.14) @@ -2183,8 +2183,8 @@ packages: resolution: {integrity: sha512-Kx8hMakjX03tiGTLAIdJ+lL0htKnXjEZN6hk/tozf/WOuYGdZBJrZ+rCJRbVCugsjB3jMLn9746NsQIf5VjBMw==} engines: {node: '>=4'} - lodash-es@4.17.23: - resolution: {integrity: sha512-kVI48u3PZr38HdYz98UmfPnXl2DXrpdctLrFLCd3kOx1xUkOmpFPx7gCWWM5MPkL/fD8zb+Ph0QzjGFs4+hHWg==} + lodash-es@4.18.1: + resolution: {integrity: sha512-J8xewKD/Gk22OZbhpOVSwcs60zhd95ESDwezOFuA3/099925PdHJ7OFHNTGtajL3AlZkykD32HykiMo+BIBI8A==} lodash@4.17.23: resolution: {integrity: sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==} @@ -4907,7 +4907,7 @@ snapshots: pify: 3.0.0 strip-bom: 3.0.0 - lodash-es@4.17.23: {} + lodash-es@4.18.1: {} lodash@4.17.23: {} diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock index 20f67bd6..6f6f3ffc 100644 --- a/src-tauri/Cargo.lock +++ b/src-tauri/Cargo.lock @@ -4913,7 +4913,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" dependencies = [ - "heck 0.5.0", + "heck 0.4.1", "itertools", "log", "multimap", @@ -5645,9 +5645,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.10" +version = "0.103.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" +checksum = "8279bb85272c9f10811ae6a6c547ff594d6a7f3c6c6b02ee9726d1d0dcfcdd06" dependencies = [ "aws-lc-rs", "ring", diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml index 080ef5c1..456abc86 100644 --- a/src-tauri/Cargo.toml +++ b/src-tauri/Cargo.toml @@ -160,6 +160,9 @@ windows-sys = { version = "0.61", features = [ # HANDLE & file functions "Win32_System_IO", "Win32_System_Threading", + + # Network address change notifications (NotifyAddrChange) + "Win32_NetworkManagement_IpHelper", ] } [features] diff --git a/src-tauri/src/enterprise/service_locations/windows.rs b/src-tauri/src/enterprise/service_locations/windows.rs index f40c1044..2205eb9d 100644 --- a/src-tauri/src/enterprise/service_locations/windows.rs +++ b/src-tauri/src/enterprise/service_locations/windows.rs @@ -15,6 +15,7 @@ use defguard_wireguard_rs::{ }; use known_folders::get_known_folder_path; use log::{debug, error, warn}; +use tokio::time::sleep; use windows::{ core::PSTR, Win32::System::RemoteDesktop::{ @@ -23,6 +24,7 @@ use windows::{ }, }; use windows_acl::acl::ACL; +use windows_sys::Win32::NetworkManagement::IpHelper::NotifyAddrChange; use crate::{ enterprise::service_locations::{ @@ -36,10 +38,59 @@ use crate::{ }; const LOGIN_LOGOFF_EVENT_RETRY_DELAY_SECS: u64 = 5; +// How long to wait after a network change before attempting to connect. +// Gives DHCP time to complete and DNS to become available. +const NETWORK_STABILIZATION_DELAY: Duration = Duration::from_secs(3); +// How long to wait before restarting the network change watcher on error. +const NETWORK_CHANGE_MONITOR_RESTART_DELAY: Duration = Duration::from_secs(5); const DEFAULT_WIREGUARD_PORT: u16 = 51820; const DEFGUARD_DIR: &str = "Defguard"; const SERVICE_LOCATIONS_SUBDIR: &str = "service_locations"; +/// Watches for IP address changes on any network interface and attempts to connect to any +/// service locations that are not yet connected. This handles the case where the endpoint +/// hostname cannot be resolved at service startup because the network (e.g. Wi-Fi) is not +/// yet available. When the network comes up and an IP is assigned, this watcher fires and +/// retries the connection. +/// +/// Note: `NotifyAddrChange` also fires when WireGuard interfaces are created. This is +/// harmless because `connect_to_service_locations` skips already-connected locations. +pub(crate) async fn watch_for_network_change( + service_location_manager: Arc>, +) { + loop { + // NotifyAddrChange blocks until any IP address is added or removed on any interface. + // Passing NULL for both handle and overlapped selects the synchronous (blocking) mode. + let result = unsafe { NotifyAddrChange(std::ptr::null_mut(), std::ptr::null()) }; + + if result != 0 { + error!("NotifyAddrChange failed with error code: {result}"); + sleep(NETWORK_CHANGE_MONITOR_RESTART_DELAY).await; + continue; + } + + debug!( + "Network address change detected, waiting {NETWORK_STABILIZATION_DELAY:?}s for \ + network to stabilize before attempting service location connections..." 
+ ); + sleep(NETWORK_STABILIZATION_DELAY).await; + + debug!("Attempting to connect to service locations after network change"); + match service_location_manager + .write() + .unwrap() + .connect_to_service_locations() + { + Ok(_) => { + debug!("Service location connect attempt after network change completed"); + } + Err(err) => { + warn!("Failed to connect to service locations after network change: {err}"); + } + } + } +} + pub(crate) async fn watch_for_login_logoff( service_location_manager: Arc>, ) -> Result<(), ServiceLocationError> { @@ -59,7 +110,7 @@ pub(crate) async fn watch_for_login_logoff( } Err(err) => { error!("Failed waiting for login/logoff event: {err:?}"); - tokio::time::sleep(Duration::from_secs(LOGIN_LOGOFF_EVENT_RETRY_DELAY_SECS)).await; + sleep(Duration::from_secs(LOGIN_LOGOFF_EVENT_RETRY_DELAY_SECS)).await; continue; } }; @@ -680,12 +731,19 @@ impl ServiceLocationManager { Ok(()) } - pub(crate) fn connect_to_service_locations(&mut self) -> Result<(), ServiceLocationError> { + /// Attempts to connect to all service locations that are not already connected. + /// + /// Returns `Ok(true)` if every location is now connected (either it was already connected or + /// it was successfully connected during this call), and `Ok(false)` if at least one location + /// failed to connect (indicating that a retry may be worthwhile). + pub(crate) fn connect_to_service_locations(&mut self) -> Result { debug!("Attempting to auto-connect to VPN..."); let data = self.load_service_locations()?; debug!("Loaded {} instance(s) from ServiceLocationApi", data.len()); + let mut all_connected = true; + for instance_data in data { debug!( "Found service locations for instance ID: {}", @@ -725,10 +783,11 @@ impl ServiceLocationManager { if let Err(err) = self.setup_service_location_interface(&location, &instance_data.private_key) { - debug!( + warn!( "Failed to setup service location interface for '{}': {err:?}", location.name ); + all_connected = false; continue; } @@ -749,7 +808,7 @@ impl ServiceLocationManager { debug!("Auto-connect attempt completed"); - Ok(()) + Ok(all_connected) } pub fn save_service_locations( diff --git a/src-tauri/src/service/windows.rs b/src-tauri/src/service/windows.rs index c72a176a..dec55f23 100644 --- a/src-tauri/src/service/windows.rs +++ b/src-tauri/src/service/windows.rs @@ -7,7 +7,7 @@ use std::{ use clap::Parser; use error; -use tokio::runtime::Runtime; +use tokio::{runtime::Runtime, time::sleep}; use windows_service::{ define_windows_service, service::{ @@ -20,7 +20,8 @@ use windows_service::{ use crate::{ enterprise::service_locations::{ - windows::watch_for_login_logoff, ServiceLocationError, ServiceLocationManager, + windows::{watch_for_login_logoff, watch_for_network_change}, + ServiceLocationError, ServiceLocationManager, }, service::{ config::Config, @@ -32,6 +33,8 @@ use crate::{ static SERVICE_NAME: &str = "DefguardService"; const SERVICE_TYPE: ServiceType = ServiceType::OWN_PROCESS; const LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS: Duration = Duration::from_secs(5); +const SERVICE_LOCATION_CONNECT_RETRY_COUNT: u32 = 5; +const SERVICE_LOCATION_CONNECT_RETRY_DELAY: Duration = Duration::from_secs(30); pub fn run() -> Result<(), windows_service::Error> { // Register generated `ffi_service_main` with the system and start the service, blocking @@ -112,25 +115,69 @@ fn run_service() -> Result<(), DaemonError> { let service_location_manager = Arc::new(RwLock::new(service_location_manager)); - // Spawn service location management task + // Spawn network change 
+    // as possible, minimising the window in which a network event could be missed before
+    // the watcher is listening. The retry task below is the backstop for any event that
+    // still slips through that window.
     let service_location_manager_clone = service_location_manager.clone();
     runtime.spawn(async move {
         let manager = service_location_manager_clone;
+        info!("Starting network change monitoring");
+        watch_for_network_change(manager.clone()).await;
+        error!("Network change monitoring ended unexpectedly.");
+    });
 
-        info!("Starting service location management task");
-
-        info!("Attempting to auto-connect to service locations");
-        match manager.write().unwrap().connect_to_service_locations() {
-            Ok(()) => {
-                info!("Auto-connect to service locations completed successfully");
+    // Spawn service location auto-connect task with retries.
+    // Each attempt skips locations that are already connected, so it is safe to call
+    // connect_to_service_locations repeatedly. The retry loop handles the case where
+    // connections may fail at service startup because the network
+    // (e.g. Wi-Fi) is not yet available (mainly DNS resolution issues), and serves as
+    // a backstop for any network events missed by the watcher above.
+    // If all locations connect successfully on a given attempt, no further retries are made.
+    let service_location_manager_connect = service_location_manager.clone();
+    runtime.spawn(async move {
+        for attempt in 1..=SERVICE_LOCATION_CONNECT_RETRY_COUNT {
+            info!(
+                "Attempting to auto-connect to service locations \
+                 (attempt {attempt}/{SERVICE_LOCATION_CONNECT_RETRY_COUNT})"
+            );
+            match service_location_manager_connect
+                .write()
+                .unwrap()
+                .connect_to_service_locations()
+            {
+                Ok(true) => {
+                    info!(
+                        "All service locations connected successfully \
+                         (attempt {attempt}/{SERVICE_LOCATION_CONNECT_RETRY_COUNT})"
+                    );
+                    break;
+                }
+                Ok(false) => {
+                    warn!(
+                        "Auto-connect attempt {attempt}/{SERVICE_LOCATION_CONNECT_RETRY_COUNT} \
+                         completed with some failures"
+                    );
+                }
+                Err(err) => {
+                    warn!(
+                        "Auto-connect attempt {attempt}/{SERVICE_LOCATION_CONNECT_RETRY_COUNT} \
+                         failed: {err}"
+                    );
+                }
             }
-            Err(err) => {
-                warn!(
-                    "Error while trying to auto-connect to service locations: {err}. \
-                     Will continue monitoring for login/logoff events.",
-                );
+
+            if attempt < SERVICE_LOCATION_CONNECT_RETRY_COUNT {
+                sleep(SERVICE_LOCATION_CONNECT_RETRY_DELAY).await;
             }
         }
+        info!("Service location auto-connect task finished");
+    });
+
+    // Spawn login/logoff monitoring task; it runs concurrently with the tasks above.
+    let service_location_manager_clone = service_location_manager.clone();
+    runtime.spawn(async move {
+        let manager = service_location_manager_clone;
 
         info!("Starting login/logoff event monitoring");
 
         loop {
@@ -140,14 +187,14 @@
                     "Login/logoff event monitoring ended unexpectedly. Restarting in \
                      {LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS:?}..."
                 );
-                tokio::time::sleep(LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS).await;
+                sleep(LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS).await;
             }
             Err(e) => {
                 error!(
                     "Error in login/logoff event monitoring: {e}. 
Restarting in \ {LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS:?}...", ); - tokio::time::sleep(LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS).await; + sleep(LOGIN_LOGOFF_MONITORING_RESTART_DELAY_SECS).await; info!("Restarting login/logoff event monitoring"); } } diff --git a/src-tauri/src/wg_config.rs b/src-tauri/src/wg_config.rs index b7d91b7c..78938a7c 100644 --- a/src-tauri/src/wg_config.rs +++ b/src-tauri/src/wg_config.rs @@ -1,6 +1,6 @@ +use std::{array::TryFromSliceError, net::IpAddr, path::Path}; + use base64::{prelude::BASE64_STANDARD, DecodeError, Engine}; -use std::path::Path; -use std::{array::TryFromSliceError, net::IpAddr}; use thiserror::Error; use x25519_dalek::{PublicKey, StaticSecret};
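
Note on the synchronous NotifyAddrChange call in watch_for_network_change above: the call
blocks until the next address change, and because the watcher runs as an ordinary async
task, that wait occupies one of the tokio runtime's worker threads for the lifetime of the
loop. A minimal sketch of an alternative, assuming the same windows-sys bindings this diff
enables; `wait_for_addr_change` is a hypothetical helper, not part of this change:

    use std::ptr;

    use tokio::task::spawn_blocking;
    use windows_sys::Win32::NetworkManagement::IpHelper::NotifyAddrChange;

    /// Hypothetical helper: waits for the next IP address change on tokio's
    /// blocking thread pool instead of an async worker thread. Returns the
    /// Win32 result code (0 == NO_ERROR, i.e. an address was added or removed).
    async fn wait_for_addr_change() -> u32 {
        spawn_blocking(|| {
            // SAFETY: NULL handle + NULL OVERLAPPED selects the synchronous mode,
            // so the call blocks this dedicated blocking-pool thread until an
            // address change occurs or an error is returned.
            unsafe { NotifyAddrChange(ptr::null_mut(), ptr::null()) }
        })
        .await
        .expect("blocking NotifyAddrChange task panicked")
    }

The watcher loop would then use `let result = wait_for_addr_change().await;` in place of
the direct unsafe call, leaving its error handling and stabilization delay unchanged.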