diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5334038..c945e84 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,6 +38,26 @@ jobs: include: - build: stable toolchain: stable + # Service containers to run with `container-job` + services: + # Label used to access the service container + postgres: + # Docker Hub image + image: postgres:latest + # Provide the password for postgres + env: + POSTGRES_PASSWORD: password + POSTGRES_USER: postgres + POSTGRES_DB: postgres + # Map port 5432 on the container to 5432 on the host + ports: + - 5432:5432 + # Set health checks to wait until postgres has started + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 steps: - uses: actions/checkout@v3 @@ -52,6 +72,13 @@ with: command: test args: --workspace --all-features + env: + # Use localhost instead of postgres hostname + POSTGRES_HOST: localhost + # Add database connection details + POSTGRES_USER: postgres + POSTGRES_PASSWORD: password + POSTGRES_DB: postgres formatting: name: Rustfmt Check diff --git a/.gitignore b/.gitignore index a338de3..0b96a51 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ **/target Cargo.lock -.env \ No newline at end of file +.env +.idea \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 6d64b4e..d6f3255 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,14 @@ [workspace] resolver = "2" -members = ["eventastic", "eventastic_postgres", "examples/*"] +members = [ + "eventastic", + "eventastic_outbox_postgres", + "eventastic_postgres", + "examples/*", +] + +[workspace.package] +license = "MIT" [workspace.dependencies] # Eventastic dependencies @@ -10,6 +18,7 @@ serde = { version = "1", features = ["derive"] } thiserror = "1" # Eventastic postgres dependencies +async-stream = "0.3.6" sqlx = { version = "0.8", features = [ "runtime-tokio-rustls", "postgres", @@ -22,5 +31,4 @@ uuid = { version = 
"1", features = ["v4", "serde"] } chrono = "0.4" serde_json = "1" tokio = { version = "1", features = ["full"] } -futures-util = "0.3" -anyhow = "1" +futures-util = "0.3.31" diff --git a/LICENSE b/LICENSE index 04b23b1..2afa4af 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,7 @@ MIT License Copyright (c) 2019 Danilo Cianfrone +Copyright (c) 2025 Jonathan Donaldson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index 19d0a00..7cb3e32 100644 --- a/README.md +++ b/README.md @@ -1,100 +1,199 @@ # Eventastic -This is an opinionated fork of [Eventually-rs](https://github.com/get-eventually/eventually-rs). +A type-safe event sourcing and CQRS library for Rust with PostgreSQL persistence. -Eventastic enforces the use of transactions, handles idempotency and removes command handling abstractions. +## Features -## Examples -See full examples in [examples/bank](https://github.com/jdon/eventastic/blob/main/examples/bank/src/main.rs) +- **Strongly-typed aggregates and events** - Define your domain model with Rust structs and enums +- **Mandatory transactions** for ACID guarantees +- **Built-in idempotency** prevents duplicate event processing +- **Optimistic concurrency control** detects conflicting modifications +- **Transactional outbox pattern** for reliable side effects +- **Snapshot optimization** for fast aggregate loading +- **In-memory repository** for testing and development + +## Quick Start + +Define your domain aggregate and events: ```rust -#[tokio::main] -async fn main() -> Result<(), anyhow::Error> { - // Setup postgres repo - let repository = get_repository().await; +use eventastic::aggregate::{Aggregate, Context, Root, SideEffect}; +use eventastic::event::DomainEvent; +use eventastic::memory::InMemoryRepository; +use eventastic::repository::Repository; + +#[derive(Clone, Debug)] +struct BankAccount { + id: String, + 
balance: i64, +} + +#[derive(Clone, Debug, PartialEq, Eq)] +enum AccountEvent { + Opened { event_id: String, account_id: String, initial_balance: i64 }, + Deposited { event_id: String, amount: i64 }, + Withdrawn { event_id: String, amount: i64 }, +} - // Run our side effects handler in a background task - tokio::spawn(async { - let repository = get_repository().await; +impl DomainEvent for AccountEvent { + type EventId = String; + fn id(&self) -> &Self::EventId { + match self { + AccountEvent::Opened { event_id, .. } => event_id, + AccountEvent::Deposited { event_id, .. } => event_id, + AccountEvent::Withdrawn { event_id, .. } => event_id, + } + } +} - let _ = repository - .start_outbox(SideEffectContext {}, std::time::Duration::from_secs(5)) - .await; - }); +// Define a no-op side effect type +#[derive(Clone, Debug, PartialEq, Eq)] +struct NoSideEffect; - // Start transaction - let mut transaction = repository.begin_transaction().await?; +impl SideEffect for NoSideEffect { + type SideEffectId = String; + fn id(&self) -> &Self::SideEffectId { + unreachable!("No side effects are produced") + } +} + +impl Aggregate for BankAccount { + const SNAPSHOT_VERSION: u64 = 1; + type AggregateId = String; + type DomainEvent = AccountEvent; + type ApplyError = String; + type SideEffect = NoSideEffect; + + fn aggregate_id(&self) -> &Self::AggregateId { + &self.id + } + + fn apply_new(event: &Self::DomainEvent) -> Result { + match event { + AccountEvent::Opened { account_id, initial_balance, .. } => { + Ok(BankAccount { + id: account_id.clone(), + balance: *initial_balance, + }) + } + _ => Err("Account must be opened first".to_string()), + } + } + + fn apply(&mut self, event: &Self::DomainEvent) -> Result<(), Self::ApplyError> { + match event { + AccountEvent::Opened { .. } => Err("Account already exists".to_string()), + AccountEvent::Deposited { amount, .. } => { + self.balance += amount; + Ok(()) + } + AccountEvent::Withdrawn { amount, .. 
} => { + if self.balance >= *amount { + self.balance -= amount; + Ok(()) + } else { + Err("Insufficient funds".to_string()) + } + } + } + } + + fn side_effects(&self, _event: &Self::DomainEvent) -> Option> { + None + } +} +``` + +Use the aggregate with transactions: - let account_id = Uuid::new_v4(); +```rust +#[tokio::main] +async fn main() -> Result<(), Box> { + let repository = InMemoryRepository::::new(); + + // Create new account using the Root trait + let mut account: Context = BankAccount::record_new( + AccountEvent::Opened { + event_id: "evt-1".to_string(), + account_id: "acc-123".to_string(), + initial_balance: 1000, + } + )?; + + // Deposit money + account.record_that(AccountEvent::Deposited { + event_id: "evt-2".to_string(), + amount: 500, + })?; + + // Save with transaction + let mut transaction = repository.begin_transaction().await?; + transaction.store(&mut account).await?; + transaction.commit()?; - let event_id = Uuid::new_v4(); + // Load account + let loaded_account = repository.load(&"acc-123".to_string()).await?; + assert_eq!(loaded_account.state().balance, 1500); - let add_event_id = Uuid::new_v4(); + Ok(()) +} +``` - // Open a bank account - let event = AccountEvent::Open { - event_id, - account_id, - starting_balance: 21, - email: "user@example.com".into(), - }; +## Architecture - let mut account = Account::record_new(event)?; +Eventastic is built around four core concepts: - // Add funds to newly created account - let add_event = AccountEvent::Add { - event_id: add_event_id, - amount: 324, - }; +- **Aggregates** - Domain entities that apply events to update their state +- **Events** - Immutable records of what happened in your domain +- **Context** - Wrapper that tracks aggregate state and uncommitted events +- **Repository** - Persistence layer with transactional guarantees - // Record add fund events. - // Record takes in the transaction, as it does idempotency checks with the db. 
- account - .record_that(&mut transaction, add_event.clone()) - .await?; +## Why Eventastic? - // Save uncommitted events and side effects in the db. - transaction.store(&mut account).await?; +### Transaction-First Design - // Commit the transaction - transaction.commit().await?; +Unlike many event sourcing libraries, Eventastic requires transactions for all write operations. This provides: - // Get the aggregate from the db - let mut transaction = repository.begin_transaction().await?; +- **ACID compliance** - All changes are atomic and consistent +- **Idempotency** - Duplicate events are detected and handled gracefully +- **Concurrency safety** - Optimistic locking prevents data races +- **Side effect reliability** - External operations are processed via outbox pattern - let mut account: Context = transaction.get(&account_id).await?; +### Rust Benefits - // Check our balance is correct - assert_eq!(account.state().balance, 345); +Using Rust provides compile-time guarantees: - // Trying to apply the same event id but with different content gives us an IdempotencyError - let changed_add_event = AccountEvent::Add { - event_id: add_event_id, - amount: 123, - }; +- Events must implement required traits (DomainEvent, Clone, etc.) 
+- Aggregates must handle all event types in match statements +- Error handling is explicit with Result types +- No null pointer exceptions or runtime type errors - let err = account - .record_that(&mut transaction, changed_add_event) - .await - .expect_err("failed to get error"); +### Production Ready - assert!(matches!(err, RecordError::IdempotencyError(_, _))); +Eventastic includes features needed for production systems: - // Applying the already applied event, will be ignored and return Ok - account.record_that(&mut transaction, add_event).await?; +- Automatic snapshot creation and loading +- Comprehensive error types with structured information +- Transaction-based consistency guarantees - transaction.commit().await?; +## Persistence - let mut transaction = repository.begin_transaction().await?; +The library provides multiple repository implementations: - let account: Context = transaction.get(&account_id).await?; +- `eventastic::memory::InMemoryRepository` - For testing and development +- `eventastic_postgres::PostgresRepository` - For production PostgreSQL storage with: + - Event and snapshot storage with versioning + - Full transaction support with optimistic concurrency control + - Optional encryption for sensitive data + - Database migrations support +- `eventastic_outbox_postgres::TableOutbox` - Transactional outbox pattern for reliable side effect processing - // Balance hasn't changed since the event wasn't actually applied - assert_eq!(account.state().balance, 345); +## Examples - println!("Got account {account:?}"); +See the `examples/` directory for complete implementations: - tokio::time::sleep(std::time::Duration::from_secs(30)).await; - Ok(()) -} -``` +- **Bank** - Full banking domain demonstrating: + - Account creation and management + - Transaction processing + - Side effects via outbox pattern + - Idempotency and concurrency handling \ No newline at end of file diff --git a/eventastic/Cargo.toml b/eventastic/Cargo.toml index 873bbc3..bf0ab2b 
100644 --- a/eventastic/Cargo.toml +++ b/eventastic/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "eventastic" -version = "0.4.0" -edition = "2021" +version = "0.5.0" +edition = "2024" license = "MIT" readme = "../README.md" repository = "https://github.com/jdon/eventastic" @@ -17,5 +17,7 @@ keywords = ["architecture", "ddd", "event-sourcing", "cqrs", "es"] [dependencies] async-trait = { workspace = true } futures = { workspace = true } -serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } + +[dev-dependencies] +tokio = { version = "1.0", features = ["full"] } diff --git a/eventastic/src/aggregate.rs b/eventastic/src/aggregate.rs index 5d7f459..095b8e4 100644 --- a/eventastic/src/aggregate.rs +++ b/eventastic/src/aggregate.rs @@ -2,14 +2,14 @@ //! //! ## What is an Aggregate? //! -//! An [Aggregate] is the most important concept in your domain. +//! An [`Aggregate`] is the most important concept in your domain. //! //! It represents the entities your business domain is composed of, //! and the business logic your domain is exposing. //! //! For example: in an Order Management bounded-context (e.g. a //! microservice), the concepts of Order or Customer are two potential -//! [Aggregate]s. +//! [`Aggregate`]s. //! //! Aggregates expose mutations with the concept of **commands**: //! from the previous example, an Order might expose some commands such as @@ -24,16 +24,36 @@ //! //! Aggregates should provide a way to **fold** Domain Events on the //! current value of the state, to produce the next state. +//! +//! ## Aggregates and Events +//! +//! Aggregates consume events to maintain their state: +//! +//! ### **Consuming Events** +//! +//! Aggregates apply events to change their state: +//! - [`Aggregate::apply_new()`] creates new aggregate instances from "creation" events +//! - [`Aggregate::apply()`] modifies existing aggregate state with subsequent events +//! 
- Both methods validate events against current state and business rules +//! +//! ### **Creating Events** +//! +//! Business logic methods (typically on the aggregate) create events that represent what happened: +//! - Commands are validated and translated into domain events +//! - Events must implement the [`DomainEvent`] trait for unique identification +//! - Events are recorded via [`Context::record_that()`](crate::aggregate::Context::record_that) +//! - Recorded events are held in [`Context`](crate::aggregate::Context) until persistence +//! +//! For the complete event lifecycle and persistence patterns, see [`crate::event`] and [`crate::repository`]. -use crate::event::Event; +use crate::event::DomainEvent; use std::fmt::Debug; mod root; -use async_trait::async_trait; pub use root::*; -/// An Aggregate represents a Domain Model that, through an Aggregate [Root], +/// An Aggregate represents a Domain Model that, through an Aggregate [`Root`], /// acts as a _transactional boundary_. /// /// Aggregates are also used to enforce Domain invariants @@ -44,25 +64,40 @@ pub use root::*; /// a Domain Event, which is then applied to the current state /// using the [`Aggregate::apply`] method. /// -/// More on Aggregates can be found here: `` -pub trait Aggregate: Sized + Send + Sync + Clone { - /// The current version of the snapshot to store. - /// This number should be increased when a breaking change is made to the apply functions. +pub trait Aggregate: Sized + Clone { + /// The current version of the aggregate's snapshot format. + /// + /// This version number tracks the compatibility of stored snapshots with the current + /// aggregate implementation. When you make breaking changes to the aggregate structure + /// or apply logic that would make existing snapshots incompatible, increment this number. 
+ /// + /// ## When to increment: + /// - Adding/removing/renaming fields in the aggregate struct + /// - Changing field types or serialization format + /// - Modifying apply logic in ways that change state calculation + /// - Any change that would cause a stored snapshot to be invalid + /// + /// ## How it works: + /// The repository compares this version against the version stored with each snapshot. + /// If they don't match, the repository will ignore the incompatible snapshot and + /// rebuild the aggregate state by replaying all events from the beginning. const SNAPSHOT_VERSION: u64; /// The type used to uniquely identify the Aggregate. - type AggregateId: Send + Sync + Clone + Debug + Eq + PartialEq; + type AggregateId: Clone + Debug + Eq + PartialEq; /// The type of Domain Events that interest this Aggregate. /// Usually, this type should be an `enum`. - type DomainEvent: Send + Sync + Clone + Debug + Eq + PartialEq + Event; - - /// The type used to uniquely identify the a given domain event. - type DomainEventId: Send + Sync + Clone + Debug + Eq + PartialEq; + /// + /// This type must implement the [`DomainEvent`] trait, + /// which provides unique event identification for idempotency checking. + /// + /// See the [`crate::event`] module documentation for guidance on designing domain events. + type DomainEvent: Clone + Debug + Eq + PartialEq + DomainEvent; /// The error type that can be returned by [`Aggregate::apply`] when /// mutating the Aggregate state. - type ApplyError: Send + Sync + Debug; + type ApplyError; /// The type of side effect that this aggregate can produce. /// Usually, this type should be an `enum`. @@ -73,6 +108,10 @@ pub trait Aggregate: Sized + Send + Sync + Clone { /// Create a new Aggregate through a Domain Event. /// + /// This method takes a [`DomainEvent`] and creates + /// the initial state of the aggregate. Typically, this should only accept + /// "creation" events that establish the aggregate's identity. 
+ /// /// # Errors /// /// The method can return an error if the event to apply is unexpected @@ -81,6 +120,10 @@ pub trait Aggregate: Sized + Send + Sync + Clone { /// Mutates the state of an Aggregate through a Domain Event. /// + /// This method takes a [`DomainEvent`] and applies + /// the change to the aggregate's state. This is where your business logic + /// validates the event and updates the aggregate accordingly. + /// /// # Errors /// /// The method can return an error if the event to apply is unexpected @@ -88,34 +131,121 @@ pub trait Aggregate: Sized + Send + Sync + Clone { fn apply(&mut self, event: &Self::DomainEvent) -> Result<(), Self::ApplyError>; /// Generates a list of side effects for this given aggregate and domain event + /// /// The domain event has already been applied to the aggregate fn side_effects(&self, event: &Self::DomainEvent) -> Option>; } -pub trait SideEffect: Send + Sync + Debug { +pub trait SideEffect { /// The type used to uniquely identify this side effect. - type Id: Send + Sync + Debug + Clone; - /// The error type that can be returned when calling a [`SideEffectHandler::handle`] - type Error: Send + Sync + Debug; + type SideEffectId; - /// Returns read access to the [`SideEffect::Id`] - fn id(&self) -> &Self::Id; + /// Returns read access to the [`SideEffect::SideEffectId`] + fn id(&self) -> &Self::SideEffectId; } -#[async_trait] -pub trait SideEffectHandler { - type SideEffect: SideEffect; - - /// Handles a side effect - /// - /// If Ok(()) is returned, the side effect is complete and it will be deleted from the repository. 
- /// - /// If Err((true, Error)) is returned, the side effect be will requeued - /// - /// if Err((false, Error)) is returned, the side effect won't be requeued - async fn handle( - &self, - msg: &Self::SideEffect, - retires: u16, - ) -> Result<(), (bool, ::Error)>; +#[cfg(test)] +mod tests { + use super::*; + use crate::test_fixtures::*; + + // Tests that Aggregate::apply_new creates a new aggregate from a creation event + #[test] + fn test_aggregate_creation() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let calc = TestCounter::apply_new(&reset_event).unwrap(); + + assert_eq!(calc.id, "calc-1"); + assert_eq!(calc.result, 0); + assert_eq!(calc.operations_count, 0); + } + + // Tests that Aggregate::apply_new rejects non-creation events + #[test] + fn test_aggregate_creation_with_invalid_event() { + let add_event = create_add_event("add-1", 10); + let result = TestCounter::apply_new(&add_event); + assert!(matches!(result, Err(TestError::InvalidOperation))); + } + + // Tests that Aggregate::apply correctly processes multiple event types and updates state + #[test] + fn test_aggregate_apply_events() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut calc = TestCounter::apply_new(&reset_event).unwrap(); + + // Apply addition + let add_event = create_add_event("add-1", 15); + calc.apply(&add_event).unwrap(); + assert_eq!(calc.result, 15); + assert_eq!(calc.operations_count, 1); + + // Apply subtraction + let subtract_event = create_subtract_event("sub-1", 5); + calc.apply(&subtract_event).unwrap(); + assert_eq!(calc.result, 10); + assert_eq!(calc.operations_count, 2); + + // Apply multiplication + let multiply_event = create_multiply_event("mul-1", 3); + calc.apply(&multiply_event).unwrap(); + assert_eq!(calc.result, 30); + assert_eq!(calc.operations_count, 3); + } + + // Tests that Aggregate::apply returns appropriate errors for invalid operations + #[test] + fn test_aggregate_apply_error() { + let reset_event = 
create_reset_event("reset-1", "calc-1"); + let mut calc = TestCounter::apply_new(&reset_event).unwrap(); + + // Try to apply reset to existing aggregate (invalid) + let another_reset = create_reset_event("reset-2", "calc-2"); + let result = calc.apply(&another_reset); + assert!(matches!(result, Err(TestError::InvalidOperation))); + + // Try to multiply by zero (invalid) + let multiply_zero = create_multiply_event("mul-zero", 0); + let result = calc.apply(&multiply_zero); + assert!(matches!(result, Err(TestError::DivisionByZero))); + } + + // Tests that Aggregate::side_effects generates the correct side effects for different events + #[test] + fn test_side_effects_generation() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let calc = TestCounter::apply_new(&reset_event).unwrap(); + + // Reset event generates 2 side effects + let side_effects = calc.side_effects(&reset_event); + assert!(side_effects.is_some()); + let effects = side_effects.unwrap(); + assert_eq!(effects.len(), 2); + + // Add event generates 1 side effect + let add_event = create_add_event("add-1", 10); + let side_effects = calc.side_effects(&add_event); + assert!(side_effects.is_some()); + let effects = side_effects.unwrap(); + assert_eq!(effects.len(), 1); + + // Multiply event generates no side effects + let multiply_event = create_multiply_event("mul-1", 2); + let side_effects = calc.side_effects(&multiply_event); + assert!(side_effects.is_none()); + } + + // Tests that Aggregate::aggregate_id returns the correct identifier + #[test] + fn test_aggregate_id() { + let reset_event = create_reset_event("reset-1", "my-counter"); + let calc = TestCounter::apply_new(&reset_event).unwrap(); + assert_eq!(calc.aggregate_id(), "my-counter"); + } + + // Tests that the aggregate has the expected SNAPSHOT_VERSION constant + #[test] + fn test_snapshot_version() { + assert_eq!(TestCounter::SNAPSHOT_VERSION, 1); + } } diff --git a/eventastic/src/aggregate/root.rs 
b/eventastic/src/aggregate/root.rs index 8f792f3..90baf90 100644 --- a/eventastic/src/aggregate/root.rs +++ b/eventastic/src/aggregate/root.rs @@ -1,12 +1,20 @@ -use crate::repository::{RepositoryTransaction, Snapshot}; +use futures::TryStreamExt; + +use crate::repository::{RepositoryError, RepositoryReader, RepositoryWriter, Snapshot}; use crate::{ aggregate::Aggregate, - event::{Event, EventStoreEvent}, + event::{DomainEvent, EventStoreEvent}, }; use std::fmt::Debug; -/// A context object that should be used by the Aggregate [Root] methods to -/// access the [Aggregate] state and to record new Domain Events. +/// A context object that should be used by the Aggregate [`Root`] methods to +/// access the [`Aggregate`] state and to record new Domain Events. +/// +/// The Context wraps an aggregate and tracks: +/// - The current aggregate state +/// - The aggregate version for optimistic concurrency control +/// - Uncommitted events that haven't been persisted yet +/// - Uncommitted side effects to be processed #[derive(Debug, Clone)] #[must_use] pub struct Context @@ -15,7 +23,7 @@ where { aggregate: T, version: u64, - uncommitted_events: Vec>, + uncommitted_events: Vec>, uncommitted_side_effects: Vec, } @@ -28,41 +36,35 @@ where self.aggregate.aggregate_id() } - /// Returns the current version for the [Aggregate]. + /// Returns the current version for the [`Aggregate`]. pub fn version(&self) -> u64 { self.version } - /// Returns the current snapshot of the [Aggregate]. + /// Returns the current snapshot version for the [`Aggregate`]. pub fn snapshot_version(&self) -> u64 { T::SNAPSHOT_VERSION } - /// Returns the list of uncommitted, recorded Domain [Event]s from the [Context] + /// Returns the list of uncommitted, recorded Domain Events from the [`Context`] /// and resets the internal list to its default value. 
#[doc(hidden)] - pub fn take_uncommitted_events( - &mut self, - ) -> Vec> { + pub fn take_uncommitted_events(&mut self) -> Vec> { std::mem::take(&mut self.uncommitted_events) } - /// Returns the list of uncommitted, recorded [`Aggregate::SideEffect`]s from the [Context] + /// Returns the list of uncommitted, recorded [`Aggregate::SideEffect`]s from the [`Context`] /// and resets the internal list to its default value. #[doc(hidden)] pub fn take_uncommitted_side_effects(&mut self) -> Vec { std::mem::take(&mut self.uncommitted_side_effects) } - /// Creates a new [Context] instance from a Domain [Event] - /// while rehydrating an [Aggregate]. - /// - /// # Errors - /// - /// The method can return an error if the event to apply is unexpected - /// given the current state of the Aggregate. - pub(crate) fn rehydrate_from( - event: &EventStoreEvent, + /// Creates a new [`Context`] instance from a Domain Event + /// while rehydrating an [`Aggregate`]. + #[doc(hidden)] + pub fn rehydrate_from( + event: &EventStoreEvent, ) -> Result, T::ApplyError> { Ok(Context { version: event.version, @@ -72,16 +74,12 @@ where }) } - /// Applies a new Domain [Event] to the [Context] while rehydrating - /// an [Aggregate]. - /// - /// # Errors - /// - /// The method can return an error if the event to apply is unexpected - /// given the current state of the Aggregate. - pub(crate) fn apply_rehydrated_event( + /// Applies a new Domain Event to the [`Context`] while rehydrating + /// an [`Aggregate`]. 
+ #[doc(hidden)] + pub fn apply_rehydrated_event( mut self, - event: &EventStoreEvent, + event: &EventStoreEvent, ) -> Result, T::ApplyError> { self.version += 1; debug_assert!(self.version == event.version); @@ -89,96 +87,42 @@ where Ok(self) } - /// Checks if the event exists in the repository and that they are equal - pub(crate) async fn check_idempotency( - &self, - repository: &mut R, - aggregate_id: &T::AggregateId, - event: &T::DomainEvent, - ) -> Result>::DbError>> - where - R: RepositoryTransaction, - { - if let Some(saved_event) = repository - .get_event(aggregate_id, event.id()) - .await - .map_err(RecordError::Repository)? - { - if saved_event.event != *event { - return Err(RecordError::IdempotencyError( - saved_event.event, - event.clone(), - )); - } - return Ok(true); - } - - if let Some(existing_event) = self.uncommitted_events.iter().find(|e| e.id == *event.id()) { - if existing_event.event != *event { - return Err(RecordError::IdempotencyError( - existing_event.event.clone(), - event.clone(), - )); - } - return Ok(true); - }; - - Ok(false) - } - pub(crate) fn record_new(event: T::DomainEvent) -> Result, T::ApplyError> { let aggregate = T::apply_new(&event)?; - let mut uncommitted_side_effects = vec![]; - if let Some(mut side_effects) = aggregate.side_effects(&event) { - uncommitted_side_effects.append(&mut side_effects); - } + // Create the event store event first + let event_store_event = EventStoreEvent { + id: event.id().clone(), + version: 0, + event, + }; + + // Get side effects (if any) + let uncommitted_side_effects = aggregate + .side_effects(&event_store_event.event) + .unwrap_or_default(); - let root = Context { + // Create the context + Ok(Context { version: 0, aggregate, - uncommitted_events: vec![EventStoreEvent { - id: event.id().clone(), - version: 0, - event, - }], + uncommitted_events: vec![event_store_event], uncommitted_side_effects, - }; - - Ok(root) + }) } - /// Returns read access to the [Aggregate] state. 
+ /// Returns read access to the [`Aggregate`] state. pub fn state(&self) -> &T { &self.aggregate } - /// Records a change to the [Aggregate] [Root], expressed by the specified + /// Records a change to the [`Aggregate`] [`Root`], expressed by the specified /// Domain Event. - /// # Errors - /// - /// The method can return an error if the event to apply is unexpected - /// given the current state of the Aggregate. - pub async fn record_that( - &mut self, - repository: &mut R, - event: T::DomainEvent, - ) -> Result<(), RecordError>::DbError>> - where - R: RepositoryTransaction, - { - // Check if the event is has already been applied, if so let's ignore it. - if self - .check_idempotency(repository, self.aggregate_id(), &event) - .await? - { - return Ok(()); - } - - self.aggregate.apply(&event).map_err(RecordError::Apply)?; + pub fn record_that(&mut self, event: T::DomainEvent) -> Result<(), T::ApplyError> { + self.aggregate.apply(&event)?; self.version += 1; - if let Some(mut side_effects) = self.aggregate.side_effects(&event) { - self.uncommitted_side_effects.append(&mut side_effects); + if let Some(side_effects) = self.aggregate.side_effects(&event) { + self.uncommitted_side_effects.extend(side_effects); } self.uncommitted_events.push(EventStoreEvent { @@ -189,19 +133,203 @@ where Ok(()) } + + /// Saves the aggregate and its uncommitted events to the repository. + /// This method handles concurrency control and idempotency checks. 
+ pub async fn save(&mut self, transaction: &mut R) -> Result<(), SaveError> + where + R: RepositoryWriter, + { + let events_to_commit = self.take_uncommitted_events(); + + if events_to_commit.is_empty() { + return Ok(()); + } + + let side_effects_to_commit = self.take_uncommitted_side_effects(); + + let aggregate_id = self.aggregate_id(); + + let snapshot_version = self.snapshot_version(); + let snapshot_to_store = self.state(); + + let snapshot = Snapshot { + snapshot_version, + aggregate: snapshot_to_store.clone(), + version: self.version(), + }; + + // When we insert the events, it's possible that the events have already been inserted + // If that's the case, we need to check if the previously inserted events are the same as the ones we have + let inserted_event_ids = transaction + .store_events(aggregate_id, events_to_commit.clone()) + .await + .map_err(SaveError::Repository)?; + + if inserted_event_ids.len() != events_to_commit.len() { + // We failed to insert one or more of the events, it's possible that the events have already been inserted + // If that's the case, we need to check if the previously inserted events are the same as the ones we have + for event in events_to_commit { + if !inserted_event_ids.contains(&event.id) { + if let Some(saved_event) = transaction + .get_event(aggregate_id, event.id()) + .await + .map_err(SaveError::Repository)? 
+ { + if saved_event.event != event.event { + return Err(SaveError::IdempotencyError( + saved_event.event, + event.event, + )); + } + } else { + // The not inserted event was not found in the event store, this happens if a different event was inserted with the same version and aggregate id + // This is a fatal error, so return early + return Err(SaveError::OptimisticConcurrency( + aggregate_id.clone(), + event.version, + )); + } + } + } + } + + transaction + .store_snapshot(snapshot) + .await + .map_err(SaveError::Repository)?; + + transaction + .store_side_effects(side_effects_to_commit) + .await?; + + Ok(()) + } + + /// Loads an aggregate from the repository by replaying its event stream. + /// + /// This method first attempts to load a snapshot if available, then replays + /// any events that occurred after the snapshot to reconstruct the current state. + pub async fn load( + reader: &mut R, + aggregate_id: &T::AggregateId, + ) -> Result< + Context, + RepositoryError< + T::ApplyError, + <::DomainEvent as DomainEvent>::EventId, + R::DbError, + >, + > + where + R: RepositoryReader, + { + let snapshot = reader.get_snapshot(aggregate_id).await?; + + let (context, version) = snapshot + .map(|s| { + ( + Some(Context { + aggregate: s.aggregate, + version: s.version, + uncommitted_events: Vec::new(), + uncommitted_side_effects: Vec::new(), + }), + s.version + 1, // Start from next event + ) + }) + .unwrap_or((None, 0)); + + let ctx = reader + .stream_from(aggregate_id, version) + .map_err(RepositoryError::Repository) + .try_fold(context, |ctx: Option>, event| async move { + match ctx { + None => Context::rehydrate_from(&event).map(Some), + Some(ctx) => ctx.apply_rehydrated_event(&event).map(Some), + } + .map_err(|e| RepositoryError::Apply(event.id, e)) + }) + .await?; + + ctx.ok_or(RepositoryError::AggregateNotFound) + } + + /// Regenerates side effects for a specific event by loading the aggregate + /// up to and including that event. 
+ /// + /// This method reconstructs the aggregate state at the point when the specified + /// event was applied, then calls the aggregate's `side_effects` method to + /// regenerate any side effects that should have been produced. + /// + /// This is useful for: + /// - Recovering from side effect processing failures + /// - Debugging side effect generation + /// - Replaying side effects for specific events + /// + /// # Returns + /// + /// - `Ok(Some(Vec))` if side effects were generated + /// - `Ok(None)` if no side effects were generated or the event wasn't found + /// - `Err` if there was an error loading the aggregate or applying events + pub async fn regenerate_side_effects( + reader: &mut R, + aggregate_id: &T::AggregateId, + event_id: &<::DomainEvent as DomainEvent>::EventId, + ) -> Result< + Option>, + RepositoryError< + T::ApplyError, + <::DomainEvent as DomainEvent>::EventId, + R::DbError, + >, + > + where + R: RepositoryReader, + { + use futures::{StreamExt, TryStreamExt}; + + // Get the specific event first to ensure it exists + let event = reader.get_event(aggregate_id, event_id).await?; + + let Some(target_event) = event else { + return Ok(None); + }; + + // Stream all events from the beginning up to and including the target version + let context = reader + .stream_from(aggregate_id, 0) + .take_while(|result| { + futures::future::ready(match result { + Ok(e) => e.version <= target_event.version, + Err(_) => true, + }) + }) + .map_err(RepositoryError::Repository) + .try_fold(None, |ctx: Option>, event| async move { + match ctx { + None => Context::rehydrate_from(&event).map(Some), + Some(c) => c.apply_rehydrated_event(&event).map(Some), + } + .map_err(|e| RepositoryError::Apply(event.id, e)) + }) + .await?; + + let Some(aggregate) = context else { + return Ok(None); + }; + + // Generate side effects with aggregate in the correct state + Ok(aggregate.aggregate.side_effects(&target_event.event)) + } } -/// List of possible errors that can be returned 
by when recording events using [`Context::record_that`] +/// List of possible errors that can be returned when saving an aggregate using [`Context::save`]. #[derive(Debug, thiserror::Error)] -pub enum RecordError +pub enum SaveError where T: Aggregate, { - /// The [Event] failed to be applied to the [Aggregate]. - /// This usually implies that the event is invalid for given state of the aggregate. - #[error("Failed to rehydrate aggregate from event stream. {0:?}")] - Apply(T::ApplyError), - /// This error is returned when the event in the repository with the same ID /// doesn't have the same content. #[error("Idempotency Error. Saved event {0:?} does not equal {1:?}")] @@ -211,20 +339,11 @@ where /// an unexpected error while streaming back the Aggregate's Event Stream. #[error("Event store failed while streaming events: {0}")] Repository(#[from] DE), -} -impl From> for Context -where - T: Aggregate, -{ - fn from(value: Snapshot) -> Self { - Self { - aggregate: value.aggregate, - version: value.version, - uncommitted_events: Vec::new(), - uncommitted_side_effects: Vec::new(), - } - } + /// This error is returned when the Repository fails to insert the event + /// because the version already exists, indicating a concurrent modification.
+ #[error("Optimistic Concurrency Error")] + OptimisticConcurrency(T::AggregateId, u64), } pub trait Root @@ -243,3 +362,299 @@ where } impl Root for T where T: Aggregate {} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_fixtures::*; + + // Tests that Context::record_new creates a new context with proper initial state + #[test] + fn test_context_record_new() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let context = TestCounter::record_new(reset_event.clone()).unwrap(); + + assert_eq!(context.state().id, "calc-1"); + assert_eq!(context.state().result, 0); + assert_eq!(context.version(), 0); + assert_eq!(context.aggregate_id(), "calc-1"); + assert_eq!(context.snapshot_version(), TestCounter::SNAPSHOT_VERSION); + } + + // Tests that Context::record_that applies events and increments version correctly + #[test] + fn test_context_record_that() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + + // Record addition event + let add_event = create_add_event("add-1", 15); + context.record_that(add_event).unwrap(); + + assert_eq!(context.state().result, 15); + assert_eq!(context.version(), 1); + + // Record subtraction event + let subtract_event = create_subtract_event("sub-1", 5); + context.record_that(subtract_event).unwrap(); + + assert_eq!(context.state().result, 10); + assert_eq!(context.version(), 2); + } + + // Tests that Context::record_that properly handles and propagates aggregate errors + #[test] + fn test_context_record_that_error() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + + // Try to record invalid event + let multiply_zero_event = create_multiply_event("mul-zero", 0); + let result = context.record_that(multiply_zero_event); + assert!(matches!(result, Err(TestError::DivisionByZero))); + + // State should remain unchanged + assert_eq!(context.state().result, 
0); + assert_eq!(context.version(), 0); + } + + // Tests that Context correctly tracks version increments across multiple events + #[test] + fn test_context_version_tracking() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + assert_eq!(context.version(), 0); + + for i in 1..=5 { + let add_event = create_add_event(&format!("add-{i}"), i); + context.record_that(add_event).unwrap(); + assert_eq!(context.version(), i as u64); + } + } + + // Tests that Context::take_uncommitted_events returns events and empties the list + #[test] + fn test_context_uncommitted_events() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + + let add_event = create_add_event("add-1", 10); + context.record_that(add_event).unwrap(); + + let events = context.take_uncommitted_events(); + assert_eq!(events.len(), 2); // Reset + Add + + // After taking, should be empty + let empty_events = context.take_uncommitted_events(); + assert_eq!(empty_events.len(), 0); + } + + // Tests that Context::take_uncommitted_side_effects returns side effects and empties the list + #[test] + fn test_context_side_effects() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + + let add_event = create_add_event("add-1", 5); + context.record_that(add_event).unwrap(); + + let side_effects = context.take_uncommitted_side_effects(); + assert_eq!(side_effects.len(), 3); // 2 from reset + 1 from add + + // After taking, should be empty + let empty_side_effects = context.take_uncommitted_side_effects(); + assert_eq!(empty_side_effects.len(), 0); + } + + // Tests that Context::state provides read-only access to the aggregate state + #[test] + fn test_context_state_access() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let context = 
TestCounter::record_new(reset_event).unwrap(); + + // Test state() method + let state = context.state(); + assert_eq!(state.id, "calc-1"); + assert_eq!(state.result, 0); + assert_eq!(state.operations_count, 0); + } + + // Tests that Context::snapshot_version returns the aggregate's SNAPSHOT_VERSION + #[test] + fn test_context_snapshot_version() { + let reset_event = create_reset_event("reset-1", "calc-1"); + let context = TestCounter::record_new(reset_event).unwrap(); + + // Should return the aggregate's SNAPSHOT_VERSION constant + assert_eq!(context.snapshot_version(), TestCounter::SNAPSHOT_VERSION); + assert_eq!(context.snapshot_version(), 1); + + // Test that snapshot_version is correctly implemented + let version = context.snapshot_version(); + assert_eq!(version, 1u64); + assert_eq!(version, TestCounter::SNAPSHOT_VERSION); + } + + // Tests Context::rehydrate_from function for creating context from events + #[test] + fn test_context_rehydrate_from() { + use crate::event::EventStoreEvent; + + let reset_event = create_reset_event("reset-1", "calc-1"); + let store_event = EventStoreEvent::new("reset-1".to_string(), 0, reset_event); + + let context = Context::::rehydrate_from(&store_event).unwrap(); + + assert_eq!(context.state().id, "calc-1"); + assert_eq!(context.state().result, 0); + assert_eq!(context.version(), 0); + assert!(context.uncommitted_events.is_empty()); + assert!(context.uncommitted_side_effects.is_empty()); + } + + // Tests Context::apply_rehydrated_event for applying events during replay + #[test] + fn test_context_apply_rehydrated_event() { + use crate::event::EventStoreEvent; + + let reset_event = create_reset_event("reset-1", "calc-1"); + let store_event = EventStoreEvent::new("reset-1".to_string(), 0, reset_event); + let context = Context::::rehydrate_from(&store_event).unwrap(); + + let add_event = create_add_event("add-1", 10); + let add_store_event = EventStoreEvent::new("add-1".to_string(), 1, add_event); + + let updated_context = 
context.apply_rehydrated_event(&add_store_event).unwrap(); + + assert_eq!(updated_context.state().result, 10); + assert_eq!(updated_context.version(), 1); + assert!(updated_context.uncommitted_events.is_empty()); + assert!(updated_context.uncommitted_side_effects.is_empty()); + } + + // Tests Context::apply_rehydrated_event error handling + #[test] + fn test_context_apply_rehydrated_event_error() { + use crate::event::EventStoreEvent; + + let reset_event = create_reset_event("reset-1", "calc-1"); + let store_event = EventStoreEvent::new("reset-1".to_string(), 0, reset_event); + let context = Context::::rehydrate_from(&store_event).unwrap(); + + // Try to apply an invalid event (multiply by zero) + let invalid_event = create_multiply_event("mul-zero", 0); + let invalid_store_event = EventStoreEvent::new("mul-zero".to_string(), 1, invalid_event); + + let result = context.apply_rehydrated_event(&invalid_store_event); + assert!(matches!(result, Err(TestError::DivisionByZero))); + } + + // Tests Context::rehydrate_from error handling + #[test] + fn test_context_rehydrate_from_error() { + use crate::event::EventStoreEvent; + + // Try to create context from non-creation event + let add_event = create_add_event("add-1", 10); + let store_event = EventStoreEvent::new("add-1".to_string(), 0, add_event); + + let result = Context::::rehydrate_from(&store_event); + assert!(matches!(result, Err(TestError::InvalidOperation))); + } + + // Tests the record_new function that creates new contexts from events + #[test] + fn test_context_record_new_with_side_effects() { + // Test that side effects are properly generated during record_new + let reset_event = create_reset_event("reset-1", "calc-1"); + let context = Context::::record_new(reset_event).unwrap(); + + // Reset events generate 2 side effects + assert_eq!(context.uncommitted_side_effects.len(), 2); + + // Check the side effects content + let side_effects = &context.uncommitted_side_effects; + assert!(side_effects.iter().any(|se| 
matches!( + se, + crate::test_fixtures::TestSideEffect::LogOperation { .. } + ))); + assert!( + side_effects + .iter() + .any(|se| matches!(se, crate::test_fixtures::TestSideEffect::NotifyUser { .. })) + ); + } + + // Tests the case where no side effects are generated + #[test] + fn test_context_record_new_no_side_effects() { + use crate::test_fixtures::*; + + // Create an aggregate that doesn't generate side effects from initial event + #[derive(Clone, Debug, PartialEq, Eq)] + struct SimpleCounts { + id: String, + count: i32, + } + + #[derive(Clone, Debug, PartialEq, Eq, Hash)] + enum SimpleEvent { + Created { event_id: String, id: String }, + } + + impl crate::event::DomainEvent for SimpleEvent { + type EventId = String; + fn id(&self) -> &Self::EventId { + match self { + SimpleEvent::Created { event_id, .. } => event_id, + } + } + } + + impl crate::aggregate::Aggregate for SimpleCounts { + const SNAPSHOT_VERSION: u64 = 1; + type AggregateId = String; + type DomainEvent = SimpleEvent; + type ApplyError = String; + type SideEffect = TestSideEffect; + + fn aggregate_id(&self) -> &Self::AggregateId { + &self.id + } + + fn apply_new(event: &Self::DomainEvent) -> Result { + match event { + SimpleEvent::Created { id, .. 
} => Ok(SimpleCounts { + id: id.clone(), + count: 0, + }), + } + } + + fn apply(&mut self, _event: &Self::DomainEvent) -> Result<(), Self::ApplyError> { + Ok(()) + } + + fn side_effects(&self, _event: &Self::DomainEvent) -> Option> { + None // No side effects + } + } + + let create_event = SimpleEvent::Created { + event_id: "evt-1".to_string(), + id: "simple-1".to_string(), + }; + + let context = Context::::record_new(create_event).unwrap(); + + // Should have no side effects + assert_eq!(context.uncommitted_side_effects.len(), 0); + assert_eq!(context.version(), 0); + assert_eq!(context.state().id, "simple-1"); + + // Test additional method calls + assert_eq!(context.aggregate_id(), &"simple-1".to_string()); + let state_ref = context.state(); + assert_eq!(state_ref.count, 0); + } +} diff --git a/eventastic/src/event.rs b/eventastic/src/event.rs index 959ffda..98b7ce3 100644 --- a/eventastic/src/event.rs +++ b/eventastic/src/event.rs @@ -1,47 +1,437 @@ -//! Module `event` contains types and abstractions helpful for working -//! with Domain Events. +//! Domain events and event store abstractions for event sourcing. +//! +//! ## Overview +//! +//! This module provides the core abstractions for working with domain events in an event sourcing +//! system. Domain events represent significant business occurrences that have happened in your +//! domain and serve as the source of truth for aggregate state. +//! +//! ## Key Concepts +//! +//! ### Domain Events +//! +//! Domain events are immutable records of business-relevant facts that have occurred. They are: +//! +//! - **Immutable** - Once created, events never change +//! - **Uniquely identifiable** - Each event has a unique ID for idempotency +//! - **Business-focused** - Express what happened in domain terms +//! - **Complete** - Contain all necessary information about the occurrence +//! +//! ### Event Store Events +//! +//! 
[`EventStoreEvent`] wraps domain events with additional metadata needed for persistence: +//! +//! - **Version** - Enables optimistic concurrency control +//! - **ID** - Extracted from the domain event for quick access +//! - **Domain Event** - The actual business event +//! +//! ## Eventastic Workflow +//! +//! Once you have domain events (created by your business logic), they flow through +//! the eventastic system in a well-defined lifecycle: +//! +//! ### **1. Event Creation** +//! Your business logic creates domain events representing what happened: +//! ```rust,ignore +//! // Your application code creates events +//! let event = AccountEvent::MoneyWithdrawn { +//! event_id: Uuid::new_v4(), +//! amount: 100 +//! }; +//! ``` +//! +//! ### **2. Event Recording** +//! Events are recorded in [`Context`](crate::aggregate::Context), which applies them to +//! the aggregate state and queues them for persistence: +//! ```rust,ignore +//! context.record_that(event)?; // Applies event and adds to uncommitted events +//! ``` +//! +//! ### **3. Event Persistence** +//! The repository wraps events in [`EventStoreEvent`] and stores them transactionally: +//! ```rust,ignore +//! transaction.store(&mut context).await?; // Persists all uncommitted events +//! transaction.commit().await?; +//! ``` +//! +//! ### **4. Event Replay** +//! Stored events are replayed to reconstruct aggregate state during loading: +//! ```rust,ignore +//! let aggregate = repository.load(&aggregate_id).await?; // Replays all events +//! ``` +//! +//! This workflow ensures **exactly-once processing**, **ACID compliance**, and **full auditability** +//! of all business operations. +//! +//! **Note**: Command processing (validating commands and deciding which events to create) +//! is handled by your application code, not by this library. +//! +//! ## Idempotency +//! +//! Event IDs are crucial for idempotency - attempting to store the same event ID twice will either +//! 
succeed (if content is identical) or fail with an idempotency error (if content differs). +//! This ensures exactly-once processing of business events. +//! +//! ## Examples +//! +//! For complete examples showing events in context, see: +//! - The [banking example](https://github.com/jdon/eventastic/tree/main/examples/bank) for production patterns +//! - [`crate::aggregate`] module documentation for how events are applied to aggregates +//! - [`crate::repository`] module documentation for event persistence patterns use std::fmt::Debug; -use futures::stream::BoxStream; -use serde::{Deserialize, Serialize}; - -/// An [`Event`] that will be / has been persisted to the Event Store. -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct EventStoreEvent +/// A [`DomainEvent`] that has been wrapped with metadata for persistence. +/// +/// This struct represents a domain event along with the metadata needed for storing +/// and retrieving it from the event store. It's used internally by the eventastic +/// framework to manage event persistence and concurrency control. +/// +/// ## Fields +/// +/// - **`id`** - The unique identifier extracted from the domain event, used for idempotency checking +/// - **`version`** - The version number of this event within the aggregate's event stream +/// - **`event`** - The actual domain event containing the business data +/// +/// ## Versioning and Concurrency +/// +/// The `version` field enables optimistic concurrency control. When multiple transactions +/// try to modify the same aggregate simultaneously, version conflicts are detected and +/// one transaction will fail with an [`OptimisticConcurrency`](crate::aggregate::SaveError::OptimisticConcurrency) error. +/// +/// ## Usage +/// +/// `EventStoreEvent` is typically created automatically when you call +/// [`Context::record_that()`](crate::aggregate::Context::record_that) or retrieved +/// when replaying events to reconstruct aggregate state. 
+/// +/// ```rust +/// use eventastic::event::{DomainEvent, EventStoreEvent}; +/// +/// #[derive(Clone, PartialEq, Eq, Debug)] +/// enum MyEvent { +/// Created { event_id: String, value: i32 }, +/// } +/// +/// impl DomainEvent for MyEvent { +/// type EventId = String; +/// fn id(&self) -> &Self::EventId { +/// match self { +/// MyEvent::Created { event_id, .. } => event_id, +/// } +/// } +/// } +/// +/// let domain_event = MyEvent::Created { +/// event_id: "evt-123".to_string(), +/// value: 42, +/// }; +/// +/// let store_event = EventStoreEvent::new( +/// "evt-123".to_string(), +/// 1, // version +/// domain_event, +/// ); +/// +/// assert_eq!(store_event.id(), "evt-123"); +/// assert_eq!(store_event.version(), 1); +/// ``` +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct EventStoreEvent where - Id: Send + Debug, - Self: Send + Sync, - Evt: Send + Sync + Clone + Eq + PartialEq, + Evt: DomainEvent, { - /// The id of the event - pub id: Id, + /// The unique identifier of the event, extracted from the domain event. + /// + /// This ID is used for idempotency checking - attempting to store the same + /// event ID twice will either succeed (if content is identical) or fail + /// with an idempotency error (if content differs). + pub id: Evt::EventId, - // The version of the event + /// The version number of this event within the aggregate's event stream. + /// + /// Versions start at 0 and increment for each new event. This enables + /// optimistic concurrency control by detecting when two transactions + /// try to modify the same aggregate simultaneously. pub version: u64, - /// The actual Domain Event. + /// The actual domain event containing the business data. + /// + /// This is the event that was created by your business logic and contains + /// all the information needed to apply the change to the aggregate state. pub event: Evt, } -/// A domain event. 
-pub trait Event +impl EventStoreEvent where - Id: Send + Debug, + Evt: DomainEvent, { - fn id(&self) -> &Id; -} + /// Creates a new `EventStoreEvent`. + pub fn new(id: Evt::EventId, version: u64, event: Evt) -> Self { + Self { id, version, event } + } -impl Event for EventStoreEvent -where - Id: Send + Debug, - Self: Send + Sync, - Evt: Send + Sync + Clone + Eq + PartialEq, -{ - fn id(&self) -> &Id { + /// Returns the id of the event. + pub fn id(&self) -> &Evt::EventId { &self.id } + + /// Returns the version of the event. + pub fn version(&self) -> u64 { + self.version + } +} + +/// A domain event represents something significant that happened in your domain. +/// +/// Domain events are the building blocks of event sourcing. They capture +/// business-relevant facts that have occurred and are used to reconstruct +/// aggregate state through event replay. +/// +/// ## Design Principles +/// +/// When designing domain events, follow these principles: +/// +/// - **Past tense naming** - Events describe what happened ("OrderCreated", not "CreateOrder") +/// - **Immutable** - Events should never change after creation +/// - **Complete** - Include all data needed to apply the change +/// - **Unique IDs** - Each event instance must have a unique identifier +/// - **Business-focused** - Express domain concepts, not technical details +/// +/// ## Relationship to Aggregates +/// +/// Domain events are closely tied to [`Aggregate`](crate::aggregate::Aggregate)s: +/// +/// - Created by your business logic methods +/// - Recorded in [`Context`](crate::aggregate::Context) via `record_that()` +/// - Automatically applied to aggregate state during recording +/// - Used to reconstruct aggregate state through event replay +/// +/// ## Idempotency and Event IDs +/// +/// Event IDs are crucial for ensuring exactly-once processing. The eventastic framework +/// uses these IDs to detect duplicate events and maintain system consistency. 
+/// +/// ## String ID Example +/// +/// ```rust +/// use eventastic::event::DomainEvent; +/// +/// #[derive(Clone, PartialEq, Eq, Debug)] +/// enum OrderEvent { +/// Created { event_id: String, order_id: String, customer_id: String }, +/// ItemAdded { event_id: String, item_id: String, quantity: u32 }, +/// Shipped { event_id: String, tracking_number: String }, +/// } +/// +/// impl DomainEvent for OrderEvent { +/// type EventId = String; +/// +/// fn id(&self) -> &Self::EventId { +/// match self { +/// OrderEvent::Created { event_id, .. } => event_id, +/// OrderEvent::ItemAdded { event_id, .. } => event_id, +/// OrderEvent::Shipped { event_id, .. } => event_id, +/// } +/// } +/// } +/// +/// // Create some events +/// let created_event = OrderEvent::Created { +/// event_id: "evt-1".to_string(), +/// order_id: "order-123".to_string(), +/// customer_id: "customer-456".to_string(), +/// }; +/// +/// let item_added_event = OrderEvent::ItemAdded { +/// event_id: "evt-2".to_string(), +/// item_id: "item-789".to_string(), +/// quantity: 2, +/// }; +/// +/// // Access event IDs +/// assert_eq!(created_event.id(), "evt-1"); +/// assert_eq!(item_added_event.id(), "evt-2"); +/// +/// // Events can be compared for equality +/// let created_event_copy = created_event.clone(); +/// assert_eq!(created_event, created_event_copy); +/// ``` +/// +/// ## Using Different ID Types +/// +/// Domain events can use different types for their IDs: +/// +/// ```rust +/// use eventastic::event::DomainEvent; +/// +/// // Using numeric IDs +/// #[derive(Clone, PartialEq, Eq, Debug)] +/// enum UserEvent { +/// Registered { event_id: u64, user_id: u64, email: String }, +/// EmailChanged { event_id: u64, new_email: String }, +/// } +/// +/// impl DomainEvent for UserEvent { +/// type EventId = u64; +/// +/// fn id(&self) -> &Self::EventId { +/// match self { +/// UserEvent::Registered { event_id, .. } => event_id, +/// UserEvent::EmailChanged { event_id, .. 
} => event_id, +/// } +/// } +/// } +/// +/// // Create an event with numeric ID +/// let user_event = UserEvent::Registered { +/// event_id: 12345, +/// user_id: 67890, +/// email: "user@example.com".to_string(), +/// }; +/// +/// assert_eq!(*user_event.id(), 12345); +/// ``` +pub trait DomainEvent: Clone + Eq + PartialEq { + /// The type used to uniquely identify this event. + /// + /// This is typically a UUID, String, or other unique identifier type + /// that can distinguish this specific event instance from all others. + /// + /// ## Recommended Types + /// + /// - **`uuid::Uuid`** - Best for production systems, provides global uniqueness + /// - **`String`** - Good for development and testing, human-readable + /// - **`u64`** - Compact, good for high-performance scenarios with centralized ID generation + type EventId: Debug + Clone + Eq + PartialEq; + + /// Returns the unique identifier for this event instance. + /// + /// Each event must have a unique ID that distinguishes it from all other + /// events. This ID is used for idempotency checks and event deduplication. + /// + /// ## Idempotency + /// + /// The framework uses this ID to ensure exactly-once processing: + /// - If an event with the same ID and identical content is stored again, it succeeds + /// - If an event with the same ID but different content is stored, it fails with an idempotency error + /// + /// ## Usage in Aggregates + /// + /// The framework uses this ID internally for event deduplication and consistency. + /// You simply provide unique IDs when creating events in your business logic. + fn id(&self) -> &Self::EventId; } -/// Stream is a stream of [`EventStoreEvent`] Domain Events. 
-pub type Stream<'a, Id, Evt, Err> = BoxStream<'a, Result, Err>>; +#[cfg(test)] +mod tests { + use super::*; + use crate::test_fixtures::*; + + // Tests that EventStoreEvent::new creates a proper wrapper around domain events + #[test] + fn test_event_store_event_creation() { + let domain_event = create_add_event("add-1", 10); + let store_event = EventStoreEvent::new("add-1".to_string(), 5, domain_event.clone()); + + assert_eq!(store_event.id(), "add-1"); + assert_eq!(store_event.version(), 5); + assert_eq!(store_event.event, domain_event); + } + + // Tests that EventStoreEvent fields are accessible directly (struct-style access) + #[test] + fn test_event_store_event_fields() { + let domain_event = create_reset_event("reset-1", "calc-1"); + let store_event = EventStoreEvent { + id: "reset-1".to_string(), + version: 0, + event: domain_event.clone(), + }; + + assert_eq!(store_event.id, "reset-1"); + assert_eq!(store_event.version, 0); + assert_eq!(store_event.event, domain_event); + } + + // Tests that EventStoreEvent implements equality correctly (same ID, version, and event content) + #[test] + fn test_event_store_event_equality() { + let domain_event = create_add_event("add-1", 15); + let event1 = EventStoreEvent::new("add-1".to_string(), 3, domain_event.clone()); + let event2 = EventStoreEvent::new("add-1".to_string(), 3, domain_event); + + assert_eq!(event1, event2); + } + + // Tests that EventStoreEvent accessor methods return the correct values + #[test] + fn test_event_store_event_accessor_methods() { + let domain_event = create_subtract_event("sub-1", 25); + let store_event = EventStoreEvent::new("sub-1".to_string(), 7, domain_event); + + // Test id() accessor + assert_eq!(store_event.id(), "sub-1"); + + // Test version() accessor + assert_eq!(store_event.version(), 7); + } + + // Tests that DomainEvent::id returns the correct identifier for all event types + #[test] + fn test_domain_event_id_access() { + let reset_event = create_reset_event("reset-123", 
"calc-1"); + assert_eq!(reset_event.id(), "reset-123"); + + let add_event = create_add_event("add-456", 100); + assert_eq!(add_event.id(), "add-456"); + + let subtract_event = create_subtract_event("sub-789", 50); + assert_eq!(subtract_event.id(), "sub-789"); + + let multiply_event = create_multiply_event("mul-999", 3); + assert_eq!(multiply_event.id(), "mul-999"); + } + + // Tests that domain events with same content are equal, different content are not equal + #[test] + fn test_domain_event_equality() { + let event1 = create_add_event("add-1", 100); + let event2 = create_add_event("add-1", 100); + let event3 = create_add_event("add-1", 200); // Different value + let event4 = create_add_event("add-2", 100); // Different ID + + assert_eq!(event1, event2); + assert_ne!(event1, event3); + assert_ne!(event1, event4); + } + + // Tests that domain events can be cloned and the clone equals the original + #[test] + fn test_domain_event_clone() { + let original = create_multiply_event("mul-1", 5); + let cloned = original.clone(); + + assert_eq!(original, cloned); + assert_eq!(original.id(), cloned.id()); + } + + // Tests that EventStoreEvent::id() returns a reference to the stored ID + #[test] + fn test_event_store_event_id_reference() { + let domain_event = create_add_event("test-id", 42); + let store_event = EventStoreEvent::new("test-id".to_string(), 1, domain_event); + + // Test that id() returns a reference to the actual stored ID + let id_ref = store_event.id(); + assert_eq!(id_ref, "test-id"); + assert_eq!(id_ref, &store_event.id); // Reference to the actual field + + // Additional explicit call to ensure coverage + assert_eq!(*store_event.id(), "test-id".to_string()); + + // Test direct field access via id() method + let field_value = &store_event.id; + assert_eq!(store_event.id(), field_value); + } +} diff --git a/eventastic/src/lib.rs b/eventastic/src/lib.rs index d35ebc1..9775635 100644 --- a/eventastic/src/lib.rs +++ b/eventastic/src/lib.rs @@ -1,3 +1,153 @@ 
+//! # Eventastic +//! +//! A type-safe event sourcing and CQRS library for Rust with PostgreSQL persistence. +//! +//! Eventastic provides strong consistency guarantees through mandatory transactions, +//! built-in idempotency checking, and reliable side effect processing via the +//! transactional outbox pattern. +//! +//! ## Quick Start +//! +//! ```rust +//! use eventastic::aggregate::{Aggregate, Context, Root, SideEffect}; +//! use eventastic::event::DomainEvent; +//! use eventastic::memory::InMemoryRepository; +//! use eventastic::repository::Repository; +//! +//! // Define your domain aggregate +//! #[derive(Clone, Debug)] +//! struct Counter { +//! id: String, +//! value: i32, +//! } +//! +//! // Define your domain events +//! #[derive(Clone, Debug, PartialEq, Eq)] +//! enum CounterEvent { +//! Created { event_id: String, initial_value: i32 }, +//! Incremented { event_id: String, amount: i32 }, +//! } +//! +//! impl DomainEvent for CounterEvent { +//! type EventId = String; +//! fn id(&self) -> &Self::EventId { +//! match self { +//! CounterEvent::Created { event_id, .. } => event_id, +//! CounterEvent::Incremented { event_id, .. } => event_id, +//! } +//! } +//! } +//! +//! // Define side effects (optional) +//! #[derive(Clone, Debug, PartialEq, Eq)] +//! struct NoSideEffect { +//! id: String, +//! } +//! +//! impl SideEffect for NoSideEffect { +//! type SideEffectId = String; +//! fn id(&self) -> &Self::SideEffectId { +//! &self.id +//! } +//! } +//! +//! // Implement the Aggregate trait +//! impl Aggregate for Counter { +//! const SNAPSHOT_VERSION: u64 = 1; +//! type AggregateId = String; +//! type DomainEvent = CounterEvent; +//! type ApplyError = String; +//! type SideEffect = NoSideEffect; +//! +//! fn aggregate_id(&self) -> &Self::AggregateId { +//! &self.id +//! } +//! +//! fn apply_new(event: &Self::DomainEvent) -> Result { +//! match event { +//! CounterEvent::Created { initial_value, .. } => Ok(Counter { +//! id: "counter-1".to_string(), +//! 
value: *initial_value, +//! }), +//! _ => Err("Counter must be created first".to_string()), +//! } +//! } +//! +//! fn apply(&mut self, event: &Self::DomainEvent) -> Result<(), Self::ApplyError> { +//! match event { +//! CounterEvent::Created { .. } => Err("Counter already created".to_string()), +//! CounterEvent::Incremented { amount, .. } => { +//! self.value += amount; +//! Ok(()) +//! } +//! } +//! } +//! +//! fn side_effects(&self, _event: &Self::DomainEvent) -> Option> { +//! None +//! } +//! } +//! +//! // Usage with in-memory repository +//! # async fn example() -> Result<(), Box> { +//! let repository = InMemoryRepository::::new(); +//! +//! // Create and persist a new counter +//! let mut counter: Context = Counter::record_new( +//! CounterEvent::Created { +//! event_id: "evt-1".to_string(), +//! initial_value: 0, +//! } +//! )?; +//! +//! counter.record_that(CounterEvent::Incremented { +//! event_id: "evt-2".to_string(), +//! amount: 5, +//! })?; +//! +//! // Save to repository +//! let mut transaction = repository.begin_transaction().await?; +//! transaction.store(&mut counter).await?; +//! transaction.commit()?; +//! +//! // Load from repository +//! let loaded_counter = repository.load(&"counter-1".to_string()).await?; +//! assert_eq!(loaded_counter.state().value, 5); +//! assert_eq!(loaded_counter.version(), 1); +//! # Ok(()) +//! # } +//! ``` +//! +//! ## Architecture +//! +//! Eventastic is built around four core modules: +//! +//! - **[`aggregate`]** - Domain aggregates that encapsulate business logic and generate events +//! - **[`event`]** - Domain events and event store abstractions for persistence +//! - **[`repository`]** - Transaction-based persistence layer with read/write operations +//! - **[`memory`]** - In-memory implementation for testing and development +//! +//! ### Transaction-First Design +//! +//! Unlike many event sourcing libraries, Eventastic requires transactions for all write +//! operations. This ensures: +//! +//! 
- **ACID compliance** - All changes are atomic and consistent +//! - **Idempotency** - Duplicate events are detected and handled gracefully +//! - **Side effect reliability** - External operations are processed via outbox pattern +//! - **Optimistic concurrency** - Concurrent modifications are detected and rejected +//! +//! ## Complete Example +//! +//! For an implementation demonstrating all concepts, see the +//! [banking example](https://github.com/jdon/eventastic/tree/main/examples/bank) +//! which shows. + pub mod aggregate; pub mod event; pub mod repository; + +pub mod memory; + +#[cfg(test)] +mod test_fixtures; diff --git a/eventastic/src/memory.rs b/eventastic/src/memory.rs new file mode 100644 index 0000000..0f67964 --- /dev/null +++ b/eventastic/src/memory.rs @@ -0,0 +1,936 @@ +//! In-memory repository implementation for testing and development. +//! +//! This module provides an in-memory implementation that can be used for testing, +//! development, and scenarios where you don't need persistent storage. + +use std::collections::HashMap; +use std::fmt::Debug; +use std::hash::Hash; +use std::sync::{Arc, Mutex}; + +use async_trait::async_trait; +use futures::{Stream, stream}; + +use crate::{ + aggregate::{Aggregate, Context, SaveError}, + event::{DomainEvent, EventStoreEvent}, + repository::{Repository, RepositoryReader, RepositoryWriter, Snapshot}, +}; + +/// Error type for in-memory repository operations. +#[derive(Debug, thiserror::Error)] +pub enum InMemoryError { + #[error("Aggregate not found")] + AggregateNotFound, + #[error("Event not found")] + EventNotFound, + #[error("Version conflict: expected {expected}, got {actual}")] + VersionConflict { expected: u64, actual: u64 }, + #[error("Event already exists with different content")] + EventExists, +} + +/// In-memory storage for events and snapshots. 
+#[derive(Debug, Clone)] +struct InMemoryStorage +where + T::AggregateId: Hash + Eq, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq, + T::SideEffect: Debug + Clone, +{ + /// Events stored by aggregate ID, then by version + events: HashMap>>, + /// Events indexed by event ID for quick lookup + events_by_id: + HashMap<<::DomainEvent as DomainEvent>::EventId, (T::AggregateId, u64)>, + /// Snapshots stored by aggregate ID + snapshots: HashMap>, + /// Side effects storage + side_effects: Vec, +} + +impl Default for InMemoryStorage +where + T::AggregateId: Hash + Eq, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq, + T::SideEffect: Debug + Clone, +{ + fn default() -> Self { + Self { + events: HashMap::new(), + events_by_id: HashMap::new(), + snapshots: HashMap::new(), + side_effects: Vec::new(), + } + } +} + +/// An in-memory repository implementation that stores events and snapshots in memory. +/// +/// This repository is useful for testing, development, and scenarios where +/// persistent storage is not required. All data is lost when the repository +/// is dropped. +/// +/// # Example +/// +/// ```rust +/// use eventastic::memory::InMemoryRepository; +/// use eventastic::aggregate::{Aggregate, Root, SideEffect}; +/// use eventastic::event::DomainEvent; +/// +/// // Define a simple aggregate for demonstration +/// #[derive(Clone, Debug, PartialEq, Eq, Hash)] +/// struct Counter { +/// id: String, +/// count: i32, +/// } +/// +/// #[derive(Clone, Debug, PartialEq, Eq, Hash)] +/// enum CounterEvent { +/// Created { event_id: String, initial_count: i32 }, +/// Incremented { event_id: String, amount: i32 }, +/// } +/// +/// impl DomainEvent for CounterEvent { +/// type EventId = String; +/// fn id(&self) -> &Self::EventId { +/// match self { +/// CounterEvent::Created { event_id, .. } => event_id, +/// CounterEvent::Incremented { event_id, .. 
} => event_id, +/// } +/// } +/// } +/// +/// #[derive(Clone, Debug, PartialEq, Eq, Hash)] +/// struct CounterSideEffect { +/// id: String, +/// message: String, +/// } +/// +/// impl SideEffect for CounterSideEffect { +/// type SideEffectId = String; +/// fn id(&self) -> &Self::SideEffectId { +/// &self.id +/// } +/// } +/// +/// impl Aggregate for Counter { +/// const SNAPSHOT_VERSION: u64 = 1; +/// type AggregateId = String; +/// type DomainEvent = CounterEvent; +/// type ApplyError = String; +/// type SideEffect = CounterSideEffect; +/// +/// fn aggregate_id(&self) -> &Self::AggregateId { +/// &self.id +/// } +/// +/// fn apply_new(event: &Self::DomainEvent) -> Result { +/// match event { +/// CounterEvent::Created { initial_count, .. } => Ok(Counter { +/// id: "counter-1".to_string(), +/// count: *initial_count, +/// }), +/// _ => Err("Counter must start with Created event".to_string()), +/// } +/// } +/// +/// fn apply(&mut self, event: &Self::DomainEvent) -> Result<(), Self::ApplyError> { +/// match event { +/// CounterEvent::Created { .. } => Err("Counter already exists".to_string()), +/// CounterEvent::Incremented { amount, .. 
} => { +/// self.count += amount; +/// Ok(()) +/// } +/// } +/// } +/// +/// fn side_effects(&self, _event: &Self::DomainEvent) -> Option> { +/// None +/// } +/// } +/// +/// // Create a repository for your aggregate type +/// # async fn example() -> Result<(), Box> { +/// use eventastic::repository::Repository; +/// +/// let repository: InMemoryRepository = InMemoryRepository::new(); +/// +/// // Use transactions to store aggregates +/// let mut aggregate = Counter::record_new(CounterEvent::Created { +/// event_id: "event-1".to_string(), +/// initial_count: 0, +/// })?; +/// +/// let mut transaction = repository.begin_transaction().await?; +/// transaction.store(&mut aggregate).await?; +/// transaction.commit()?; +/// +/// // Load aggregates using the Repository trait +/// let loaded = repository.load(&"counter-1".to_string()).await?; +/// assert_eq!(loaded.state().count, 0); +/// # Ok(()) +/// # } +/// ``` +#[derive(Debug, Clone)] +pub struct InMemoryRepository +where + T::AggregateId: Hash + Eq, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq, + T::SideEffect: Debug + Clone, +{ + storage: Arc>>, +} + +impl InMemoryRepository +where + T::AggregateId: Hash + Eq, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq, + T::SideEffect: Debug + Clone, +{ + /// Creates a new empty in-memory repository. + pub fn new() -> Self { + Self { + storage: Arc::new(Mutex::new(InMemoryStorage::default())), + } + } + + /// Gets a specific event by ID. + pub fn get_event( + &self, + aggregate_id: &T::AggregateId, + event_id: &<::DomainEvent as DomainEvent>::EventId, + ) -> Option> { + let storage = self.storage.lock().unwrap(); + + if let Some((stored_aggregate_id, version)) = storage.events_by_id.get(event_id) { + if stored_aggregate_id == aggregate_id { + if let Some(aggregate_events) = storage.events.get(aggregate_id) { + return aggregate_events.get(version).cloned(); + } + } + } + + None + } + + /// Gets all events for an aggregate starting from a specific version. 
+ pub fn get_events_from( + &self, + aggregate_id: &T::AggregateId, + from_version: u64, + ) -> Vec> { + let storage = self.storage.lock().unwrap(); + + if let Some(aggregate_events) = storage.events.get(aggregate_id) { + let mut events: Vec<_> = aggregate_events + .iter() + .filter(|(version, _)| **version >= from_version) + .map(|(_, event)| event.clone()) + .collect(); + + events.sort_by_key(|event| event.version); + events + } else { + Vec::new() + } + } + + /// Returns the number of stored side effects. + pub fn side_effects_count(&self) -> usize { + let storage = self.storage.lock().unwrap(); + storage.side_effects.len() + } + + /// Returns all stored side effects. + pub fn get_all_side_effects(&self) -> Vec { + let storage = self.storage.lock().unwrap(); + storage.side_effects.clone() + } +} + +impl Default for InMemoryRepository +where + T::AggregateId: Hash + Eq, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq, + T::SideEffect: Debug + Clone, +{ + fn default() -> Self { + Self::new() + } +} + +/// Transaction-like wrapper for in-memory repository. +/// +/// This provides the same interface as a database transaction but operates +/// on the in-memory storage. All operations are immediately committed. +#[derive(Debug)] +pub struct InMemoryTransaction +where + T::AggregateId: Hash + Eq, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq, + T::SideEffect: Debug + Clone, +{ + repository: InMemoryRepository, +} + +impl InMemoryTransaction +where + T::AggregateId: Hash + Eq + Send + Sync, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq + Send + Sync, + T::SideEffect: Debug + Clone + Send + Sync, + T: Send + Sync, + T::DomainEvent: Send + Sync, + T::ApplyError: Send + Sync, +{ + /// Creates a new transaction wrapper around the repository. + pub fn new(repository: InMemoryRepository) -> Self { + Self { repository } + } + + /// Gets an aggregate by ID (equivalent to loading in a transaction). 
+ pub async fn get( + &mut self, + aggregate_id: &T::AggregateId, + ) -> Result, InMemoryError> { + Context::load(self, aggregate_id) + .await + .map_err(|e| match e { + crate::repository::RepositoryError::AggregateNotFound => { + InMemoryError::AggregateNotFound + } + crate::repository::RepositoryError::Apply(_, _) => InMemoryError::AggregateNotFound, + crate::repository::RepositoryError::Repository(db_err) => db_err, + }) + } + + /// Stores an aggregate (equivalent to saving in a transaction). + pub async fn store(&mut self, context: &mut Context) -> Result<(), InMemoryError> { + context.save(self).await.map_err(|e| match e { + SaveError::IdempotencyError(_, _) => InMemoryError::EventExists, + SaveError::OptimisticConcurrency(_, version) => InMemoryError::VersionConflict { + expected: version, + actual: version, + }, + SaveError::Repository(db_err) => db_err, + }) + } + + /// Commits the transaction (no-op for in-memory). + pub fn commit(self) -> Result<(), InMemoryError> { + Ok(()) + } + + /// Rolls back the transaction (no-op for in-memory). 
+ pub fn rollback(self) -> Result<(), InMemoryError> { + Ok(()) + } +} + +#[async_trait] +impl RepositoryReader for InMemoryTransaction +where + T::AggregateId: Hash + Eq + Send + Sync, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq + Send + Sync, + T::SideEffect: Debug + Clone + Send + Sync, + T: Send + Sync, + T::DomainEvent: Send + Sync, +{ + type DbError = InMemoryError; + + fn stream_from( + &mut self, + id: &T::AggregateId, + version: u64, + ) -> impl Stream, Self::DbError>> { + let events = self.repository.get_events_from(id, version); + stream::iter(events.into_iter().map(Ok)) + } + + async fn get_event( + &mut self, + aggregate_id: &T::AggregateId, + event_id: &<::DomainEvent as DomainEvent>::EventId, + ) -> Result>, Self::DbError> { + Ok(self.repository.get_event(aggregate_id, event_id)) + } + + async fn get_snapshot( + &mut self, + id: &T::AggregateId, + ) -> Result>, Self::DbError> { + let storage = self.repository.storage.lock().unwrap(); + + // This is the key: only return snapshots that match the current SNAPSHOT_VERSION + if let Some(snapshot) = storage.snapshots.get(id) { + if snapshot.snapshot_version == T::SNAPSHOT_VERSION { + Ok(Some(snapshot.clone())) + } else { + // Snapshot version is incompatible, return None to force event replay + Ok(None) + } + } else { + Ok(None) + } + } +} + +#[async_trait] +impl RepositoryWriter for InMemoryTransaction +where + T::AggregateId: Hash + Eq + Send + Sync, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq + Send + Sync, + T::SideEffect: Debug + Clone + Send + Sync, + T: Send + Sync, + T::DomainEvent: Send + Sync, +{ + async fn store_events( + &mut self, + id: &T::AggregateId, + events: Vec>, + ) -> Result::DomainEvent as DomainEvent>::EventId>, Self::DbError> { + let mut storage = self.repository.storage.lock().unwrap(); + let mut stored_ids = Vec::new(); + + for event in events { + // Check for duplicate event IDs + if let Some((existing_agg_id, _existing_version)) = 
storage.events_by_id.get(event.id()) + { + if existing_agg_id == id { + // Same aggregate - event already exists, don't insert again + // Let Context::save handle idempotency checking + continue; + } else { + // Different aggregate has the same event ID - this is an error + return Err(InMemoryError::EventExists); + } + } + + // Check for version conflicts (optimistic concurrency) + if let Some(aggregate_events) = storage.events.get(id) { + if aggregate_events.contains_key(&event.version) { + return Err(InMemoryError::VersionConflict { + expected: event.version, + actual: event.version, + }); + } + } + + // Store the event + storage + .events + .entry(id.clone()) + .or_default() + .insert(event.version, event.clone()); + + storage + .events_by_id + .insert(event.id().clone(), (id.clone(), event.version)); + + stored_ids.push(event.id().clone()); + } + + Ok(stored_ids) + } + + async fn store_snapshot(&mut self, snapshot: Snapshot) -> Result<(), Self::DbError> { + let mut storage = self.repository.storage.lock().unwrap(); + storage + .snapshots + .insert(snapshot.aggregate.aggregate_id().clone(), snapshot); + Ok(()) + } + + async fn store_side_effects( + &mut self, + side_effects: Vec, + ) -> Result<(), Self::DbError> { + let mut storage = self.repository.storage.lock().unwrap(); + storage.side_effects.extend(side_effects); + Ok(()) + } +} + +#[async_trait] +impl Repository for InMemoryRepository +where + T::AggregateId: Hash + Eq + Send + Sync, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq + Send + Sync, + T::SideEffect: Debug + Clone + Send + Sync, + T: Send + Sync, + T::DomainEvent: Send + Sync, + T::ApplyError: Send + Sync, +{ + type Error = InMemoryError; + + async fn load(&self, aggregate_id: &T::AggregateId) -> Result, Self::Error> { + let mut transaction = InMemoryTransaction::new(self.clone()); + Context::load(&mut transaction, aggregate_id) + .await + .map_err(|e| match e { + crate::repository::RepositoryError::AggregateNotFound => { + 
InMemoryError::AggregateNotFound + } + crate::repository::RepositoryError::Apply(_, _) => InMemoryError::AggregateNotFound, + crate::repository::RepositoryError::Repository(db_err) => db_err, + }) + } +} + +impl InMemoryRepository +where + T::AggregateId: Hash + Eq + Send + Sync, + <::DomainEvent as DomainEvent>::EventId: Hash + Eq + Send + Sync, + T::SideEffect: Debug + Clone + Send + Sync, + T: Send + Sync, + T::DomainEvent: Send + Sync, + T::ApplyError: Send + Sync, +{ + /// Begins a new "transaction" (returns a transaction-like wrapper). + pub async fn begin_transaction(&self) -> Result, InMemoryError> { + Ok(InMemoryTransaction::new(self.clone())) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + aggregate::{Context, Root, SaveError}, + repository::Repository, + test_fixtures::*, + }; + + // Tests basic repository operations: create, store, and load aggregates + #[tokio::test] + async fn test_repository_load_and_save() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create a new counter + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + + // Add some operations + context.record_that(create_add_event("add-1", 10)).unwrap(); + context + .record_that(create_subtract_event("sub-1", 3)) + .unwrap(); + + // Save using transaction + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut context).await.unwrap(); + transaction.commit().unwrap(); + + // Load the counter + let loaded_context = repository.load(&"calc-1".to_string()).await.unwrap(); + assert_eq!(loaded_context.state().result, 7); + assert_eq!(loaded_context.state().operations_count, 2); + assert_eq!(loaded_context.version(), 2); + } + + // Tests that Repository::load returns an error for non-existent aggregates + #[tokio::test] + async fn test_repository_aggregate_not_found() { + let repository: InMemoryRepository = InMemoryRepository::new(); + 
let result = repository.load(&"non-existent".to_string()).await; + assert!(result.is_err()); + } + + // Tests InMemoryTransaction operations: get, store, and commit within transactions + #[tokio::test] + async fn test_repository_transaction_operations() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create counter using transaction + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut context).await.unwrap(); + transaction.commit().unwrap(); + + // Load using transaction + let mut transaction = repository.begin_transaction().await.unwrap(); + let mut loaded_context = transaction.get(&"calc-1".to_string()).await.unwrap(); + + // Modify and save + loaded_context + .record_that(create_add_event("add-1", 20)) + .unwrap(); + transaction.store(&mut loaded_context).await.unwrap(); + transaction.commit().unwrap(); + + // Verify changes + let final_context = repository.load(&"calc-1".to_string()).await.unwrap(); + assert_eq!(final_context.state().result, 20); + } + + // Tests that storing the same event twice succeeds (idempotent behavior) + #[tokio::test] + async fn test_repository_idempotency() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create counter + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context1 = TestCounter::record_new(reset_event).unwrap(); + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut context1).await.unwrap(); + transaction.commit().unwrap(); + + // Try to apply the same event again + let same_reset_event = create_reset_event("reset-1", "calc-1"); + let mut context2 = TestCounter::record_new(same_reset_event).unwrap(); + let mut transaction = repository.begin_transaction().await.unwrap(); + let result = transaction.store(&mut context2).await; + + // Should 
succeed since content is the same + assert!(result.is_ok()); + } + + // Tests that cross-aggregate event ID conflicts are properly rejected + #[tokio::test] + async fn test_repository_idempotency_error() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create counter + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context1 = TestCounter::record_new(reset_event).unwrap(); + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut context1).await.unwrap(); + transaction.commit().unwrap(); + + // Try to apply different event with same ID + let different_reset_event = create_reset_event("reset-1", "different-calc"); + let mut context2 = TestCounter::record_new(different_reset_event).unwrap(); + let mut transaction = repository.begin_transaction().await.unwrap(); + let result = transaction.store(&mut context2).await; + + // Should fail with idempotency error + assert!(result.is_err()); + } + + // Tests that snapshot version checking works correctly (incompatible snapshots are ignored) + #[tokio::test] + async fn test_snapshot_version_checking() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create and save counter + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + context.record_that(create_add_event("add-1", 100)).unwrap(); + + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut context).await.unwrap(); + transaction.commit().unwrap(); + + // Verify snapshot was stored with correct version + let mut transaction = repository.begin_transaction().await.unwrap(); + let snapshot = transaction + .get_snapshot(&"calc-1".to_string()) + .await + .unwrap(); + + assert!(snapshot.is_some()); + let snap = snapshot.unwrap(); + assert_eq!(snap.snapshot_version, TestCounter::SNAPSHOT_VERSION); + assert_eq!(snap.version, 1); + 
assert_eq!(snap.aggregate.result, 100); + } + + // Tests InMemoryRepository utility methods for side effects tracking + #[tokio::test] + async fn test_in_memory_repository_basic_operations() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Test empty repository + assert_eq!(repository.side_effects_count(), 0); + assert!(repository.get_all_side_effects().is_empty()); + + // Create and save data + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + context.record_that(create_add_event("add-1", 50)).unwrap(); + + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut context).await.unwrap(); + transaction.commit().unwrap(); + + // Verify side effects were stored + assert!(repository.side_effects_count() > 0); + assert!(!repository.get_all_side_effects().is_empty()); + } + + // Tests that Context::save handles empty event lists correctly (no-op behavior) + #[tokio::test] + async fn test_context_save_empty_events() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create counter and save initially + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut context).await.unwrap(); + transaction.commit().unwrap(); + + // Load the counter and try to save without any changes + let mut loaded_context = repository.load(&"calc-1".to_string()).await.unwrap(); + + // No new events recorded + assert_eq!(loaded_context.take_uncommitted_events().len(), 0); + + // Save should succeed with empty events + let mut transaction = repository.begin_transaction().await.unwrap(); + let result = loaded_context.save(&mut transaction).await; + assert!(result.is_ok()); + transaction.commit().unwrap(); + } + + // Tests that Context::save properly detects and reports 
idempotency violations + #[tokio::test] + async fn test_context_save_idempotency_error() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create and save initial counter + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context1 = TestCounter::record_new(reset_event).unwrap(); + let mut transaction = repository.begin_transaction().await.unwrap(); + context1.save(&mut transaction).await.unwrap(); + transaction.commit().unwrap(); + + // Load the counter and add an event with a specific ID + let mut loaded_context = repository.load(&"calc-1".to_string()).await.unwrap(); + loaded_context + .record_that(create_add_event("add-1", 50)) + .unwrap(); + let mut transaction = repository.begin_transaction().await.unwrap(); + loaded_context.save(&mut transaction).await.unwrap(); + transaction.commit().unwrap(); + + // Now try to add a DIFFERENT event with the same ID "add-1" + let mut loaded_context2 = repository.load(&"calc-1".to_string()).await.unwrap(); + loaded_context2 + .record_that(create_add_event("add-1", 100)) + .unwrap(); // Different value! 
+ let mut transaction = repository.begin_transaction().await.unwrap(); + + // Should fail with idempotency error (same event ID, different content) + let result = loaded_context2.save(&mut transaction).await; + assert!(matches!(result, Err(SaveError::IdempotencyError(_, _)))); + } + + // Tests that Context::save properly detects and reports optimistic concurrency violations + #[tokio::test] + async fn test_context_save_optimistic_concurrency() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create and save initial counter + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + let mut transaction = repository.begin_transaction().await.unwrap(); + context.save(&mut transaction).await.unwrap(); + transaction.commit().unwrap(); + + // Load the same counter in two different contexts (simulating concurrent access) + let mut context1 = repository.load(&"calc-1".to_string()).await.unwrap(); + let mut context2 = repository.load(&"calc-1".to_string()).await.unwrap(); + + // Modify both contexts - they should both try to add version 1 events + context1.record_that(create_add_event("add-1", 10)).unwrap(); + context2.record_that(create_add_event("add-2", 20)).unwrap(); + + // First save should succeed + let mut transaction1 = repository.begin_transaction().await.unwrap(); + let result1 = context1.save(&mut transaction1).await; + assert!(result1.is_ok()); + transaction1.commit().unwrap(); + + // Second save should fail with repository error (version conflict) + let mut transaction2 = repository.begin_transaction().await.unwrap(); + let result2 = context2.save(&mut transaction2).await; + // The in-memory implementation returns a Repository error for version conflicts + assert!(matches!( + result2, + Err(SaveError::Repository(InMemoryError::VersionConflict { .. 
})) + )); + } + + // Tests that Context::load correctly reconstructs aggregates from events and snapshots + #[tokio::test] + async fn test_context_load_with_snapshot() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create and save counter with events + let mut context = TestCounter::record_new(create_reset_event("reset-1", "calc-1")).unwrap(); + context.record_that(create_add_event("add-1", 100)).unwrap(); + + let mut transaction = repository.begin_transaction().await.unwrap(); + context.save(&mut transaction).await.unwrap(); + transaction.commit().unwrap(); + + // Load using Context::load method directly + let mut transaction = repository.begin_transaction().await.unwrap(); + let mut loaded_context = Context::load(&mut transaction, &"calc-1".to_string()) + .await + .unwrap(); + + assert_eq!(loaded_context.state().result, 100); + assert_eq!(loaded_context.version(), 1); + assert_eq!(loaded_context.take_uncommitted_events().len(), 0); + } + + // Tests that Context::regenerate_side_effects can regenerate side effects for specific events + #[tokio::test] + async fn test_regenerate_side_effects_specific_event() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create counter with multiple events + let mut context = TestCounter::record_new(create_reset_event("reset-1", "calc-1")).unwrap(); + context.record_that(create_add_event("add-1", 100)).unwrap(); + context + .record_that(create_subtract_event("sub-1", 25)) + .unwrap(); + + let mut transaction = repository.begin_transaction().await.unwrap(); + context.save(&mut transaction).await.unwrap(); + transaction.commit().unwrap(); + + // Regenerate side effects for the Add event + let mut transaction = repository.begin_transaction().await.unwrap(); + let side_effects = Context::::regenerate_side_effects( + &mut transaction, + &"calc-1".to_string(), + &"add-1".to_string(), + ) + .await + .unwrap(); + + assert!(side_effects.is_some()); + let effects = side_effects.unwrap(); 
+ assert_eq!(effects.len(), 1); // Add event generates 1 side effect + + assert!(matches!( + effects[0], + TestSideEffect::LogOperation { ref operation, .. } if operation == "Add 100" + )); + } + + // Tests that Context::regenerate_side_effects returns None for non-existent events + #[tokio::test] + async fn test_regenerate_side_effects_event_not_found() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create counter + let mut context = TestCounter::record_new(create_reset_event("reset-1", "calc-1")).unwrap(); + let mut transaction = repository.begin_transaction().await.unwrap(); + context.save(&mut transaction).await.unwrap(); + transaction.commit().unwrap(); + + // Try to regenerate side effects for non-existent event + let mut transaction = repository.begin_transaction().await.unwrap(); + let side_effects = Context::::regenerate_side_effects( + &mut transaction, + &"calc-1".to_string(), + &"non-existent-event".to_string(), + ) + .await + .unwrap(); + + assert!(side_effects.is_none()); + } + + // Tests a complete end-to-end workflow with multiple operations and side effects + #[tokio::test] + async fn test_complete_counter_workflow() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create counter + let reset_event = create_reset_event("reset-1", "my-counter"); + let mut counter = TestCounter::record_new(reset_event).unwrap(); + + // Perform calculations + counter.record_that(create_add_event("add-1", 100)).unwrap(); + counter + .record_that(create_subtract_event("sub-1", 25)) + .unwrap(); + counter + .record_that(create_multiply_event("mul-1", 2)) + .unwrap(); + counter.record_that(create_add_event("add-2", 50)).unwrap(); + + // Save to repository using transaction + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut counter).await.unwrap(); + transaction.commit().unwrap(); + + // Load and verify final state + let loaded_counter = 
repository.load(&"my-counter".to_string()).await.unwrap(); + assert_eq!(loaded_counter.state().result, 200); // ((100 - 25) * 2) + 50 = 200 + assert_eq!(loaded_counter.state().operations_count, 4); + assert_eq!(loaded_counter.version(), 4); + + // Verify side effects were stored + // Reset: 2 side effects, Add: 1, Subtract: 1, Multiply: 0, Add: 1 = 5 total + let expected_side_effects = 5; + assert_eq!(repository.side_effects_count(), expected_side_effects); + + let side_effects = repository.get_all_side_effects(); + assert_eq!(side_effects.len(), expected_side_effects); + } + + // Tests error handling and recovery behavior when invalid operations are attempted + #[tokio::test] + async fn test_error_handling_and_recovery() { + let repository: InMemoryRepository = InMemoryRepository::new(); + + // Create counter + let mut counter = TestCounter::record_new(create_reset_event("reset-1", "calc-1")).unwrap(); + + // Add some value + counter.record_that(create_add_event("add-1", 10)).unwrap(); + + // Try invalid operation (should fail) + let invalid_result = counter.record_that(create_multiply_event("mul-zero", 0)); + assert!(matches!(invalid_result, Err(TestError::DivisionByZero))); + + // State should be unchanged after error + assert_eq!(counter.state().result, 10); + assert_eq!(counter.version(), 1); + + // Continue with valid operations + counter + .record_that(create_multiply_event("mul-valid", 3)) + .unwrap(); + assert_eq!(counter.state().result, 30); + assert_eq!(counter.version(), 2); + + // Save and verify persistence + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut counter).await.unwrap(); + transaction.commit().unwrap(); + + let loaded = repository.load(&"calc-1".to_string()).await.unwrap(); + assert_eq!(loaded.state().result, 30); + assert_eq!(loaded.version(), 2); + } + + // Tests that InMemoryRepository::default() works correctly + #[tokio::test] + async fn test_in_memory_repository_default() { + let 
repository: InMemoryRepository = InMemoryRepository::default(); + + // Should behave exactly like new() + assert_eq!(repository.side_effects_count(), 0); + assert!(repository.get_all_side_effects().is_empty()); + + // Should be able to store and retrieve data + let reset_event = create_reset_event("reset-1", "calc-1"); + let mut context = TestCounter::record_new(reset_event).unwrap(); + + let mut transaction = repository.begin_transaction().await.unwrap(); + transaction.store(&mut context).await.unwrap(); + transaction.commit().unwrap(); + + let loaded = repository.load(&"calc-1".to_string()).await.unwrap(); + assert_eq!(loaded.state().id, "calc-1"); + } +} diff --git a/eventastic/src/repository.rs b/eventastic/src/repository.rs index 6ceda8a..a77e1cf 100644 --- a/eventastic/src/repository.rs +++ b/eventastic/src/repository.rs @@ -1,36 +1,82 @@ +//! Repository abstractions for event sourcing persistence. +//! +//! This module defines the core persistence traits that concrete implementations +//! (like `eventastic_postgres`) must implement to support event sourcing operations. +//! +//! ## Repository Traits +//! +//! ### [`RepositoryReader`] +//! Provides read-only access to event streams and snapshots. Use for queries, +//! reporting, or loading aggregates without modification. +//! +//! ### [`RepositoryWriter`] +//! Extends [`RepositoryReader`] with write operations within a transaction boundary. +//! Required for any operation that modifies aggregate state or produces side effects. +//! +//! ### [`Repository`] +//! High-level abstraction for simple aggregate loading without explicit +//! transaction management. +//! +//! ## Usage Pattern +//! +//! ```rust,ignore +//! // Begin transaction for write operations +//! let mut transaction = repository.begin_transaction().await?; +//! let mut context = transaction.get(&aggregate_id).await?; +//! context.record_that(event)?; +//! transaction.store(&mut context).await?; +//! transaction.commit().await?; +//! ``` +//! +//! 
For the complete event sourcing workflow, see [`crate::event`] and [`crate::aggregate`]. +//! use async_trait::async_trait; -use futures::TryStreamExt; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use futures::Stream; use std::fmt::Debug; use crate::{ aggregate::{Aggregate, Context}, - event::{EventStoreEvent, Stream}, + event::{DomainEvent, EventStoreEvent}, }; -/// List of possible errors that can be returned by the [`RepositoryTransaction`] trait. +/// List of possible errors that can be returned by the [`RepositoryWriter`] trait. +/// +/// Each error type represents a specific failure scenario that can occur during +/// repository operations. Understanding these errors is crucial for implementing +/// proper error handling and recovery strategies. #[derive(Debug, thiserror::Error)] pub enum RepositoryError { - /// This error is returned by [`RepositoryTransaction::get`] when the - /// desired Aggregate could not be found in the data store. + /// This error is returned by [`RepositoryWriter`] methods when the + /// desired [`Aggregate`] could not be found in the data store. #[error("Aggregate was not found")] AggregateNotFound, - /// This error is returned by [`RepositoryTransaction::get`] when - /// the desired [Aggregate] returns an error while applying a Domain Event + /// This error is returned by [`RepositoryWriter`] methods when + /// the desired [`Aggregate`] returns an error while applying a Domain Event. /// - /// This usually implies the Event contains corrupted or invalid data. + /// ## When this occurs: + /// - Event contains corrupted or invalid data + /// - Event violates business rules or invariants + /// - Schema evolution issues where old events can't be applied to new aggregates + /// - Serialization/deserialization failures #[error("Failed to apply events to aggregate from event stream. 
Event Id: {0} caused: {1}")] Apply(EventId, #[source] E), - /// This error is returned when the [`RepositoryTransaction::get`] returns + /// This error is returned when [`RepositoryWriter`] methods return /// an unexpected error while streaming back the Aggregate's Event Stream. + /// + /// ## When this occurs: + /// - Database connection failures + /// - Network connectivity issues + /// - Serialization/deserialization errors + /// - Database query failures + /// - Transaction isolation issues #[error("Event store failed while streaming events: {0}")] Repository(#[from] DE), } -/// A snap of the [`Aggregate`] that is persisted in the db. -#[derive(Serialize, Deserialize, Debug, Clone)] +/// A snapshot of the [`Aggregate`] that is persisted in the db. +#[derive(Debug, Clone)] pub struct Snapshot where T: Aggregate, @@ -40,35 +86,15 @@ where pub snapshot_version: u64, } -impl Snapshot -where - T: Aggregate, -{ - pub fn id(&self) -> &T::AggregateId { - self.aggregate.aggregate_id() - } -} - -/// A RepositoryTransaction is an object that allows to load and save -/// an [`Aggregate`] from and to a persistent data store +/// A RepositoryReader provides read-only access to aggregate data. +/// +/// This trait defines the interface for reading events and snapshots from the event store. +/// It can be implemented by both transactional and non-transactional repository +/// implementations to enable efficient read operations without requiring write access. #[async_trait] -pub trait RepositoryTransaction -where - T: Aggregate, - T::AggregateId: Clone + Send + Sync, - T::ApplyError: Debug, - Self: Sized + Send + Sync, -{ - /// The error type returned by the Store during a [`RepositoryTransaction::stream`] and [`RepositoryTransaction::append`] call. - type DbError: Send + Sync; - - /// Opens an Event Stream, effectively streaming all Domain Events - /// of an Event Stream back in the application. 
- #[doc(hidden)] - fn stream( - &mut self, - id: &T::AggregateId, - ) -> Stream; +pub trait RepositoryReader { + /// The error type returned by the Store during repository operations. + type DbError; /// Opens an Event Stream, effectively streaming all Domain Events /// of an Event Stream back in the application from a specific version. @@ -77,132 +103,76 @@ where &mut self, id: &T::AggregateId, version: u64, - ) -> Stream; + ) -> impl Stream, Self::DbError>>; - // Get a specific event from the event store. + /// Get a specific event from the event store by its ID. #[doc(hidden)] async fn get_event( &mut self, aggregate_id: &T::AggregateId, - event_id: &T::DomainEventId, - ) -> Result< - Option::DomainEvent>>, - Self::DbError, - >; + event_id: &<::DomainEvent as DomainEvent>::EventId, + ) -> Result>, Self::DbError>; - /// Appends a new Domain Events to the specified Event Stream. - /// - /// The result of this operation is the new [Version] of the Event Stream - /// with the specified Domain Events added to it. + /// Retrieves the latest snapshot of the Aggregate from the Event Store. + /// This method must check that the snapshot version matches the expected + /// [`Aggregate::SNAPSHOT_VERSION`] to ensure compatibility. #[doc(hidden)] - async fn append( + async fn get_snapshot( &mut self, id: &T::AggregateId, - events: Vec>, - ) -> Result<(), Self::DbError>; - - #[doc(hidden)] - async fn get_snapshot(&mut self, id: &T::AggregateId) -> Option> - where - T: DeserializeOwned; + ) -> Result>, Self::DbError>; +} +/// A RepositoryTransaction provides transactional access to aggregate persistence. +/// +/// This trait extends [`RepositoryReader`] to provide write operations within a transaction +/// boundary. All write operations in event sourcing must be performed within a transaction +/// to ensure consistency between events, snapshots, and side effects. 
+#[async_trait] +pub trait RepositoryWriter: RepositoryReader { + /// Appends new Domain Events to the specified Event Stream. + /// + /// Returns a list of the Domain Event Ids that were successfully stored. #[doc(hidden)] - async fn store_snapshot(&mut self, snapshot: Snapshot) -> Result<(), Self::DbError> - where - T: Serialize; - - /// Loads an Aggregate Root instance from the data store, - /// referenced by its unique identifier. - async fn get( + async fn store_events( &mut self, id: &T::AggregateId, - ) -> Result, RepositoryError> - where - T: DeserializeOwned, - { - let snapshot = self.get_snapshot(id).await; - - let (context, version) = if let Some(snapshot) = snapshot { - if snapshot.snapshot_version == T::SNAPSHOT_VERSION { - // Snapshot is valid so return it - let context: Context = snapshot.into(); - // We want to get the next event in the stream - let version = context.version() + 1; - (Some(context), version) - } else { - (None, 0) - } - } else { - (None, 0) - }; + events: Vec>, + ) -> Result::DomainEvent as DomainEvent>::EventId>, Self::DbError>; - let ctx = self - .stream_from(id, version) - .map_err(RepositoryError::Repository) - .try_fold(context, |ctx: Option>, event| async move { - let new_ctx_result = match ctx { - None => Context::rehydrate_from(&event), - Some(ctx) => ctx.apply_rehydrated_event(&event), - }; - - let new_ctx = new_ctx_result.map_err(|e| RepositoryError::Apply(event.id, e))?; - - Ok(Some(new_ctx)) - }) - .await?; - - ctx.ok_or(RepositoryError::AggregateNotFound) - } - - /// Stores a new version of an Aggregate Root instance to the data store. 
- async fn store( - &mut self, - root: &mut Context, - ) -> Result<(), RepositoryError> - where - T: Serialize, - T::SideEffect: Serialize, - { - let events_to_commit = root.take_uncommitted_events(); - - if events_to_commit.is_empty() { - return Ok(()); - } - - let side_effects_to_commit = root.take_uncommitted_side_effects(); - - let aggregate_id = root.aggregate_id(); - - let snapshot_version = root.snapshot_version(); - let snapshot_to_store = root.state(); - - let snapshot = Snapshot { - snapshot_version, - aggregate: snapshot_to_store.clone(), - version: root.version(), - }; - - self.append(aggregate_id, events_to_commit) - .await - .map_err(RepositoryError::Repository)?; - - self.store_snapshot(snapshot) - .await - .map_err(RepositoryError::Repository)?; - - self.insert_side_effects(side_effects_to_commit).await?; - - Ok(()) - } + /// Stores a snapshot of the aggregate state to optimize future loading. + #[doc(hidden)] + async fn store_snapshot(&mut self, snapshot: Snapshot) -> Result<(), Self::DbError>; - /// Insert side effects in to the repository + /// Insert side effects into the repository #[doc(hidden)] - async fn insert_side_effects( + async fn store_side_effects( &mut self, - outbox_item: Vec, - ) -> Result<(), Self::DbError> - where - T::SideEffect: Serialize; + side_effects: Vec, + ) -> Result<(), Self::DbError>; +} - async fn commit(self) -> Result<(), Self::DbError>; +/// A Repository provides high-level operations for loading +/// [`Aggregate`] instances without requiring explicit transaction management. +/// +/// This trait is intended for simpler use cases where automatic transaction +/// handling is preferred over manual transaction control. +#[async_trait] +pub trait Repository { + /// The error type returned by the Repository during operations. + type Error; + + /// Loads an aggregate from the repository by its ID. 
+ /// + /// This method automatically handles transaction management and will + /// load the latest state of the aggregate by replaying its event stream. + /// If a snapshot is available, it will be used to optimize the loading process. + /// + /// # Errors + /// + /// Returns repository-specific errors which may include: + /// - Aggregate not found errors + /// - Database connection errors + /// - Event application errors + async fn load(&self, aggregate_id: &T::AggregateId) -> Result, Self::Error>; } diff --git a/eventastic/src/test_fixtures.rs b/eventastic/src/test_fixtures.rs new file mode 100644 index 0000000..6d6d4ee --- /dev/null +++ b/eventastic/src/test_fixtures.rs @@ -0,0 +1,191 @@ +//! Shared test fixtures for eventastic tests. +//! +//! This module provides common test aggregates, events, and side effects +//! that can be used across different test modules to ensure consistency +//! and reduce code duplication. + +use crate::{ + aggregate::{Aggregate, SideEffect}, + event::DomainEvent, +}; + +/// A test counter aggregate for use in tests. +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct TestCounter { + pub id: String, + pub result: i32, + pub operations_count: i32, +} + +/// Test events for the TestCounter aggregate. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum TestEvent { + Reset { + event_id: String, + counter_id: String, + }, + Add { + event_id: String, + value: i32, + }, + Subtract { + event_id: String, + value: i32, + }, + Multiply { + event_id: String, + value: i32, + }, +} + +/// Test errors for the TestCounter aggregate. 
+#[derive(Clone, Debug, PartialEq, Eq)] +pub enum TestError { + InvalidOperation, + DivisionByZero, +} + +impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + TestError::InvalidOperation => write!(f, "Invalid operation"), + TestError::DivisionByZero => write!(f, "Division by zero"), + } + } +} + +impl std::error::Error for TestError {} + +/// Test side effects for the TestCounter aggregate. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub enum TestSideEffect { + LogOperation { id: String, operation: String }, + NotifyUser { id: String, message: String }, +} + +impl DomainEvent for TestEvent { + type EventId = String; + + fn id(&self) -> &Self::EventId { + match self { + TestEvent::Reset { event_id, .. } => event_id, + TestEvent::Add { event_id, .. } => event_id, + TestEvent::Subtract { event_id, .. } => event_id, + TestEvent::Multiply { event_id, .. } => event_id, + } + } +} + +impl SideEffect for TestSideEffect { + type SideEffectId = String; + + fn id(&self) -> &Self::SideEffectId { + match self { + TestSideEffect::LogOperation { id, .. } => id, + TestSideEffect::NotifyUser { id, .. } => id, + } + } +} + +impl Aggregate for TestCounter { + const SNAPSHOT_VERSION: u64 = 1; + type AggregateId = String; + type DomainEvent = TestEvent; + type ApplyError = TestError; + type SideEffect = TestSideEffect; + + fn aggregate_id(&self) -> &Self::AggregateId { + &self.id + } + + fn apply_new(event: &Self::DomainEvent) -> Result { + match event { + TestEvent::Reset { counter_id, .. } => Ok(TestCounter { + id: counter_id.clone(), + result: 0, + operations_count: 0, + }), + _ => Err(TestError::InvalidOperation), + } + } + + fn apply(&mut self, event: &Self::DomainEvent) -> Result<(), Self::ApplyError> { + match event { + TestEvent::Reset { .. } => Err(TestError::InvalidOperation), + TestEvent::Add { value, .. 
} => { + self.result += value; + self.operations_count += 1; + Ok(()) + } + TestEvent::Subtract { value, .. } => { + self.result -= value; + self.operations_count += 1; + Ok(()) + } + TestEvent::Multiply { value, .. } => { + if *value == 0 { + return Err(TestError::DivisionByZero); + } + self.result *= value; + self.operations_count += 1; + Ok(()) + } + } + } + + fn side_effects(&self, event: &Self::DomainEvent) -> Option> { + match event { + TestEvent::Reset { event_id, .. } => Some(vec![ + TestSideEffect::LogOperation { + id: format!("{event_id}-log"), + operation: "Reset".to_string(), + }, + TestSideEffect::NotifyUser { + id: format!("{event_id}-notify"), + message: "Counter has been reset".to_string(), + }, + ]), + TestEvent::Add { event_id, value } => Some(vec![TestSideEffect::LogOperation { + id: format!("{event_id}-log"), + operation: format!("Add {value}"), + }]), + TestEvent::Subtract { event_id, value } => Some(vec![TestSideEffect::LogOperation { + id: format!("{event_id}-log"), + operation: format!("Subtract {value}"), + }]), + TestEvent::Multiply { .. } => None, // No side effects for multiply + } + } +} + +/// Helper function to create a basic reset event. +pub fn create_reset_event(event_id: &str, counter_id: &str) -> TestEvent { + TestEvent::Reset { + event_id: event_id.to_string(), + counter_id: counter_id.to_string(), + } +} + +/// Helper function to create an add event. +pub fn create_add_event(event_id: &str, value: i32) -> TestEvent { + TestEvent::Add { + event_id: event_id.to_string(), + value, + } +} + +/// Helper function to create a subtract event. +pub fn create_subtract_event(event_id: &str, value: i32) -> TestEvent { + TestEvent::Subtract { + event_id: event_id.to_string(), + value, + } +} + +/// Helper function to create a multiply event. 
+pub fn create_multiply_event(event_id: &str, value: i32) -> TestEvent { + TestEvent::Multiply { + event_id: event_id.to_string(), + value, + } +} diff --git a/eventastic_outbox_postgres/Cargo.toml b/eventastic_outbox_postgres/Cargo.toml new file mode 100644 index 0000000..d0de095 --- /dev/null +++ b/eventastic_outbox_postgres/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "eventastic_outbox_postgres" +version = "0.5.0" +edition = "2024" + +[dependencies] +eventastic_postgres = { version = "0.5", path = "../eventastic_postgres" } +async-trait = { workspace = true } +sqlx = { workspace = true } +chrono = { workspace = true } +uuid = { workspace = true } +eventastic = { path = "../eventastic", version = "0.5" } +futures-util = { workspace = true } +tokio = { workspace = true } +thiserror = { workspace = true } diff --git a/eventastic_outbox_postgres/src/lib.rs b/eventastic_outbox_postgres/src/lib.rs new file mode 100644 index 0000000..fe9a2a8 --- /dev/null +++ b/eventastic_outbox_postgres/src/lib.rs @@ -0,0 +1,22 @@ +//! # Eventastic PostgreSQL Outbox Pattern Implementation +//! +//! This crate provides a PostgreSQL-based implementation of the transactional outbox pattern +//! for the eventastic event sourcing framework. +//! +//! ## Overview +//! +//! The outbox pattern ensures reliable delivery of side effects by storing them in the same +//! database transaction as the domain events. A background worker processes these side effects +//! asynchronously, providing guaranteed delivery semantics. +//! +//! ## Components +//! +//! - [`TableOutbox`] - Default outbox implementation using a PostgreSQL table +//! - [`OutboxMessage`] - Wrapper for side effects stored in the outbox +//! - [`SideEffectHandler`] - Trait for processing side effects from the outbox +//! 
- [`RepositoryOutboxExt`] - Extension methods for running outbox workers +mod outbox; +mod outbox_message; + +pub use outbox::*; +pub use outbox_message::OutboxMessage; diff --git a/eventastic_outbox_postgres/src/outbox.rs b/eventastic_outbox_postgres/src/outbox.rs new file mode 100644 index 0000000..3084d92 --- /dev/null +++ b/eventastic_outbox_postgres/src/outbox.rs @@ -0,0 +1,362 @@ +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use eventastic::aggregate::{Aggregate, SideEffect}; +use eventastic::event::DomainEvent; +use eventastic_postgres::{ + EncryptionProvider, Pickle, PostgresRepository, PostgresTransaction, SideEffectDbError, + SideEffectStorage, +}; +use sqlx::{Postgres, Transaction}; +use std::sync::Arc; +use thiserror::Error; +use uuid::Uuid; + +use crate::OutboxMessage; + +/// Errors that can occur during outbox operations. +#[derive(Error, Debug)] +pub enum OutboxError { + /// A database operation failed. + #[error("Database error: {0}")] + Database(sqlx::Error), + /// Failed to encrypt or decrypt side effect data. + #[error("Encryption error: {0}")] + Encryption(EncryptionError), + /// Failed to pickle or unpickle side effect data. + #[error("Side effect pickling error: {0}")] + SideEffectPickling(SideEffectPicklingError), + /// Encryption provider returned wrong number of items. 
+ #[error("Encryption provider returned wrong number of items")] + EncryptionProviderReturnedWrongNumberOfItems, +} + +impl From for OutboxError { + fn from(e: sqlx::Error) -> Self { + OutboxError::Database(e) + } +} + +impl From> for SideEffectDbError { + fn from(e: OutboxError) -> Self { + match e { + OutboxError::Database(err) => SideEffectDbError::DbError(err), + OutboxError::Encryption(err) => SideEffectDbError::Encryption(err), + OutboxError::SideEffectPickling(err) => SideEffectDbError::SideEffectPicklingError(err), + OutboxError::EncryptionProviderReturnedWrongNumberOfItems => { + SideEffectDbError::EncryptionProviderReturnedWrongNumberOfItems + } + } + } +} + +/// Default implementation of [`SideEffectStorage`] that stores messages in an `outbox` table. +#[derive(Clone, Copy, Default)] +pub struct TableOutbox { + encryption_provider: E, +} + +impl TableOutbox { + pub fn new(encryption_provider: E) -> Self { + Self { + encryption_provider, + } + } +} + +#[async_trait] +impl SideEffectStorage + for TableOutbox +where + S: SideEffect + Pickle + Send + Sync + 'static, +{ + async fn store_side_effects( + &self, + transaction: &mut Transaction<'_, Postgres>, + items: Vec, + ) -> Result<(), SideEffectDbError::Error>> { + let mut ids: Vec = Vec::with_capacity(items.len()); + let mut messages: Vec> = Vec::with_capacity(items.len()); + let mut retries: Vec = Vec::with_capacity(items.len()); + let mut requeues: Vec = Vec::with_capacity(items.len()); + let mut created_ats: Vec> = Vec::with_capacity(items.len()); + + for chunk in items.chunks(self.encryption_provider.max_batch_size()) { + let mut plain = Vec::with_capacity(chunk.len()); + for side_effect in chunk { + let id = *side_effect.id(); + let msg = side_effect + .pickle() + .map_err(SideEffectDbError::SideEffectPicklingError)?; + ids.push(id); + plain.push(msg); + retries.push(0); + requeues.push(true); + created_ats.push(Utc::now()); + } + let number_of_items = plain.len(); + let mut cipher = self + 
.encryption_provider + .encrypt(plain) + .await + .map_err(SideEffectDbError::Encryption)?; + if number_of_items != cipher.len() { + return Err(SideEffectDbError::EncryptionProviderReturnedWrongNumberOfItems); + } + messages.append(&mut cipher); + } + + sqlx::query( + "INSERT INTO outbox(id, message, retries, requeue, created_at) + SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::boolean[], $5::timestamptz[]) + ON CONFLICT (id) DO UPDATE SET + message = excluded.message, + retries = excluded.retries, + requeue = excluded.requeue, + created_at = excluded.created_at", + ) + .bind(&ids) + .bind(&messages) + .bind(&retries) + .bind(&requeues) + .bind(&created_ats) + .execute(transaction.as_mut()) + .await?; + + Ok(()) + } +} + +#[async_trait] +pub trait TransactionOutboxExt +where + T: SideEffect + Pickle + Send + 'static, + T::SideEffectId: Clone + Send + 'static, + for<'sql> T::SideEffectId: + sqlx::Decode<'sql, Postgres> + sqlx::Type + sqlx::Encode<'sql, Postgres> + Unpin, +{ + async fn get_outbox_batch( + &mut self, + ) -> Result>, OutboxError>; + + async fn delete_outbox_item( + &mut self, + id: T::SideEffectId, + ) -> Result<(), OutboxError>; + + async fn update_outbox_item( + &mut self, + item: OutboxMessage, + ) -> Result<(), OutboxError>; +} + +#[async_trait] +impl TransactionOutboxExt::Error> + for PostgresTransaction<'_, T, TableOutbox, E> +where + T: Aggregate + Send + Sync + Pickle + 'static, + T::SideEffect: SideEffect + Pickle + Send + Sync, + T::DomainEvent: DomainEvent + Pickle + Send + Sync, + T::ApplyError: Send + Sync, + for<'sql> ::SideEffectId: + sqlx::Decode<'sql, Postgres> + sqlx::Type + sqlx::Encode<'sql, Postgres> + Unpin, + E: EncryptionProvider + Send + Sync + 'static, +{ + async fn get_outbox_batch( + &mut self, + ) -> Result< + Vec>, + OutboxError::Error>, + > { + #[derive(sqlx::FromRow)] + struct OutboxRow { + message: Vec, + retries: i32, + requeue: bool, + } + + let rows = sqlx::query_as::<_, OutboxRow>( + "SELECT message, 
retries, requeue FROM outbox \ + WHERE requeue = true ORDER BY created_at \ + FOR UPDATE SKIP LOCKED LIMIT 10", + ) + .fetch_all(self.inner_mut().as_mut()) + .await?; + + let mut messages = Vec::with_capacity(rows.len()); + for chunk in rows.chunks(self.encryption_provider().max_batch_size()) { + let cipher: Vec<_> = chunk.iter().map(|row| row.message.clone()).collect(); + let number_of_items = cipher.len(); + let mut plain = self + .encryption_provider() + .decrypt(cipher) + .await + .map_err(OutboxError::Encryption)?; + if plain.len() != number_of_items { + return Err(OutboxError::EncryptionProviderReturnedWrongNumberOfItems); + } + messages.append(&mut plain); + } + + rows.into_iter() + .zip(messages.into_iter()) + .map(|(row, message)| { + let msg = + T::SideEffect::unpickle(&message).map_err(OutboxError::SideEffectPickling)?; + Ok(OutboxMessage::new(msg, row.retries as u16, row.requeue)) + }) + .collect::, OutboxError::Error>>>() + } + + async fn delete_outbox_item( + &mut self, + id: ::SideEffectId, + ) -> Result<(), OutboxError::Error>> { + sqlx::query("DELETE FROM outbox WHERE id = $1") + .bind(id) + .execute(self.inner_mut().as_mut()) + .await?; + Ok(()) + } + + async fn update_outbox_item( + &mut self, + item: OutboxMessage, + ) -> Result<(), OutboxError::Error>> { + sqlx::query("UPDATE outbox SET retries = $2, requeue = $3 WHERE id = $1") + .bind(item.message.id()) + .bind(i32::from(item.retries)) + .bind(item.requeue) + .execute(self.inner_mut().as_mut()) + .await?; + + Ok(()) + } +} + +/// Trait for handling side effects pulled from the outbox. +/// +/// Implementors define how to process side effects that have been stored +/// in the transactional outbox. The handler controls retry behavior through +/// its return values. 
+/// +/// # Return Values +/// +/// - `Ok(())` - Side effect processed successfully, message will be deleted +/// - `Err((true, E))` - Processing failed, message will be requeued for retry +/// - `Err((false, E))` - Processing failed, message will be marked as non-retryable +#[async_trait] +pub trait SideEffectHandler { + type SideEffect: SideEffect; + type Error: Send; + + /// Handle a side effect message. + /// + /// This method is called for each side effect retrieved from the outbox. + /// The implementation should process the side effect and return appropriate + /// results to control retry behavior. + /// + /// # Parameters + /// + /// - `msg` - The side effect to process + /// - `retries` - Number of times this message has been retried + /// + /// # Returns + /// + /// - `Ok(())` - Processing successful, message will be deleted from outbox + /// - `Err((true, E))` - Processing failed, message will be requeued for retry + /// - `Err((false, E))` - Processing failed, message will not be retried + async fn handle(&self, msg: &Self::SideEffect, retries: u16) + -> Result<(), (bool, Self::Error)>; +} + +/// Extension trait for running the outbox worker using a [`TableOutbox`]. 
+#[async_trait] +pub trait RepositoryOutboxExt +where + T: Aggregate + Send + Sync + Pickle + 'static, + T::DomainEvent: Pickle + Send + Sync, + T::SideEffect: SideEffect + Pickle + Clone + Send + Sync + 'static, + ::SideEffectId: Clone + Send + 'static, + H: SideEffectHandler + Send + Sync + 'static, + E: EncryptionProvider + Clone + Send + Sync + 'static, + for<'a> PostgresTransaction<'a, T, TableOutbox, E>: + TransactionOutboxExt::Error>, + for<'sql> ::SideEffectId: + sqlx::Decode<'sql, Postgres> + sqlx::Type + sqlx::Encode<'sql, Postgres> + Unpin, +{ + async fn start_outbox( + &self, + handler: H, + poll_interval: std::time::Duration, + ) -> Result<(), OutboxError::Error>>; +} + +#[async_trait] +impl RepositoryOutboxExt for PostgresRepository, E> +where + T: Aggregate + Send + Sync + Pickle + 'static, + ::SideEffectId: Clone + Send + 'static, + T::SideEffect: SideEffect + Clone + Pickle + Send + Sync, + T::DomainEvent: DomainEvent + Pickle + Send + Sync, + T::ApplyError: Send + Sync, + H: SideEffectHandler + Send + Sync + 'static, + E: EncryptionProvider + Clone + Send + Sync + 'static, + for<'a> PostgresTransaction<'a, T, TableOutbox, E>: + TransactionOutboxExt::Error>, + for<'sql> ::SideEffectId: + sqlx::Decode<'sql, Postgres> + sqlx::Type + sqlx::Encode<'sql, Postgres> + Unpin, +{ + async fn start_outbox( + &self, + handler: H, + poll_interval: std::time::Duration, + ) -> Result<(), OutboxError::Error>> { + let handler = Arc::new(handler); + loop { + let deadline = std::time::Instant::now() + poll_interval; + let _ = process_outbox_batch::(self, handler.clone()).await; + tokio::time::sleep_until(deadline.into()).await; + } + } +} + +async fn process_outbox_batch( + repo: &PostgresRepository, E>, + handler: Arc, +) -> Result<(), OutboxError::Error>> +where + T: Aggregate + Send + Sync + Pickle + 'static, + T::SideEffect: SideEffect + Pickle + Send + Sync, + H: SideEffectHandler + Send + Sync, + T::DomainEvent: DomainEvent + Pickle + Send + Sync, + 
T::ApplyError: Send + Sync, + E: EncryptionProvider + Clone + Send + Sync + 'static, + for<'a> PostgresTransaction<'a, T, TableOutbox, E>: + TransactionOutboxExt::Error>, + for<'sql> ::SideEffectId: + sqlx::Decode<'sql, Postgres> + sqlx::Type + sqlx::Encode<'sql, Postgres> + Unpin, +{ + let mut tx = repo.begin_transaction().await?; + + let outbox_items: Vec> = tx.get_outbox_batch().await?; + + for mut item in outbox_items { + let id: ::SideEffectId = *item.message.id(); + + match handler.handle(&item.message, item.retries).await { + Ok(()) => { + tx.delete_outbox_item(id).await?; + } + Err((requeue, _)) => { + item.retries += 1; + item.requeue = requeue; + tx.update_outbox_item(item).await?; + } + } + } + + tx.into_inner() + .commit() + .await + .map_err(OutboxError::Database) +} diff --git a/eventastic_outbox_postgres/src/outbox_message.rs b/eventastic_outbox_postgres/src/outbox_message.rs new file mode 100644 index 0000000..6fcc2e2 --- /dev/null +++ b/eventastic_outbox_postgres/src/outbox_message.rs @@ -0,0 +1,32 @@ +use eventastic::aggregate::SideEffect; + +/// Message stored in the transactional outbox. +#[derive(Debug, Clone)] +pub struct OutboxMessage +where + T: SideEffect, +{ + /// The side effect payload. + pub message: T, + pub retries: u16, + /// Whether the message should be requeued on failure. + pub requeue: bool, +} + +impl OutboxMessage +where + T: SideEffect, +{ + pub fn new(message: T, retries: u16, requeue: bool) -> Self { + Self { + message, + retries, + requeue, + } + } + + /// Returns the retry count for this message. 
+ pub fn retries(&self) -> u16 { + self.retries + } +} diff --git a/eventastic_postgres/Cargo.toml b/eventastic_postgres/Cargo.toml index d3d6ab8..fbf287b 100644 --- a/eventastic_postgres/Cargo.toml +++ b/eventastic_postgres/Cargo.toml @@ -1,23 +1,31 @@ [package] name = "eventastic_postgres" -version = "0.4.0" -edition = "2021" +version = "0.5.0" +edition = "2024" license = "MIT" readme = "../README.md" repository = "https://github.com/jdon/eventastic" -description = "An example postgres event store for eventastic" +description = "A postgres event store for eventastic" categories = ["web-programming", "asynchronous"] keywords = ["postgres", "postgresql", "database", "ddd", "event-sourcing"] [dependencies] -chrono = { workspace = true } -eventastic = { path = "../eventastic", version = "0.4" } -sqlx = { workspace = true } -serde_json = { workspace = true } -tokio = { workspace = true } async-trait = { workspace = true } +async-stream = { workspace = true } +chrono = { workspace = true } +eventastic = { path = "../eventastic", version = "0.5" } futures = { workspace = true } futures-util = { workspace = true } -serde = { workspace = true } -anyhow = { workspace = true } +serde = { workspace = true, optional = true } +serde_json = { workspace = true, optional = true } +sqlx = { workspace = true } thiserror = { workspace = true } +tokio = { workspace = true } + +[dev-dependencies] +eventastic_outbox_postgres = { path = "../eventastic_outbox_postgres" } +uuid = { workspace = true } + +[features] +default = [] +serde = ["dep:serde", "dep:serde_json"] diff --git a/eventastic_postgres/migrations/20230610185630_init.sql b/eventastic_postgres/migrations/20230610185630_init.sql index 0f3b5d6..1c07ba2 100644 --- a/eventastic_postgres/migrations/20230610185630_init.sql +++ b/eventastic_postgres/migrations/20230610185630_init.sql @@ -1,23 +1,28 @@ CREATE TABLE if not exists events ( - event_id uuid PRIMARY KEY, - version bigint NOT NULL, aggregate_id uuid NOT NULL, - event jsonb 
NOT NULL, - created_at timestamptz NOT NULL + version bigint NOT NULL CHECK (version >= 0), + event_id uuid NOT NULL, + event bytea NOT NULL, + created_at timestamptz NOT NULL, + PRIMARY KEY (aggregate_id, version) ); -CREATE UNIQUE INDEX IF NOT EXISTS aggregate_version ON events (version, aggregate_id); +CREATE UNIQUE INDEX IF NOT EXISTS events_event_id ON events (event_id); CREATE TABLE if not exists snapshots ( - aggregate_id uuid PRIMARY KEY, - snapshot jsonb NOT NULL, - created_at timestamptz NOT NULL + aggregate_id uuid NOT NULL, + aggregate bytea NOT NULL, + version bigint NOT NULL CHECK (version >= 0), + snapshot_version bigint NOT NULL, + created_at timestamptz NOT NULL, + PRIMARY KEY (aggregate_id, snapshot_version) ); CREATE TABLE if not exists outbox ( id uuid PRIMARY KEY, - message jsonb NOT NULL, - retries integer NOT NULL, + message bytea NOT NULL, + retries integer NOT NULL CHECK (retries >= 0), requeue boolean NOT NULL, created_at timestamptz NOT NULL -); \ No newline at end of file +); + diff --git a/eventastic_postgres/src/common.rs b/eventastic_postgres/src/common.rs new file mode 100644 index 0000000..abfad2d --- /dev/null +++ b/eventastic_postgres/src/common.rs @@ -0,0 +1,148 @@ +//! Common data structures and utilities shared across PostgreSQL implementations. +//! +//! This module contains shared code used by [`PostgresTransaction`] to avoid +//! duplication and ensure consistency. + +use crate::DbError; +use crate::pickle::Pickle; +use eventastic::aggregate::Aggregate; +use eventastic::event::{DomainEvent, EventStoreEvent}; +use eventastic::repository::Snapshot; +use sqlx::types::Uuid; + +/// Type alias for the complex return type of event conversion operations. +type EventResult = Result< + EventStoreEvent<::DomainEvent>, + DbError< + E, + <::DomainEvent as Pickle>::Error, + ::Error, + <::SideEffect as Pickle>::Error, + >, +>; + +/// Type alias for the complex return type of snapshot conversion operations. 
+type SnapshotResult = Result< + Snapshot, + DbError< + E, + <::DomainEvent as Pickle>::Error, + ::Error, + <::SideEffect as Pickle>::Error, + >, +>; + +/// Internal representation of a database row containing event data. +/// +/// This struct is used to deserialize event rows from the database +/// before converting them to the full [`EventStoreEvent`] type. +#[derive(Debug, sqlx::FromRow)] +pub(crate) struct PartialEventRow { + pub event_id: Uuid, + pub version: i64, + pub event: Vec, +} + +impl PartialEventRow { + /// Converts a [`PartialEventRow`] to an [`EventStoreEvent`]. + /// + /// This function handles deserialization of the JSON event data and + /// validation of the version number, providing consistent error handling + /// across different database operations. + /// + /// # Type Parameters + /// + /// - `Evt` - The domain event type that implements [`DomainEvent`] + /// + /// # Errors + /// + /// Returns [`DbError::InvalidVersionNumber`] if the version cannot be converted to u64. + /// Returns [`DbError::EventPicklingError`] if the event JSON cannot be deserialized. + pub fn to_event(row: PartialEventRow) -> EventResult + where + T: Aggregate + Pickle, + T::DomainEvent: DomainEvent + Pickle, + T::SideEffect: Pickle, + { + let row_version = u64::try_from(row.version).map_err(|_| DbError::InvalidVersionNumber)?; + + T::DomainEvent::unpickle(&row.event) + .map(|e| EventStoreEvent { + id: row.event_id, + event: e, + version: row_version, + }) + .map_err(DbError::EventPicklingError) + } +} + +/// Internal representation of a database row containing snapshot data. +/// +/// This struct is used to deserialize snapshot rows from the database +/// before converting them to the full [`Snapshot`] type. +#[derive(sqlx::FromRow)] +pub(crate) struct PartialSnapshotRow { + pub aggregate: Vec, + pub snapshot_version: i64, + pub version: i64, +} + +impl PartialSnapshotRow { + /// Converts a [`PartialSnapshotRow`] to a [`Snapshot`]. 
+ /// + /// This function handles deserialization of the JSON aggregate data and + /// validation of version numbers, providing consistent error handling + /// across different database operations. + /// + /// # Type Parameters + /// + /// - `T` - The aggregate type that implements [`Aggregate`] + /// + /// # Errors + /// + /// Returns [`DbError::InvalidVersionNumber`] if the version cannot be converted to u64. + /// Returns [`DbError::InvalidSnapshotVersion`] if the snapshot version cannot be converted to u64. + /// Returns [`DbError::SnapshotPicklingError`] if the aggregate JSON cannot be deserialized. + pub fn to_snapshot(row: PartialSnapshotRow) -> SnapshotResult + where + T: Aggregate + Pickle, + T::DomainEvent: DomainEvent + Pickle, + T::SideEffect: Pickle, + { + let version = u64::try_from(row.version).map_err(|_| DbError::InvalidVersionNumber)?; + let snapshot_version = + u64::try_from(row.snapshot_version).map_err(|_| DbError::InvalidSnapshotVersion)?; + let aggregate: T = T::unpickle(&row.aggregate).map_err(DbError::SnapshotPicklingError)?; + + Ok(Snapshot { + aggregate, + version, + snapshot_version, + }) + } +} + +/// Utility functions for common validation and conversion operations. +pub(crate) mod utils { + use crate::DbError; + + /// Converts a u64 version to i64 for database storage. + /// + /// # Errors + /// + /// Returns [`DbError::InvalidVersionNumber`] if the conversion fails. + pub fn version_to_i64(version: u64) -> Result> { + i64::try_from(version).map_err(|_| DbError::InvalidVersionNumber) + } + + /// Converts a u64 snapshot version to i64 for database storage. + /// + /// # Errors + /// + /// Returns [`DbError::InvalidSnapshotVersion`] if the conversion fails. 
+ pub fn snapshot_version_to_i64( + version: u64, + ) -> Result> { + i64::try_from(version).map_err(|_| DbError::InvalidSnapshotVersion) + } +} diff --git a/eventastic_postgres/src/encryption.rs b/eventastic_postgres/src/encryption.rs new file mode 100644 index 0000000..ab15688 --- /dev/null +++ b/eventastic_postgres/src/encryption.rs @@ -0,0 +1,54 @@ +use async_trait::async_trait; + +/// Encrypt data before storing it in the database. +#[async_trait] +pub trait EncryptionProvider { + type Error: std::error::Error + Send + Sync + 'static; + + /// Encrypt a batch of items. The batch size won't exceed the value returned + /// by [`max_batch_size`]. + async fn encrypt(&self, plain: Vec>) -> Result>, Self::Error>; + + /// Decrypt a batch of items. The batch size won't exceed the value returned + /// by [`max_batch_size`]. + async fn decrypt(&self, cipher: Vec>) -> Result>, Self::Error>; + + /// The maximum batch size to use for [`encrypt`] and [`decrypt`] operations. + fn max_batch_size(&self) -> usize; +} + +/// An [`EncryptionProvider`] that does no encryption. Can be used where you +/// don't need any encryption. +#[derive(Clone)] +pub struct NoEncryption; + +#[async_trait] +impl EncryptionProvider for NoEncryption { + type Error = NoEncryptionError; + + async fn encrypt(&self, plain: Vec>) -> Result>, Self::Error> { + Ok(plain) + } + + async fn decrypt(&self, cipher: Vec>) -> Result>, Self::Error> { + Ok(cipher) + } + + fn max_batch_size(&self) -> usize { + 100 + } +} + +/// The error type for [`NoEncryption`]. +/// +/// This can't actually be returned by `encrypt` or `decrypt` but is required by the trait. 
+#[derive(Debug)]
+pub struct NoEncryptionError;
+
+impl std::fmt::Display for NoEncryptionError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "NoEncryptionError")
+    }
+}
+
+impl std::error::Error for NoEncryptionError {}
diff --git a/eventastic_postgres/src/error.rs b/eventastic_postgres/src/error.rs
new file mode 100644
index 0000000..ab120c3
--- /dev/null
+++ b/eventastic_postgres/src/error.rs
@@ -0,0 +1,102 @@
+use eventastic::aggregate::Aggregate;
+use thiserror::Error;
+
+use crate::{EncryptionProvider, Pickle};
+
+#[allow(type_alias_bounds)]
+pub type EventSourcingDbError = DbError<
+    E::Error,
+    ::Error,
+    ::Error,
+    ::Error,
+>;
+
+#[derive(Error, Debug)]
+pub enum DbError<
+    EncryptionError,
+    EventPicklingError,
+    SnapshotPicklingError,
+    SideEffectPicklingError,
+> {
+    /// A database operation failed.
+    #[error("DB Error {0}")]
+    DbError(sqlx::Error),
+    /// Failed to pickle event data.
+    #[error("Pickling Error {0}")]
+    EventPicklingError(EventPicklingError),
+    /// Failed to pickle snapshot data.
+    #[error("Snapshot Pickling Error {0}")]
+    SnapshotPicklingError(SnapshotPicklingError),
+    /// Failed to pickle side effect data.
+    #[error("Side Effect Pickling Error {0}")]
+    SideEffectPicklingError(SideEffectPicklingError),
+    /// An invalid version number was encountered (e.g., negative value where positive expected).
+    #[error("Invalid Version Number")]
+    InvalidVersionNumber,
+    /// An invalid snapshot version number was encountered.
+    #[error("Invalid Snapshot Version Number")]
+    InvalidSnapshotVersion,
+    /// A concurrent modification was detected (optimistic locking failure).
+    #[error("Optimistic Concurrency Error")]
+    OptimisticConcurrencyError,
+    /// Failed to encrypt or decrypt data.
+    #[error("Encryption Error {0}")]
+    Encryption(EncryptionError),
+    /// The encryption provider returned a different number of items than it was given.
+ #[error("Encryption provider returned wrong number of items")] + EncryptionProviderReturnedWrongNumberOfItems, +} + +/// Errors that can occur during side effect storage operations. +/// +/// This is a specialized error type for side effect operations that only +/// includes the errors relevant to storing and retrieving side effects. +#[derive(Error, Debug)] +pub enum SideEffectDbError { + /// A database operation failed. + #[error("DB Error {0}")] + DbError(sqlx::Error), + /// Failed to pickle side effect data. + #[error("Side Effect Pickling Error {0}")] + SideEffectPicklingError(SideEffectPicklingError), + /// Failed to encrypt or decrypt data. + #[error("Encryption Error {0}")] + Encryption(EncryptionError), + /// Encryption provider returned wrong number of items. + #[error("Encryption provider returned wrong number of items")] + EncryptionProviderReturnedWrongNumberOfItems, +} + +impl From for DbError { + fn from(e: sqlx::Error) -> Self { + if let Some(db_error) = e.as_database_error() { + if let Some(code) = db_error.code() { + if code == "23505" && db_error.message().contains("aggregate_version") { + return DbError::OptimisticConcurrencyError; + } + } + } + DbError::DbError(e) + } +} + +impl From for SideEffectDbError { + fn from(e: sqlx::Error) -> Self { + SideEffectDbError::DbError(e) + } +} + +impl From> for DbError { + fn from(e: SideEffectDbError) -> Self { + match e { + SideEffectDbError::DbError(err) => DbError::DbError(err), + SideEffectDbError::SideEffectPicklingError(err) => { + DbError::SideEffectPicklingError(err) + } + SideEffectDbError::Encryption(err) => DbError::Encryption(err), + SideEffectDbError::EncryptionProviderReturnedWrongNumberOfItems => { + DbError::EncryptionProviderReturnedWrongNumberOfItems + } + } + } +} diff --git a/eventastic_postgres/src/lib.rs b/eventastic_postgres/src/lib.rs index 152a87e..08085a7 100644 --- a/eventastic_postgres/src/lib.rs +++ b/eventastic_postgres/src/lib.rs @@ -1,18 +1,92 @@ -mod outbox; +mod common; 
+mod encryption; +mod error; +mod pickle; +mod reader_impl; mod repository; +mod side_effect; +mod table_config; mod transaction; + +pub use encryption::{EncryptionProvider, NoEncryption, NoEncryptionError}; +pub use error::{DbError, SideEffectDbError}; +pub use pickle::Pickle; pub use repository::PostgresRepository; -use thiserror::Error; +pub use side_effect::SideEffectStorage; +pub use table_config::TableConfig; pub use transaction::PostgresTransaction; -#[derive(Error, Debug)] -pub enum DbError { - #[error("DB Error {0}")] - DbError(#[from] sqlx::Error), - #[error("Serialization Error {0}")] - SerializationError(#[source] serde_json::Error), - #[error("Optimistic Concurrency Error {0}")] - OptimisticConcurrency(sqlx::Error), - #[error("Invalid Version Number")] - InvalidVersionNumber, +use crate::error::EventSourcingDbError; + +use async_trait::async_trait; +use eventastic::{ + aggregate::{Aggregate, Context, SideEffect}, + event::DomainEvent, + repository::{Repository, RepositoryError}, +}; +use sqlx::types::Uuid; + +/// Extension trait for loading aggregates from PostgreSQL storage. +/// +/// This trait provides PostgreSQL-specific methods for working with aggregates +/// that have UUID-based identifiers and can be serialized to JSON. +#[async_trait] +pub trait RootExt +where + T: Aggregate + Pickle + Send + Sync + 'static, + ::DomainEvent: DomainEvent + Pickle + Send + Sync, + ::SideEffect: SideEffect + Pickle + Send + Sync, + ::ApplyError: Send + Sync, + O: SideEffectStorage + Send + Sync, + E: EncryptionProvider + Clone + Send + Sync, +{ + /// Loads an aggregate from PostgreSQL storage by its UUID using an existing transaction. + /// + /// This method replays the event stream for the given aggregate ID, + /// starting from any available snapshot and applying subsequent events. 
+ async fn load_with_transaction( + transaction: &mut PostgresTransaction<'_, T, O, E>, + aggregate_id: Uuid, + ) -> Result< + Context, + RepositoryError< + T::ApplyError, + <::DomainEvent as DomainEvent>::EventId, + EventSourcingDbError, + >, + > { + Context::load(transaction, &aggregate_id).await + } + + /// Loads an aggregate from PostgreSQL storage by its UUID without a transaction. + /// + /// This method is more efficient for read-only operations as it uses a + /// connection directly from the pool without starting a transaction. + async fn load( + repository: &PostgresRepository, + aggregate_id: Uuid, + ) -> Result< + Context, + RepositoryError< + T::ApplyError, + <::DomainEvent as DomainEvent>::EventId, + EventSourcingDbError, + >, + > + where + O: Clone, + { + repository.load(&aggregate_id).await + } +} + +impl RootExt for T +where + T: Aggregate + Pickle + Send + Sync + 'static, + ::DomainEvent: DomainEvent + Pickle + Send + Sync, + ::SideEffect: SideEffect + Pickle + Send + Sync, + ::ApplyError: Send + Sync, + O: SideEffectStorage + Send + Sync, + E: EncryptionProvider + Clone + Send + Sync, +{ } diff --git a/eventastic_postgres/src/outbox.rs b/eventastic_postgres/src/outbox.rs deleted file mode 100644 index 4968f61..0000000 --- a/eventastic_postgres/src/outbox.rs +++ /dev/null @@ -1,106 +0,0 @@ -use crate::{DbError, PostgresRepository}; -use eventastic::aggregate::{SideEffect, SideEffectHandler}; -use serde::de::DeserializeOwned; -use sqlx::Postgres; -use std::sync::Arc; - -#[derive(Debug, Clone)] -pub struct OutBoxMessage -where - T: SideEffect, -{ - /// The contents of this outbox message - /// This is usually an enum - pub message: T, - - /// The amount of times this message has been retried - pub(crate) retries: u16, - - /// Whether or not to requeue this item - /// If set to false this item won't be requeued but will remain in the repository. 
- pub requeue: bool, -} - -impl OutBoxMessage -where - T: SideEffect, -{ - pub fn new(message: T, retries: u16, requeue: bool) -> OutBoxMessage { - OutBoxMessage { - message, - retries, - requeue, - } - } -} - -impl OutBoxMessage -where - T: SideEffect, -{ - /// The amount of times this message has been retried - pub fn retries(&self) -> u16 { - self.retries - } -} - -impl PostgresRepository { - /// Start the outbox. - /// This function will run forever, so should generally be spawned as a background task - pub async fn start_outbox( - &self, - handler: H, - poll_interval: std::time::Duration, - ) -> Result<(), DbError> - where - T: SideEffect + DeserializeOwned, - H: SideEffectHandler, - for<'sql> ::Id: sqlx::Decode<'sql, Postgres> - + sqlx::Type - + sqlx::Encode<'sql, Postgres> - + Unpin, - { - let handler = Arc::new(handler); - loop { - let deadline = std::time::Instant::now() + poll_interval; - - // Errors are ignored in the default implementation as they are added to the dead box. - let _ = self.process_outbox_batch::(handler.clone()).await; - tokio::time::sleep_until(deadline.into()).await; - } - } - - /// Process a batch of outbox items. 
- #[doc(hidden)] - async fn process_outbox_batch(&self, handler: Arc) -> Result<(), DbError> - where - T: SideEffect + DeserializeOwned, - H: SideEffectHandler, - for<'sql> ::Id: sqlx::Decode<'sql, Postgres> - + sqlx::Type - + sqlx::Encode<'sql, Postgres> - + Unpin, - { - let mut tx = self.begin_transaction().await?; - - let outbox_items = tx.get_outbox_batch::().await?; - - for mut item in outbox_items { - let item_id = item.message.id().clone(); - - match handler.handle(&item.message, item.retries).await { - Ok(_) => { - tx.delete_outbox_item(item_id).await?; - } - Err((requeue, _)) => { - item.retries += 1; - item.requeue = requeue; - tx.update_outbox_item(item).await?; - } - }; - } - - tx.commit().await?; - Ok(()) - } -} diff --git a/eventastic_postgres/src/pickle.rs b/eventastic_postgres/src/pickle.rs new file mode 100644 index 0000000..0281e9d --- /dev/null +++ b/eventastic_postgres/src/pickle.rs @@ -0,0 +1,30 @@ +/// Preserve your data so it can be stored in the database. +/// +/// [`Pickle`] is implemented for types that implement [`serde::Serialize`] +/// and [`serde::de::DeserializeOwned`]. It uses [`serde_json`] to do the +/// serialization. +pub trait Pickle: Sized { + type Error: std::error::Error + Send + Sync + 'static; + + /// Convert to bytes for storage. + fn pickle(&self) -> Result, Self::Error>; + + /// Convert back to `Self` from bytes. + fn unpickle(bytes: &[u8]) -> Result; +} + +#[cfg(feature = "serde")] +impl Pickle for T +where + T: serde::Serialize + serde::de::DeserializeOwned, +{ + type Error = serde_json::Error; + + fn pickle(&self) -> Result, Self::Error> { + serde_json::to_vec(self) + } + + fn unpickle(bytes: &[u8]) -> Result { + serde_json::from_slice(bytes) + } +} diff --git a/eventastic_postgres/src/reader_impl.rs b/eventastic_postgres/src/reader_impl.rs new file mode 100644 index 0000000..8dcde6c --- /dev/null +++ b/eventastic_postgres/src/reader_impl.rs @@ -0,0 +1,143 @@ +//! 
Generic implementations for [`RepositoryReader`] operations. +//! +//! This module contains shared implementation logic for reading operations +//! that can be used by both [`PostgresTransaction`] and [`PostgresConnection`]. +//! All operations use dynamic table names provided via the [`TableConfig`]. + +use std::sync::Arc; + +use crate::common::{PartialEventRow, PartialSnapshotRow, utils}; +use crate::pickle::Pickle; +use crate::{DbError, EncryptionProvider, EventSourcingDbError}; +use eventastic::aggregate::Aggregate; +use eventastic::event::DomainEvent; +use eventastic::event::EventStoreEvent; +use eventastic::repository::Snapshot; +use futures_util::stream::StreamExt; +use sqlx::types::Uuid; +use sqlx::{Executor, query_as}; + +/// Generic implementation for streaming events from configured table. +pub fn stream_from<'e, 'c: 'e, E, T, EP>( + executor: E, + id: &T::AggregateId, + version: u64, + query: Arc, + encryption_provider: &'e EP, +) -> impl futures::Stream< + Item = std::result::Result, EventSourcingDbError>, +> + 'e +where + E: Executor<'c, Database = sqlx::Postgres> + 'e, + T: Aggregate + Pickle, + T::DomainEvent: DomainEvent + Pickle + Send + 'e, + T::SideEffect: Pickle, + EP: EncryptionProvider + Sync + Send + 'e, +{ + let id = *id; + + async_stream::stream! { + let version = utils::version_to_i64(version)?; + + let chunks = query_as::<_, PartialEventRow>(&query) + .bind(id) + .bind(version) + .fetch(executor) + .chunks(encryption_provider.max_batch_size()); + + for await chunk in chunks { + let chunk = chunk.into_iter().collect::, _>>()?; + // TODO: We could have the query return a vector of events rather than doing this here. 
+ let cipher: Vec<_> = chunk.iter().map(|row| row.event.clone()).collect(); + let number_of_items = cipher.len(); + let plain = encryption_provider + .decrypt(cipher) + .await + .map_err(DbError::Encryption)?; + if plain.len() != number_of_items { + Err(DbError::EncryptionProviderReturnedWrongNumberOfItems)?; + } + for (mut row, plain) in chunk.into_iter().zip(plain.into_iter()) { + row.event = plain; + yield PartialEventRow::to_event::(row); + } + } + } +} + +/// Generic implementation for getting an event by ID from configured table. +pub async fn get_event<'c, E, T, EP>( + executor: E, + aggregate_id: &T::AggregateId, + event_id: &<::DomainEvent as DomainEvent>::EventId, + query: &str, + encryption_provider: &EP, +) -> Result::DomainEvent>>, EventSourcingDbError> +where + E: Executor<'c, Database = sqlx::Postgres>, + T: Aggregate + Pickle, + T::DomainEvent: DomainEvent + Pickle + Send, + T::SideEffect: Pickle, + EP: EncryptionProvider, +{ + let Some(mut row) = query_as::<_, PartialEventRow>(query) + .bind(aggregate_id) + .bind(event_id) + .fetch_optional(executor) + .await? + else { + return Ok(None); + }; + let mut plain = encryption_provider + .decrypt(vec![row.event]) + .await + .map_err(DbError::Encryption)? + .into_iter(); + let Some(event) = plain.next() else { + return Err(DbError::EncryptionProviderReturnedWrongNumberOfItems); + }; + if plain.next().is_some() { + return Err(DbError::EncryptionProviderReturnedWrongNumberOfItems); + } + row.event = event; + Ok(Some(PartialEventRow::to_event::(row)?)) +} + +/// Generic implementation for getting a snapshot from configured table. 
+pub async fn get_snapshot<'c, E, T, EP>( + executor: E, + id: &T::AggregateId, + query: &str, + encryption_provider: &EP, +) -> Result>, EventSourcingDbError> +where + E: Executor<'c, Database = sqlx::Postgres>, + T: Aggregate + Pickle, + T::DomainEvent: DomainEvent + Pickle, + T::SideEffect: Pickle, + EP: EncryptionProvider, +{ + let row = query_as::<_, PartialSnapshotRow>(query) + .bind(id) + .bind(utils::snapshot_version_to_i64(T::SNAPSHOT_VERSION)?) + .fetch_optional(executor) + .await?; + + let Some(mut row) = row else { + return Ok(None); + }; + + let plain = encryption_provider + .decrypt(vec![row.aggregate.clone()]) + .await + .map_err(DbError::Encryption)?; + if plain.len() != 1 { + Err(DbError::EncryptionProviderReturnedWrongNumberOfItems)?; + } + row.aggregate = plain + .into_iter() + .next() + .expect("Decrypt must return 1 item for snapshot"); + + Ok(Some(PartialSnapshotRow::to_snapshot::(row)?)) +} diff --git a/eventastic_postgres/src/repository.rs b/eventastic_postgres/src/repository.rs index 38a15b9..8fb7149 100644 --- a/eventastic_postgres/src/repository.rs +++ b/eventastic_postgres/src/repository.rs @@ -1,35 +1,191 @@ -use crate::PostgresTransaction; +use crate::{ + EventSourcingDbError, PostgresTransaction, SideEffectStorage, encryption::EncryptionProvider, + pickle::Pickle, reader_impl, table_config::TableConfig, +}; +use async_trait::async_trait; +use eventastic::{ + aggregate::{Aggregate, Context, SideEffect}, + event::{DomainEvent, EventStoreEvent}, + repository::{Repository, RepositoryError, RepositoryReader, Snapshot}, +}; use sqlx::{ - postgres::{PgConnectOptions, PgPoolOptions}, Pool, Postgres, + postgres::{PgConnectOptions, PgPoolOptions}, + types::Uuid, }; +use std::marker::PhantomData; +/// PostgreSQL-based repository implementation for event sourcing. +/// +/// This repository provides persistent storage for aggregates, events, and snapshots +/// using PostgreSQL as the backing store. 
It integrates with a configurable side effect +/// storage mechanism for handling the outbox pattern. #[derive(Clone)] -pub struct PostgresRepository { - pub(crate) inner: Pool, +pub struct PostgresRepository { + inner: Pool, + outbox: O, + table_config: TableConfig, + encryption_provider: E, + phantom_aggregate: std::marker::PhantomData, } -impl PostgresRepository { +impl PostgresRepository { + /// Creates a new PostgreSQL repository with the specified connection and pool options. + /// + /// # Parameters + /// + /// - `connect_options` - PostgreSQL connection configuration + /// - `pool_options` - Connection pool configuration + /// - `outbox` - Side effect storage implementation for the outbox pattern pub async fn new( connect_options: PgConnectOptions, pool_options: PgPoolOptions, + table_config: TableConfig, + outbox: O, + encryption_provider: E, ) -> Result { let pool = pool_options.connect_with(connect_options).await?; - Ok(Self { inner: pool }) + Ok(Self { + inner: pool, + outbox, + table_config, + encryption_provider, + phantom_aggregate: PhantomData, + }) } - /// Start a new transaction using the default isolation level - pub async fn begin_transaction(&self) -> Result, sqlx::Error> { + /// Start a new database transaction using the default isolation level. + /// + /// The returned transaction can be used to perform multiple operations + /// atomically and provides access to the repository methods. + pub async fn begin_transaction(&self) -> Result, sqlx::Error> { Ok(PostgresTransaction { inner: self.inner.begin().await?, + outbox: &self.outbox, + table_config: &self.table_config, + encryption_provider: &self.encryption_provider, + phantom_aggregate: PhantomData, }) } - /// Run migrations on the database + /// Create a transaction from an existing raw sqlx transaction. + /// + /// This is useful for multi-aggregate scenarios where you want to use + /// the same database transaction across multiple repository types. 
+ pub fn transaction_from<'a>( + &'a self, + transaction: sqlx::Transaction<'a, Postgres>, + ) -> PostgresTransaction<'a, T, O, E> { + PostgresTransaction { + inner: transaction, + outbox: &self.outbox, + table_config: &self.table_config, + encryption_provider: &self.encryption_provider, + phantom_aggregate: PhantomData, + } + } + + /// Run database migrations to set up the required tables and schema. + /// + /// This method should be called once during application startup to ensure + /// the database schema is up to date with the required tables for events, + /// snapshots, and outbox storage. pub async fn run_migrations(&self) -> Result<(), sqlx::Error> { sqlx::migrate!("./migrations").run(&self.inner).await?; Ok(()) } } + +#[async_trait] +impl RepositoryReader for PostgresRepository +where + T: Aggregate + Pickle + Send + Sync + 'static, + T::DomainEvent: DomainEvent + Pickle + Send + Sync, + T::SideEffect: SideEffect + Pickle + Send + Sync, + T::ApplyError: Send + Sync, + O: SideEffectStorage + Clone + Send + Sync, + E: EncryptionProvider + Clone + Send + Sync, +{ + type DbError = EventSourcingDbError; + + /// Returns a stream of domain events. + fn stream_from( + &mut self, + id: &T::AggregateId, + version: u64, + ) -> impl futures::Stream< + Item = std::result::Result< + eventastic::event::EventStoreEvent< + ::DomainEvent, + >, + Self::DbError, + >, + > { + let query = &self.table_config.stream_events_query; + Box::pin(reader_impl::stream_from::<_, T, E>( + &self.inner, + id, + version, + query.clone(), + &self.encryption_provider, + )) + } + + /// Returns a specific domain event from the database. 
+ async fn get_event( + &mut self, + aggregate_id: &T::AggregateId, + event_id: &<::DomainEvent as DomainEvent>::EventId, + ) -> Result::DomainEvent>>, Self::DbError> { + let query = &self.table_config.get_event_query; + reader_impl::get_event::<_, T, E>( + &self.inner, + aggregate_id, + event_id, + query, + &self.encryption_provider, + ) + .await + } + + /// Returns a snapshot of the aggregate in the database + async fn get_snapshot( + &mut self, + id: &T::AggregateId, + ) -> Result>, Self::DbError> { + let query = &self.table_config.get_snapshot_query; + reader_impl::get_snapshot::<_, T, E>(&self.inner, id, query, &self.encryption_provider) + .await + } +} + +#[async_trait] +impl Repository for PostgresRepository +where + T: Aggregate + Pickle + Send + Sync + 'static, + T::DomainEvent: DomainEvent + Pickle + Send + Sync, + T::SideEffect: eventastic::aggregate::SideEffect + Pickle + Send + Sync, + T::ApplyError: Send + Sync, + O: SideEffectStorage + Clone + Send + Sync, + E: EncryptionProvider + Clone + Send + Sync, +{ + type Error = RepositoryError< + T::ApplyError, + <::DomainEvent as DomainEvent>::EventId, + EventSourcingDbError, + >; + + /// Loads an aggregate from the repository by its ID. + /// + /// This method performs a non-transactional read directly from the pool, + /// avoiding the overhead of starting a transaction. It will load the + /// latest state of the aggregate by replaying its event stream. + /// If a snapshot is available, it will be used to optimize the loading process. 
+ async fn load(&self, aggregate_id: &T::AggregateId) -> Result, Self::Error> { + // Create a mutable reference to self to satisfy the RepositoryReader trait + let mut repo_ref = self.clone(); + Context::load(&mut repo_ref, aggregate_id).await + } +} diff --git a/eventastic_postgres/src/side_effect.rs b/eventastic_postgres/src/side_effect.rs new file mode 100644 index 0000000..2452bf3 --- /dev/null +++ b/eventastic_postgres/src/side_effect.rs @@ -0,0 +1,36 @@ +use crate::SideEffectDbError; +use crate::pickle::Pickle; +use async_trait::async_trait; +use eventastic::aggregate::SideEffect; +use sqlx::types::Uuid; +use sqlx::{Postgres, Transaction}; + +/// Trait for storing side effects in a PostgreSQL database. +/// +/// This trait abstracts the storage mechanism for side effects, allowing +/// different implementations such as direct table storage or outbox patterns. +/// Implementors define how side effects are persisted within a database transaction. +#[async_trait] +pub trait SideEffectStorage: Send + Sync +where + T: SideEffect + Pickle + Send + Sync, +{ + /// Store a collection of side effects within the given database transaction. + /// + /// This method is called as part of the aggregate save process to ensure + /// side effects are stored atomically with domain events. + /// + /// # Parameters + /// + /// - `transaction` - The database transaction to use for storage + /// - `items` - Collection of side effects to store + /// + /// # Errors + /// + /// Returns [`SideEffectDbError`] if the storage operation fails. + async fn store_side_effects( + &self, + transaction: &mut Transaction<'_, Postgres>, + items: Vec, + ) -> Result<(), SideEffectDbError::Error>>; +} diff --git a/eventastic_postgres/src/table_config.rs b/eventastic_postgres/src/table_config.rs new file mode 100644 index 0000000..999fe4b --- /dev/null +++ b/eventastic_postgres/src/table_config.rs @@ -0,0 +1,49 @@ +use std::sync::Arc; + +/// Configuration for database tables used by an aggregate type. 
+/// +/// This struct contains pre-computed SQL queries to avoid string allocation +/// during query execution. +#[derive(Debug, Clone)] +pub struct TableConfig { + pub(crate) stream_events_query: Arc, + pub(crate) get_event_query: String, + pub(crate) get_snapshot_query: String, + pub(crate) insert_events_query: String, + pub(crate) upsert_snapshot_query: String, +} + +impl TableConfig { + /// Create a new TableConfig with pre-computed queries. + pub fn new(events: impl Into, snapshots: impl Into) -> Self { + let events = events.into(); + let snapshots = snapshots.into(); + + Self { + stream_events_query: format!( + "SELECT event, event_id, version FROM {} WHERE aggregate_id = $1 AND version >= $2 ORDER BY version ASC", + &events + ).into(), + get_event_query: format!( + "SELECT event, event_id, version FROM {} WHERE aggregate_id = $1 AND event_id = $2", + &events + ), + get_snapshot_query: format!( + "SELECT aggregate, version, snapshot_version FROM {} WHERE aggregate_id = $1 AND snapshot_version = $2", + &snapshots + ), + insert_events_query: format!( + "INSERT INTO {} (event_id, version, aggregate_id, event, created_at) \ + SELECT * FROM UNNEST($1::uuid[], $2::bigint[], $3::uuid[], $4::bytea[], $5::timestamptz[]) \ + ON CONFLICT DO NOTHING returning event_id", + &events + ), + upsert_snapshot_query: format!( + "INSERT INTO {} (aggregate_id, aggregate, version, snapshot_version, created_at) \ + VALUES ($1, $2, $3, $4, $5) \ + ON CONFLICT (aggregate_id, snapshot_version) DO UPDATE SET aggregate = $2, version = $3, created_at = $5", + &snapshots + ), + } + } +} diff --git a/eventastic_postgres/src/transaction.rs b/eventastic_postgres/src/transaction.rs index 3e8f811..b76392e 100644 --- a/eventastic_postgres/src/transaction.rs +++ b/eventastic_postgres/src/transaction.rs @@ -1,324 +1,256 @@ -use std::fmt::Debug; - -use crate::outbox::OutBoxMessage; -use crate::DbError; +use crate::common::utils; +use crate::pickle::Pickle; +use crate::table_config::TableConfig; 
+use crate::{DbError, EncryptionProvider, EventSourcingDbError, SideEffectStorage, reader_impl}; use async_trait::async_trait; use chrono::DateTime; use chrono::Utc; -use eventastic::aggregate::Aggregate; +use eventastic::aggregate::SaveError; use eventastic::aggregate::SideEffect; -use eventastic::event::Event; +use eventastic::aggregate::{Aggregate, Context}; +use eventastic::event::DomainEvent; use eventastic::event::EventStoreEvent; -use eventastic::event::Stream; -use eventastic::repository::RepositoryTransaction; use eventastic::repository::Snapshot; -use futures::stream; -use futures_util::stream::StreamExt; -use serde::de::DeserializeOwned; -use serde::Serialize; -use sqlx::query; -use sqlx::query_as; -use sqlx::types::JsonValue; -use sqlx::QueryBuilder; +use eventastic::repository::{RepositoryError, RepositoryReader, RepositoryWriter}; +use sqlx::Row; +use sqlx::types::Uuid; use sqlx::{Postgres, Transaction}; -pub struct PostgresTransaction<'a> { +/// PostgreSQL transaction wrapper that implements the [`RepositoryWriter`] and [`RepositoryReader`] traits. +/// +/// This struct provides transactional access to PostgreSQL storage for event sourcing +/// operations. It manages database transactions and integrates with side effect storage. +pub struct PostgresTransaction<'a, T, O, E> { pub(crate) inner: Transaction<'a, Postgres>, + pub(crate) outbox: &'a O, + pub(crate) table_config: &'a TableConfig, + pub(crate) encryption_provider: &'a E, + pub(crate) phantom_aggregate: std::marker::PhantomData, } -impl<'a> PostgresTransaction<'a> { - /// Commit the transaction to the db. - pub async fn commit(self) -> Result<(), sqlx::Error> { - self.inner.commit().await +impl<'a, T, O, E> PostgresTransaction<'a, T, O, E> +where + T: eventastic::aggregate::Aggregate + Pickle, + T::DomainEvent: Pickle, + T::SideEffect: Pickle, + E: EncryptionProvider, +{ + /// Commit the transaction to the database. 
+ /// + /// This finalizes all operations performed within this transaction, + /// making them permanently visible to other database connections. + pub async fn commit(self) -> Result<(), EventSourcingDbError> { + Ok(self.inner.commit().await?) } - /// Returns a batch of 10 outbox items - pub async fn get_outbox_batch(&mut self) -> Result>, DbError> - where - T: DeserializeOwned, - T: SideEffect, - { - let messages = query_as::<_, OutBoxRow>( - "SELECT * from outbox WHERE requeue = true ORDER BY created_at FOR UPDATE SKIP LOCKED LIMIT 10 " - ) - .fetch_all(&mut *self.inner) - .await?; - - messages - .into_iter() - .map(|m| { - let message = - serde_json::from_value(m.message).map_err(DbError::SerializationError)?; - Ok(OutBoxMessage::new(message, m.retries as _, m.requeue)) - }) - .collect::, _>>() + /// Rollback the transaction, discarding all changes. + /// + /// This undoes all operations performed within this transaction, + /// returning the database to its state before the transaction began. + pub async fn rollback(self) -> Result<(), EventSourcingDbError> { + Ok(self.inner.rollback().await?) } - /// Deletes an item from the outbox. 
- #[doc(hidden)] - pub async fn delete_outbox_item(&mut self, outbox_id: T) -> Result<(), DbError> - where - for<'sql> T: sqlx::Decode<'sql, Postgres> - + sqlx::Type - + sqlx::Encode<'sql, Postgres> - + Unpin - + Send - + Sync, - { - let _ = query("DELETE FROM outbox where id = $1") - .bind(outbox_id) - .execute(&mut *self.inner) - .await?; - Ok(()) + /// Get the inner postgres transaction + pub fn into_inner(self) -> Transaction<'a, Postgres> { + self.inner } - /// Update the [`OutBoxMessage::retries`] and [`OutBoxMessage:requeue`] for a specific [`OutBoxMessage`] - #[doc(hidden)] - pub async fn update_outbox_item( - &mut self, - outbox_item: OutBoxMessage, - ) -> Result<(), DbError> - where - T: SideEffect + DeserializeOwned, - for<'sql> T::Id: sqlx::Decode<'sql, Postgres> - + sqlx::Type - + sqlx::Encode<'sql, Postgres> - + Unpin, - { - let _ = query("UPDATE outbox set retries = $2, requeue = $3 where id = $1") - .bind(outbox_item.message.id()) - .bind(outbox_item.retries as i32) - .bind(outbox_item.requeue) - .execute(&mut *self.inner) - .await?; - Ok(()) + /// Returns a mutable reference to the underlying [`sqlx::Transaction`]. 
+ pub fn inner_mut(&mut self) -> &mut Transaction<'a, Postgres> { + &mut self.inner } -} -#[derive(sqlx::FromRow)] -struct PartialSnapShotRow { - snapshot: serde_json::Value, -} - -#[derive(Debug, sqlx::FromRow)] -struct PartialEventRow -where - EId: Unpin, -{ - event_id: EId, - version: i64, - event: JsonValue, -} - -struct FullEventRow -where - EId: Unpin, - AId: Unpin, -{ - event_id: EId, - version: i64, - aggregate_id: AId, - event: JsonValue, - created_at: DateTime, + /// Get the encryption provider reference + pub fn encryption_provider(&self) -> &E { + self.encryption_provider + } } -impl PartialEventRow +impl<'a, T, O, E> PostgresTransaction<'a, T, O, E> where - EId: Debug + Send + Sync + Unpin, + O: SideEffectStorage, + E: EncryptionProvider + Send + Sync + 'static, + T: Aggregate + 'static + Send + Sync + Pickle, + T::DomainEvent: DomainEvent + Pickle + Send + Sync, + T::SideEffect: SideEffect + Pickle + Send + Sync, + T::ApplyError: Send + Sync, { - fn to_event( - row: PartialEventRow, - ) -> Result, DbError> - where - Evt: Send + Sync + Clone + Eq, - for<'de> Evt: serde::Deserialize<'de>, - { - let row_version = u64::try_from(row.version).map_err(|_| DbError::InvalidVersionNumber)?; - match serde_json::from_value::(row.event) { - Ok(e) => Ok(EventStoreEvent { - id: row.event_id, - event: e, - version: row_version, - }), - Err(e) => Err(DbError::SerializationError(e)), - } + /// Get an aggregate by ID. + pub async fn get( + &mut self, + id: &Uuid, + ) -> Result, RepositoryError>> { + Context::load(self, id).await } -} -#[derive(Debug, sqlx::FromRow)] -struct OutBoxRow { - message: JsonValue, - retries: i32, - requeue: bool, - #[allow(dead_code)] - created_at: DateTime, + /// Store an aggregate. 
+ pub async fn store( + &mut self, + aggregate: &mut Context, + ) -> Result<(), SaveError>> { + aggregate.save(self).await + } } #[async_trait] -impl RepositoryTransaction for PostgresTransaction<'a> +impl RepositoryReader for PostgresTransaction<'_, T, O, E> where - T: Aggregate + 'a + DeserializeOwned + Serialize, - ::DomainEvent: Serialize, - for<'sql> T::DomainEventId: - sqlx::Decode<'sql, Postgres> + sqlx::Type + sqlx::Encode<'sql, Postgres> + Unpin, - for<'sql> T::AggregateId: - sqlx::Decode<'sql, Postgres> + sqlx::Type + sqlx::Encode<'sql, Postgres> + Unpin, - for<'sql> <::SideEffect as SideEffect>::Id: - sqlx::Decode<'sql, Postgres> + sqlx::Type + sqlx::Encode<'sql, Postgres> + Unpin, - for<'de> ::DomainEvent: serde::Deserialize<'de>, + T: Aggregate + 'static + Pickle + Send + Sync, + T::SideEffect: SideEffect + Pickle + Send + Sync, + T::DomainEvent: DomainEvent + Pickle + Send + Sync, + T::ApplyError: Send + Sync, + O: SideEffectStorage, + E: EncryptionProvider + Send + Sync, { - /// The type of error that is returned from the database. - type DbError = DbError; - - /// Returns a stream of domain events. - fn stream( - &mut self, - id: &T::AggregateId, - ) -> Stream { - let res = query_as::<_, PartialEventRow>( - " - SELECT event, event_id, version - FROM events - where aggregate_id = $1 ORDER BY version ASC", - ) - .bind(id.clone()) - .fetch(&mut *self.inner); - - res.map(|row| match row { - Ok(row) => PartialEventRow::to_event(row), - Err(e) => Err(DbError::DbError(e)), - }) - .boxed() - } + type DbError = EventSourcingDbError; /// Returns a stream of domain events. 
fn stream_from( &mut self, id: &T::AggregateId, version: u64, - ) -> Stream { - let Ok(version) = i64::try_from(version) else { - return stream::iter(vec![Err(DbError::InvalidVersionNumber)]).boxed(); - }; - - let res = query_as::<_, PartialEventRow>( - " - SELECT event, event_id, version - FROM events - where aggregate_id = $1 AND version >= $2 ORDER BY version ASC", - ) - .bind(id.clone()) - .bind(version) - .fetch(&mut *self.inner); - - res.map(|row| match row { - Ok(row) => PartialEventRow::to_event(row), - Err(e) => Err(DbError::DbError(e)), - }) - .boxed() + ) -> impl futures::Stream< + Item = std::result::Result< + eventastic::event::EventStoreEvent< + ::DomainEvent, + >, + Self::DbError, + >, + > { + let query = &self.table_config.stream_events_query; + Box::pin(reader_impl::stream_from::<_, T, E>( + &mut *self.inner, + id, + version, + query.clone(), + self.encryption_provider, + )) } /// Returns a specific domain event from the database. async fn get_event( &mut self, aggregate_id: &T::AggregateId, - event_id: &T::DomainEventId, - ) -> Result< - Option::DomainEventId, ::DomainEvent>>, - Self::DbError, - > { - let res = query_as::<_, PartialEventRow>( - "SELECT event, event_id, version FROM events where aggregate_id = $1 AND event_id = $2", + event_id: &<::DomainEvent as DomainEvent>::EventId, + ) -> Result::DomainEvent>>, Self::DbError> { + let query = &self.table_config.get_event_query; + reader_impl::get_event::<_, T, E>( + &mut *self.inner, + aggregate_id, + event_id, + query, + self.encryption_provider, ) - .bind(aggregate_id) - .bind(event_id) - .fetch_optional(&mut *self.inner) - .await; - - match res { - Ok(Some(row)) => match PartialEventRow::to_event(row) { - Ok(e) => Ok(Some(e)), - Err(e) => Err(e), - }, - Ok(None) => Ok(None), - Err(e) => Err(DbError::DbError(e)), - } + .await } - /// Adds new domain events to the database - async fn append( + /// Returns a snapshot of the aggregate in the database + async fn get_snapshot( &mut self, id: 
&T::AggregateId, - events: Vec>, - ) -> Result<(), Self::DbError> { - if events.is_empty() { - return Ok(()); - } + ) -> Result>, Self::DbError> { + let query = &self.table_config.get_snapshot_query; + reader_impl::get_snapshot::<_, T, E>(&mut *self.inner, id, query, self.encryption_provider) + .await + } +} - let events = events - .into_iter() - .map(|event| { - let event_id = event.id().clone(); +#[async_trait] +impl RepositoryWriter for PostgresTransaction<'_, T, O, E> +where + T: Aggregate + 'static + Pickle + Send + Sync, + T::SideEffect: SideEffect + Pickle + Send + Sync, + T::DomainEvent: DomainEvent + Pickle + Send + Sync, + T::ApplyError: Send + Sync, + O: SideEffectStorage, + E: EncryptionProvider + Send + Sync, +{ + /// Stores new domain events to the database + async fn store_events( + &mut self, + id: &T::AggregateId, + events: Vec>, + ) -> Result::DomainEvent as DomainEvent>::EventId>, Self::DbError> { + let mut event_ids_to_insert: Vec<<::DomainEvent as DomainEvent>::EventId> = + Vec::with_capacity(events.len()); + let mut versions_to_insert: Vec = Vec::with_capacity(events.len()); + let mut aggregate_ids_to_insert: Vec = Vec::with_capacity(events.len()); + let mut events_to_insert: Vec> = Vec::with_capacity(events.len()); + let mut created_ats_to_insert: Vec> = Vec::with_capacity(events.len()); + + for events in events.chunks(self.encryption_provider.max_batch_size()) { + let mut plain = Vec::with_capacity(events.len()); + for event in events { + let event_id = *event.id(); let version = event.version; - let version = i64::try_from(version).map_err(|_| DbError::InvalidVersionNumber)?; - - match serde_json::to_value(event.event).map_err(DbError::SerializationError) { - Ok(s) => Ok(FullEventRow { - event_id, - version, - aggregate_id: id.clone(), - event: s, - created_at: Utc::now(), - }), - Err(e) => Err(e), - } - }) - .collect::>, Self::DbError>>( - )?; - - let mut query_builder: QueryBuilder = QueryBuilder::new( - "INSERT INTO events(event_id, 
version, aggregate_id, event, created_at) ", - ); - - query_builder.push_values(events, |mut b, event| { - b.push_bind(event.event_id) - .push_bind(event.version) - .push_bind(event.aggregate_id) - .push_bind(event.event) - .push_bind(event.created_at); - }); + let version = utils::version_to_i64(version)?; + + let serialised_event = event.event.pickle().map_err(DbError::EventPicklingError)?; + + event_ids_to_insert.push(event_id); + versions_to_insert.push(version); + aggregate_ids_to_insert.push(*id); + plain.push(serialised_event); + created_ats_to_insert.push(Utc::now()); + } + let number_of_items = plain.len(); + let mut cipher = self + .encryption_provider + .encrypt(plain) + .await + .map_err(DbError::Encryption)?; + if cipher.len() != number_of_items { + return Err(DbError::EncryptionProviderReturnedWrongNumberOfItems); + } + events_to_insert.append(&mut cipher); + } - let query = query_builder.build(); + let insert_query = &self.table_config.insert_events_query; - query.execute(&mut *self.inner).await?; + let inserted_ids: Result, sqlx::Error> = sqlx::query(insert_query) + .bind(&event_ids_to_insert[..]) + .bind(&versions_to_insert[..]) + .bind(&aggregate_ids_to_insert[..]) + .bind(&events_to_insert[..]) + .bind(&created_ats_to_insert[..]) + .fetch_all(&mut *self.inner) + .await? + .into_iter() + .map(|row| row.try_get(0)) + .collect(); - Ok(()) + Ok(inserted_ids?) 
} - /// Returns a snapshot of the aggregate in the database - async fn get_snapshot(&mut self, id: &T::AggregateId) -> Option> { - let json_value = query_as::<_, PartialSnapShotRow>( - "SELECT snapshot from snapshots where aggregate_id = $1", - ) - .bind(id) - .fetch_one(&mut *self.inner) - .await - .ok()?; + /// Stores a snapshot of the aggregate in the database + async fn store_snapshot(&mut self, snapshot: Snapshot) -> Result<(), Self::DbError> { + let aggregated_id = *snapshot.aggregate.aggregate_id(); + let aggregate = snapshot + .aggregate + .pickle() + .map_err(DbError::SnapshotPicklingError)?; + let mut cipher = self + .encryption_provider + .encrypt(vec![aggregate]) + .await + .map_err(DbError::Encryption)? + .into_iter(); + let Some(aggregate) = cipher.next() else { + return Err(DbError::EncryptionProviderReturnedWrongNumberOfItems); + }; + if cipher.next().is_some() { + return Err(DbError::EncryptionProviderReturnedWrongNumberOfItems); + } - serde_json::from_value(json_value.snapshot).ok() - } + let upsert_query = &self.table_config.upsert_snapshot_query; - /// Stores a snapshot of the aggregate in the database - async fn store_snapshot(&mut self, snapshot: Snapshot) -> Result<(), Self::DbError> - where - T: Serialize, - { - let aggregated_id = snapshot.aggregate.aggregate_id().clone(); - let json_value = serde_json::to_value(snapshot).map_err(DbError::SerializationError)?; - query("INSERT INTO snapshots(aggregate_id, snapshot, created_at) VALUES ($1, $2, $3) ON CONFLICT (aggregate_id) DO UPDATE SET snapshot = $2, created_at = $3") + sqlx::query(upsert_query) .bind(aggregated_id) - .bind(json_value) + .bind(aggregate) + .bind(utils::version_to_i64(snapshot.version)?) + .bind(utils::snapshot_version_to_i64(snapshot.snapshot_version)?) 
.bind(Utc::now()) .execute(&mut *self.inner) .await?; @@ -326,48 +258,15 @@ where Ok(()) } - /// Insert side effects into the database + /// Stores side effects into the database #[doc(hidden)] - async fn insert_side_effects( + async fn store_side_effects( &mut self, outbox_item: Vec, - ) -> Result<(), Self::DbError> - where - T::SideEffect: Serialize, - { - if outbox_item.is_empty() { - return Ok(()); - } - - let mut query_builder: QueryBuilder = - QueryBuilder::new("INSERT INTO outbox(id, message, retries, requeue, created_at) "); - - let outbox_item = outbox_item - .into_iter() - .map(|item| { - Ok(( - item.id().clone(), - serde_json::to_value(item).map_err(DbError::SerializationError)?, - )) - }) - .collect::, DbError>>()?; - - query_builder.push_values(outbox_item, |mut b, item| { - b.push_bind(item.0) - .push_bind(item.1) - .push_bind(0) - .push_bind(true) - .push_bind(Utc::now()); - }); - - let query = query_builder.build(); - - query.execute(&mut *self.inner).await?; - Ok(()) - } - - /// Commit the transaction to the db. - async fn commit(self) -> Result<(), Self::DbError> { - Ok(self.commit().await?) + ) -> Result<(), Self::DbError> { + self.outbox + .store_side_effects(&mut self.inner, outbox_item) + .await + .map_err(|e| e.into()) } } diff --git a/eventastic_postgres/tests/common/encryption.rs b/eventastic_postgres/tests/common/encryption.rs new file mode 100644 index 0000000..9caba67 --- /dev/null +++ b/eventastic_postgres/tests/common/encryption.rs @@ -0,0 +1,89 @@ +use async_trait::async_trait; +use eventastic_postgres::EncryptionProvider; + +/// Doesn't actually encrypt just does one XOR with the series 0, 1, 2..255, 0.. +/// to encrypt it then decrypts it by doing the same operation again. (A XOR B) +/// XOR B = A. 
+#[derive(Clone)] +pub struct TestEncryptionProvider; + +#[async_trait] +impl EncryptionProvider for TestEncryptionProvider { + type Error = TestEncryptionError; + + async fn encrypt(&self, plain: Vec>) -> Result>, Self::Error> { + Ok(plain + .into_iter() + .map(|plain| { + plain + .into_iter() + .enumerate() + .map(|(key, plain)| plain ^ (key as u8)) + .collect() + }) + .collect()) + } + + async fn decrypt(&self, cipher: Vec>) -> Result>, Self::Error> { + Ok(cipher + .into_iter() + .map(|cipher| { + cipher + .into_iter() + .enumerate() + .map(|(key, cipher)| cipher ^ (key as u8)) + .collect() + }) + .collect()) + } + + fn max_batch_size(&self) -> usize { + 42 + } +} + +#[derive(Debug)] +pub struct TestEncryptionError; + +impl std::fmt::Display for TestEncryptionError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "TestEncryptionError") + } +} + +impl std::error::Error for TestEncryptionError {} + +#[tokio::test] +async fn encryption_changes_the_data() { + // Arrange + let encryption_provider = TestEncryptionProvider; + let plain = b"Hello, World!"; + + // Act + let cipher = encryption_provider + .encrypt(vec![plain.into()]) + .await + .unwrap(); + + // Assert + assert_eq!(cipher.len(), 1); + assert_ne!(&cipher[0], plain); +} + +#[tokio::test] +async fn encrypt_then_decrypt_returns_original_data() { + // Arrange + let encryption_provider = TestEncryptionProvider; + let plain = b"Hello, World!"; + + // Act + let cipher = encryption_provider + .encrypt(vec![plain.into()]) + .await + .unwrap(); + let decrypted = encryption_provider.decrypt(cipher).await.unwrap(); + + // Assert + assert_eq!(decrypted.len(), 1); + assert_eq!(&decrypted[0], plain); +} diff --git a/eventastic_postgres/tests/common/helpers.rs b/eventastic_postgres/tests/common/helpers.rs new file mode 100644 index 0000000..f2146cb --- /dev/null +++ b/eventastic_postgres/tests/common/helpers.rs @@ -0,0 +1,451 @@ +use super::encryption::TestEncryptionProvider; +use 
super::test_aggregate::{Account, AccountEvent}; +use chrono::{DateTime, Utc}; +use eventastic::aggregate::{Context, Root}; +use eventastic_outbox_postgres::TableOutbox; +use eventastic_postgres::{ + EncryptionProvider, NoEncryption, Pickle, PostgresRepository, TableConfig, +}; +use sqlx::Row; +use sqlx::{pool::PoolOptions, postgres::PgConnectOptions}; +use std::str::FromStr; +use uuid::Uuid; + +pub async fn get_repository() -> PostgresRepository, NoEncryption> +{ + let host = std::env::var("POSTGRES_HOST").unwrap_or_else(|_| "localhost".to_string()); + let connection_string = format!("postgres://postgres:password@{host}/postgres"); + let connection_options = PgConnectOptions::from_str(connection_string.as_str()) + .expect("Failed to parse connection options"); + + let pool_options = PoolOptions::default(); + + let repo = PostgresRepository::new( + connection_options, + pool_options, + TableConfig::new("events", "snapshots"), + TableOutbox::new(NoEncryption), + NoEncryption, + ) + .await + .expect("Failed to connect to postgres"); + repo.run_migrations() + .await + .expect("Failed to run migrations"); + repo +} + +pub async fn get_encrypted_repository() +-> PostgresRepository, TestEncryptionProvider> { + let host = std::env::var("POSTGRES_HOST").unwrap_or_else(|_| "localhost".to_string()); + let connection_string = format!("postgres://postgres:password@{host}/postgres"); + let connection_options = PgConnectOptions::from_str(connection_string.as_str()) + .expect("Failed to parse connection options"); + + let pool_options = PoolOptions::default(); + + let repo = PostgresRepository::new( + connection_options, + pool_options, + TableConfig::new("events", "snapshots"), + TableOutbox::new(TestEncryptionProvider), + TestEncryptionProvider, + ) + .await + .expect("Failed to connect to postgres"); + repo.run_migrations() + .await + .expect("Failed to run migrations"); + repo +} + +#[derive(Debug, Clone)] +pub struct SavedSnapshot { + pub version: i64, + pub aggregate: 
Account, + pub snapshot_version: i64, +} + +pub async fn get_account_snapshot(account_id: Uuid) -> Option { + let repository = get_repository().await; + + let transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + let row = sqlx::query( + "SELECT aggregate, version, snapshot_version FROM snapshots where aggregate_id = $1", + ) + .bind(account_id) + .fetch_optional(&mut *transaction.into_inner()) + .await + .expect("Failed to fetch snapshot"); + + row.map(|row| { + let aggregate_bytes: Result, _> = row.try_get("aggregate"); + let version: Result = row.try_get("version"); + let snapshot_version: Result = row.try_get("snapshot_version"); + + SavedSnapshot { + aggregate: Account::unpickle(&aggregate_bytes.unwrap()).unwrap(), + version: version.unwrap(), + snapshot_version: snapshot_version.unwrap(), + } + }) +} + +pub async fn get_account_snapshot_with_version( + account_id: Uuid, + snapshot_version: i64, +) -> Option { + let repository = get_repository().await; + + let transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + let row = sqlx::query( + "SELECT aggregate, version, snapshot_version FROM snapshots WHERE aggregate_id = $1 AND snapshot_version = $2", + ) + .bind(account_id) + .bind(snapshot_version) + .fetch_optional(&mut *transaction.into_inner()) + .await + .expect("Failed to fetch snapshot"); + + row.map(|row| { + let aggregate_bytes: Result, _> = row.try_get("aggregate"); + let version: Result = row.try_get("version"); + let snapshot_version: Result = row.try_get("snapshot_version"); + + SavedSnapshot { + aggregate: Account::unpickle(&aggregate_bytes.unwrap()).unwrap(), + version: version.unwrap(), + snapshot_version: snapshot_version.unwrap(), + } + }) +} + +pub async fn get_all_account_snapshots(account_id: Uuid) -> Vec { + let repository = get_repository().await; + + let transaction = repository + .begin_transaction() + .await + .expect("Failed to begin 
transaction"); + + let rows = sqlx::query( + "SELECT aggregate, version, snapshot_version FROM snapshots WHERE aggregate_id = $1 ORDER BY snapshot_version ASC", + ) + .bind(account_id) + .fetch_all(&mut *transaction.into_inner()) + .await + .expect("Failed to fetch snapshots"); + + rows.into_iter() + .map(|row| { + let aggregate_bytes: Result, _> = row.try_get("aggregate"); + let version: Result = row.try_get("version"); + let snapshot_version: Result = row.try_get("snapshot_version"); + + SavedSnapshot { + aggregate: Account::unpickle(&aggregate_bytes.unwrap()).unwrap(), + version: version.unwrap(), + snapshot_version: snapshot_version.unwrap(), + } + }) + .collect() +} + +pub async fn count_account_snapshots(account_id: Uuid) -> usize { + let repository = get_repository().await; + + let transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + let row = sqlx::query("SELECT COUNT(*) as count FROM snapshots WHERE aggregate_id = $1") + .bind(account_id) + .fetch_one(&mut *transaction.into_inner()) + .await + .expect("Failed to count snapshots"); + + let count: i64 = row.try_get("count").expect("Failed to get count"); + count as usize +} + +pub async fn replace_account_snapshot(account_id: Uuid, snapshot: SavedSnapshot) { + let repository = get_repository().await; + + let transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + let mut pg_transaction = transaction.into_inner(); + + let row = sqlx::query("UPDATE snapshots set aggregate = $1, snapshot_version = $2, version = $3 where aggregate_id = $4") + .bind(snapshot.aggregate.pickle().expect("Failed to serialize snapshot")) + .bind(snapshot.snapshot_version) + .bind(snapshot.version) + .bind(account_id) + .execute(&mut *pg_transaction) + .await + .expect("Failed to update snapshot"); + + assert!(row.rows_affected() == 1, "Failed to update snapshot"); + pg_transaction + .commit() + .await + .expect("Failed to commit 
transaction"); +} + +pub async fn delete_snapshot(account_id: Uuid) { + let repository = get_repository().await; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction") + .into_inner(); + + sqlx::query("DELETE FROM snapshots WHERE aggregate_id = $1") + .bind(account_id) + .execute(&mut *transaction) + .await + .expect("Failed to delete snapshot"); + + transaction + .commit() + .await + .expect("Failed to commit transaction"); +} + +pub async fn insert_snapshot_with_version(account_id: Uuid, snapshot: SavedSnapshot, version: i64) { + let repository = get_repository().await; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction") + .into_inner(); + + sqlx::query("INSERT INTO snapshots (aggregate_id, aggregate, version, snapshot_version, created_at) VALUES ($1, $2, $3, $4, NOW())") + .bind(account_id) + .bind(snapshot.aggregate.pickle().expect("Failed to serialize snapshot")) + .bind(snapshot.version) + .bind(version) + .execute(&mut *transaction) + .await + .expect("Failed to insert snapshot"); + + transaction + .commit() + .await + .expect("Failed to commit transaction"); +} + +pub async fn load_account(account_id: Uuid) -> Context { + let repository = get_repository().await; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + let context: Context = transaction + .get(&account_id) + .await + .expect("Failed to load account"); + + context +} + +pub async fn get_latest_event_timestamp(account_id: Uuid) -> DateTime { + let repository = get_repository().await; + + let transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + let row = + sqlx::query("SELECT MAX(created_at) as created_at FROM events where aggregate_id = $1") + .bind(account_id) + .fetch_one(&mut *transaction.into_inner()) + .await + .expect("Failed to fetch timestamp"); + + 
row.get("created_at") +} + +pub struct AccountBuilder { + account_id: Uuid, + open_event: AccountEvent, + events: Vec, +} + +impl AccountBuilder { + pub fn new() -> Self { + let account_id = Uuid::new_v4(); + Self { + account_id, + events: Vec::new(), + open_event: AccountEvent::Open { + event_id: Uuid::new_v4(), + account_id, + starting_balance: 0, + email: "user@example.com".into(), + }, + } + } + + pub fn with_email(mut self, new_email: String) -> Self { + if let AccountEvent::Open { ref mut email, .. } = self.open_event { + *email = new_email; + } + self + } + + pub fn with_balance(mut self, balance: i64) -> Self { + if let AccountEvent::Open { + ref mut starting_balance, + .. + } = self.open_event + { + *starting_balance = balance; + } + self + } + + pub fn with_open_event(mut self, event: AccountEvent) -> Self { + self.open_event = event; + self + } + + pub fn with_add_event(mut self, amount: i64) -> Self { + let add_event = AccountEvent::Add { + event_id: Uuid::new_v4(), + amount, + }; + self.events.push(add_event); + self + } + + pub fn with_remove_event(mut self, amount: i64) -> Self { + let remove_event = AccountEvent::Remove { + event_id: Uuid::new_v4(), + amount, + }; + self.events.push(remove_event); + self + } + + pub fn with_event(mut self, event: AccountEvent) -> Self { + self.events.push(event); + self + } + + pub fn build(self) -> Context { + let mut account = + Account::record_new(self.open_event).expect("Failed to record new account"); + + for event in self.events { + account.record_that(event).expect("Failed to apply event"); + } + + account + } + + pub async fn save(self) -> Context { + let repository = get_repository().await; + let mut account = self.build(); + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + transaction + .store(&mut account) + .await + .expect("Failed to save account"); + + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + account 
+ } +} + +pub async fn get_side_effect( + id: uuid::Uuid, + encryption_provider: impl EncryptionProvider, +) -> Option<(super::test_aggregate::SideEffects, i32, bool)> { + let repository = get_repository().await; + let transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + let row = sqlx::query("SELECT id, message, retries, requeue FROM outbox WHERE id = $1") + .bind(id) + .fetch_optional(&mut *transaction.into_inner()) + .await + .expect("Failed to query outbox table"); + + if let Some(row) = row { + let message_bytes: Vec = row + .try_get("message") + .expect("Failed to get message from row"); + let retries: i32 = row + .try_get("retries") + .expect("Failed to get retries from row"); + let requeue: bool = row + .try_get("requeue") + .expect("Failed to get requeue from row"); + + let plain = encryption_provider + .decrypt(vec![message_bytes]) + .await + .unwrap(); + assert!(plain.len() == 1); + let plain = &plain[0]; + let side_effect: super::test_aggregate::SideEffects = + super::test_aggregate::SideEffects::unpickle(plain) + .expect("Failed to deserialize side effect"); + + Some((side_effect, retries, requeue)) + } else { + None + } +} + +// Helper function to create an account with many events efficiently +pub async fn create_account_with_many_events( + account_id: Uuid, + num_events: usize, +) -> Context { + let mut builder = AccountBuilder::new().with_open_event(AccountEvent::Open { + account_id, + event_id: Uuid::new_v4(), + email: "test@example.com".to_string(), + starting_balance: 1000, + }); + + // Add alternating add and remove events + for i in 0..num_events { + if i % 2 == 0 { + builder = builder.with_add_event(10); + } else { + builder = builder.with_remove_event(5); + } + } + + builder.build() +} diff --git a/eventastic_postgres/tests/common/mod.rs b/eventastic_postgres/tests/common/mod.rs new file mode 100644 index 0000000..7bf30b2 --- /dev/null +++ b/eventastic_postgres/tests/common/mod.rs @@ -0,0 
+1,6 @@ +#![allow(dead_code)] + +pub mod encryption; +pub mod helpers; +pub mod test_aggregate; +pub mod test_order_aggregate; diff --git a/eventastic_postgres/tests/common/test_aggregate.rs b/eventastic_postgres/tests/common/test_aggregate.rs new file mode 100644 index 0000000..da79c7f --- /dev/null +++ b/eventastic_postgres/tests/common/test_aggregate.rs @@ -0,0 +1,171 @@ +use eventastic::aggregate::Aggregate; +use eventastic::aggregate::SideEffect; +use eventastic::event::DomainEvent; +use serde::Deserialize; +use serde::Serialize; +use thiserror::Error; +use uuid::Uuid; + +// Define our aggregate +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Account { + pub account_id: Uuid, + pub balance: i64, +} + +// Define our domain events +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +pub enum AccountEvent { + Open { + account_id: Uuid, + event_id: Uuid, + email: String, + starting_balance: i64, + }, + Add { + event_id: Uuid, + amount: i64, + }, + Remove { + event_id: Uuid, + amount: i64, + }, +} + +impl DomainEvent for AccountEvent { + type EventId = Uuid; + fn id(&self) -> &Uuid { + match self { + AccountEvent::Open { event_id, .. } + | AccountEvent::Add { event_id, .. } + | AccountEvent::Remove { event_id, .. 
} => event_id, + } + } +} + +// Define our domain error +// Generally it's expected that applying an event is infallible as the business logic should be done in the command handlers +// But some events could cause an error and returning an error is probably better than panicking +#[derive(Error, Debug)] +pub enum DomainError { + #[error("This event can't be applied given the current state of the aggregate")] + InvalidState, +} + +// Define our side effects +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +pub enum SideEffects { + PublishMessage { + id: Uuid, + message: String, + }, + SendEmail { + id: Uuid, + address: String, + content: String, + }, +} + +impl SideEffect for SideEffects { + /// The type used to uniquely identify this side effect. + type SideEffectId = Uuid; + + fn id(&self) -> &Self::SideEffectId { + match self { + SideEffects::PublishMessage { id, .. } | SideEffects::SendEmail { id, .. } => id, + } + } +} + +// Implement the aggregate trait for our aggregate struct +impl Aggregate for Account { + /// The current version of the snapshot to store. + /// This number should be increased when a breaking change is made to the apply functions. + const SNAPSHOT_VERSION: u64 = 2; + + /// The type used to uniquely identify the Aggregate. + type AggregateId = Uuid; + + /// The type of Domain Events that interest this Aggregate. + /// Usually, this type should be an `enum`. + type DomainEvent = AccountEvent; + + /// The error type that can be returned by [`Aggregate::apply`] when + /// mutating the Aggregate state. + type ApplyError = DomainError; + + /// The type of side effect that this aggregate can produce. + /// Usually, this type should be an `enum`. + type SideEffect = SideEffects; + + /// Returns the unique identifier for the Aggregate instance. + fn aggregate_id(&self) -> &Self::AggregateId { + &self.account_id + } + + /// Mutates the state of an Aggregate through a Domain Event. 
+ /// + /// # Errors + /// + /// The method can return an error if the event to apply is unexpected + /// given the current state of the Aggregate. + fn apply(&mut self, event: &Self::DomainEvent) -> Result<(), Self::ApplyError> { + match event { + AccountEvent::Add { amount, .. } => { + self.balance += amount; + } + AccountEvent::Remove { amount, .. } => { + self.balance -= amount; + } + AccountEvent::Open { .. } => return Err(Self::ApplyError::InvalidState), + } + Ok(()) + } + + /// Create a new Aggregate through a Domain Event. + /// + /// # Errors + /// + /// The method can return an error if the event to apply is unexpected + /// given the current state of the Aggregate. + fn apply_new(event: &Self::DomainEvent) -> Result { + match event { + AccountEvent::Open { + account_id, + starting_balance, + .. + } => Ok(Self { + account_id: *account_id, + balance: *starting_balance, + }), + AccountEvent::Add { .. } | AccountEvent::Remove { .. } => { + Err(Self::ApplyError::InvalidState) + } + } + } + + /// Generates a list of side effects for this given aggregate and domain event + /// The domain event has already been applied to the aggregate + fn side_effects(&self, event: &Self::DomainEvent) -> Option> { + let side_effect = match event { + AccountEvent::Open { + account_id, + event_id, + email, + starting_balance, + } => Some(SideEffects::SendEmail { + id: *event_id, + address: email.clone(), + content: format!( + "Account opened with id {account_id} and starting balance {starting_balance}" + ), + }), + AccountEvent::Add { event_id, amount } => Some(SideEffects::PublishMessage { + id: *event_id, + message: amount.to_string(), + }), + AccountEvent::Remove { .. 
} => None, + }; + side_effect.map(|s| vec![s]) + } +} diff --git a/eventastic_postgres/tests/common/test_order_aggregate.rs b/eventastic_postgres/tests/common/test_order_aggregate.rs new file mode 100644 index 0000000..cf6b6d1 --- /dev/null +++ b/eventastic_postgres/tests/common/test_order_aggregate.rs @@ -0,0 +1,185 @@ +use eventastic::aggregate::Aggregate; +use eventastic::aggregate::SideEffect; +use eventastic::event::DomainEvent; +use serde::Deserialize; +use serde::Serialize; +use thiserror::Error; +use uuid::Uuid; + +// Define our Order aggregate - different from Account +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Order { + pub order_id: Uuid, + pub customer_id: Uuid, + pub total_amount: i64, + pub status: OrderStatus, +} + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum OrderStatus { + Pending, + Confirmed, + Shipped, + Delivered, + Cancelled, +} + +// Define our domain events for Order +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +pub enum OrderEvent { + Created { + order_id: Uuid, + event_id: Uuid, + customer_id: Uuid, + total_amount: i64, + }, + Confirmed { + event_id: Uuid, + }, + Shipped { + event_id: Uuid, + tracking_number: String, + }, + Delivered { + event_id: Uuid, + }, + Cancelled { + event_id: Uuid, + reason: String, + }, +} + +impl DomainEvent for OrderEvent { + type EventId = Uuid; + fn id(&self) -> &Uuid { + match self { + OrderEvent::Created { event_id, .. } + | OrderEvent::Confirmed { event_id, .. } + | OrderEvent::Shipped { event_id, .. } + | OrderEvent::Delivered { event_id, .. } + | OrderEvent::Cancelled { event_id, .. 
} => event_id, + } + } +} + +// Define our domain error for Order +#[derive(Error, Debug)] +pub enum OrderDomainError { + #[error("This event can't be applied given the current state of the order")] + InvalidState, + #[error("Order is already in final state")] + AlreadyFinalized, +} + +// Define our side effects for Order - different from Account side effects +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +pub enum OrderSideEffects { + SendConfirmationEmail { + id: Uuid, + customer_email: String, + order_id: Uuid, + }, + NotifyWarehouse { + id: Uuid, + order_id: Uuid, + items: Vec, + }, + UpdateInventory { + id: Uuid, + product_ids: Vec, + quantities: Vec, + }, +} + +impl SideEffect for OrderSideEffects { + type SideEffectId = Uuid; + + fn id(&self) -> &Self::SideEffectId { + match self { + OrderSideEffects::SendConfirmationEmail { id, .. } + | OrderSideEffects::NotifyWarehouse { id, .. } + | OrderSideEffects::UpdateInventory { id, .. } => id, + } + } +} + +// Implement the aggregate trait for our Order struct +impl Aggregate for Order { + const SNAPSHOT_VERSION: u64 = 1; + + type AggregateId = Uuid; + type DomainEvent = OrderEvent; + type ApplyError = OrderDomainError; + type SideEffect = OrderSideEffects; + + fn aggregate_id(&self) -> &Self::AggregateId { + &self.order_id + } + + fn apply(&mut self, event: &Self::DomainEvent) -> Result<(), Self::ApplyError> { + match event { + OrderEvent::Confirmed { .. } => { + if self.status != OrderStatus::Pending { + return Err(Self::ApplyError::InvalidState); + } + self.status = OrderStatus::Confirmed; + } + OrderEvent::Shipped { .. } => { + if self.status != OrderStatus::Confirmed { + return Err(Self::ApplyError::InvalidState); + } + self.status = OrderStatus::Shipped; + } + OrderEvent::Delivered { .. } => { + if self.status != OrderStatus::Shipped { + return Err(Self::ApplyError::InvalidState); + } + self.status = OrderStatus::Delivered; + } + OrderEvent::Cancelled { .. 
} => { + if matches!(self.status, OrderStatus::Delivered | OrderStatus::Cancelled) { + return Err(Self::ApplyError::AlreadyFinalized); + } + self.status = OrderStatus::Cancelled; + } + OrderEvent::Created { .. } => return Err(Self::ApplyError::InvalidState), + } + Ok(()) + } + + fn apply_new(event: &Self::DomainEvent) -> Result { + match event { + OrderEvent::Created { + order_id, + customer_id, + total_amount, + .. + } => Ok(Self { + order_id: *order_id, + customer_id: *customer_id, + total_amount: *total_amount, + status: OrderStatus::Pending, + }), + _ => Err(Self::ApplyError::InvalidState), + } + } + + fn side_effects(&self, event: &Self::DomainEvent) -> Option> { + let side_effect = match event { + OrderEvent::Created { event_id, .. } => Some(OrderSideEffects::SendConfirmationEmail { + id: *event_id, + customer_email: "customer@example.com".to_string(), + order_id: self.order_id, + }), + OrderEvent::Confirmed { event_id } => Some(OrderSideEffects::NotifyWarehouse { + id: *event_id, + order_id: self.order_id, + items: vec!["item1".to_string(), "item2".to_string()], + }), + OrderEvent::Shipped { .. } + | OrderEvent::Delivered { .. } + | OrderEvent::Cancelled { .. 
} => None, + }; + side_effect.map(|s| vec![s]) + } +} diff --git a/eventastic_postgres/tests/encryption.rs b/eventastic_postgres/tests/encryption.rs new file mode 100644 index 0000000..7e2a9c5 --- /dev/null +++ b/eventastic_postgres/tests/encryption.rs @@ -0,0 +1,304 @@ +use common::{ + encryption::TestEncryptionProvider, + helpers::{AccountBuilder, get_encrypted_repository, get_repository, get_side_effect}, + test_aggregate::{Account, AccountEvent, SideEffects}, +}; +use eventastic::aggregate::{Context, Root}; +use eventastic::repository::RepositoryReader; +use eventastic_outbox_postgres::TableOutbox; +use eventastic_postgres::{DbError, NoEncryption, PostgresRepository}; +use futures::StreamExt; +use uuid::Uuid; + +mod common; + +#[tokio::test] +async fn when_encryption_is_enabled_aggregate_can_be_saved_and_loaded() { + // Arrange + let repository = get_encrypted_repository().await; + let mut account = AccountBuilder::new().build(); + let account_id = account.state().account_id; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + .save(&mut transaction) + .await + .expect("Failed to commit transaction"); + let created_account = account.state(); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + let loaded_account = load_encrypted_account(account_id).await; + let loaded_account = loaded_account.state(); + + assert_eq!(created_account, loaded_account); +} + +#[tokio::test] +async fn when_encryption_is_enabled_events_can_be_saved_and_loaded_by_id() { + // Arrange + let repository = get_encrypted_repository().await; + + let account_id = Uuid::new_v4(); + let event_id = Uuid::new_v4(); + let email = "test@example.com".to_string(); + let starting_balance = 100; + let open_event = AccountEvent::Open { + account_id, + event_id, + email: email.clone(), + starting_balance, + }; + + let mut account = Account::record_new(open_event.clone()).unwrap(); + 
+ let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + .save(&mut transaction) + .await + .expect("Failed to commit transaction"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + let mut repository = get_encrypted_repository().await; + let result = , + TestEncryptionProvider, + > as RepositoryReader>::get_event( + &mut repository, &account_id, &event_id + ) + .await; + assert!(matches!(result, Ok(Some(e)) if e.event == open_event)); +} + +#[tokio::test] +async fn when_encryption_is_enabled_events_cannot_be_loaded_by_id_without_encryption() { + // Arrange + let repository = get_encrypted_repository().await; + + let account_id = Uuid::new_v4(); + let event_id = Uuid::new_v4(); + let email = "test@example.com".to_string(); + let starting_balance = 100; + let open_event = AccountEvent::Open { + account_id, + event_id, + email: email.clone(), + starting_balance, + }; + + let mut account = Account::record_new(open_event).unwrap(); + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + .save(&mut transaction) + .await + .expect("Failed to commit transaction"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + let mut repository = get_repository().await; + let result = + , NoEncryption> as RepositoryReader< + Account, + >>::get_event(&mut repository, &account_id, &event_id) + .await; + assert!(matches!(result, Err(DbError::EventPicklingError(_)))); +} + +#[tokio::test] +async fn when_encryption_is_enabled_events_can_be_saved_and_loaded() { + // Arrange + let repository = get_encrypted_repository().await; + let mut account = AccountBuilder::new().build(); + let account_id = account.state().account_id; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + 
.save(&mut transaction) + .await + .expect("Failed to commit transaction"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + let mut repository = get_encrypted_repository().await; + let mut events = , + TestEncryptionProvider, + > as RepositoryReader>::stream_from( + &mut repository, &account_id, 0 + ); + while let Some(event) = events.next().await { + assert!(event.is_ok()); + } +} + +#[tokio::test] +async fn when_encryption_is_enabled_events_cannot_be_loaded_without_encryption() { + // Arrange + let repository = get_encrypted_repository().await; + let mut account = AccountBuilder::new().build(); + let account_id = account.state().account_id; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + .save(&mut transaction) + .await + .expect("Failed to commit transaction"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + let mut repository = get_repository().await; + let mut events = + , NoEncryption> as RepositoryReader< + Account, + >>::stream_from(&mut repository, &account_id, 0); + while let Some(event) = events.next().await { + assert!(matches!( + event, + Err(eventastic_postgres::DbError::EventPicklingError(_)) + )); + } +} + +#[tokio::test] +async fn when_encryption_is_enabled_aggregate_cannot_be_loaded_without_encryption() { + // Arrange + let repository = get_encrypted_repository().await; + let mut account = AccountBuilder::new().build(); + let account_id = account.state().account_id; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + .save(&mut transaction) + .await + .expect("Failed to commit transaction"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + let repository = get_repository().await; + let mut transaction = 
repository.begin_transaction().await.unwrap(); + + assert!(matches!( + transaction.get(&account_id).await, + Err(eventastic::repository::RepositoryError::Repository( + eventastic_postgres::DbError::SnapshotPicklingError(_) + )), + )); +} + +#[tokio::test] +async fn when_encryption_is_enabled_side_effect_can_be_saved_and_loaded() { + // Arrange + let repository = get_encrypted_repository().await; + + let account_id = Uuid::new_v4(); + let event_id = Uuid::new_v4(); + let email = "test@example.com".to_string(); + let starting_balance = 100; + let open_event = AccountEvent::Open { + account_id, + event_id, + email: email.clone(), + starting_balance, + }; + + let mut account = Account::record_new(open_event).unwrap(); + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + .save(&mut transaction) + .await + .expect("Failed to commit transaction"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + let (side_effect, _retries, _requeue) = get_side_effect(event_id, TestEncryptionProvider) + .await + .expect("Side effect should be stored in outbox table"); + match side_effect { + SideEffects::SendEmail { + id: side_effect_id, + address, + content, + } => { + assert_eq!(side_effect_id, event_id); + assert_eq!(address, email); + assert!(content.contains(&account_id.to_string())); + assert!(content.contains(&starting_balance.to_string())); + } + _ => panic!("Expected SendEmail side effect"), + } +} + +async fn load_encrypted_account(account_id: Uuid) -> Context { + let repository = get_encrypted_repository().await; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + transaction + .get(&account_id) + .await + .expect("Failed to encrypted load account") +} diff --git a/eventastic_postgres/tests/multi_aggregate.rs b/eventastic_postgres/tests/multi_aggregate.rs new file mode 100644 index 
0000000..b26f206 --- /dev/null +++ b/eventastic_postgres/tests/multi_aggregate.rs @@ -0,0 +1,300 @@ +mod common; + +use common::helpers::get_repository; +use common::test_aggregate::{Account, AccountEvent}; +use common::test_order_aggregate::{Order, OrderEvent, OrderStatus}; +use eventastic::aggregate::Root; +use eventastic_outbox_postgres::TableOutbox; +use eventastic_postgres::PostgresRepository; +use eventastic_postgres::{NoEncryption, TableConfig}; +use sqlx::pool::PoolOptions; +use sqlx::postgres::PgConnectOptions; +use std::str::FromStr; +use uuid::Uuid; + +// Helper function to get an order repository using the same pool +async fn get_order_repository() -> PostgresRepository, NoEncryption> +{ + let host = std::env::var("POSTGRES_HOST").unwrap_or_else(|_| "localhost".to_string()); + let connection_string = format!("postgres://postgres:password@{host}/postgres"); + let connection_options = + PgConnectOptions::from_str(&connection_string).expect("Failed to parse connection options"); + + let pool_options = PoolOptions::default(); + + PostgresRepository::new( + connection_options, + pool_options, + TableConfig::new("events", "snapshots"), + TableOutbox::new(NoEncryption), + NoEncryption, + ) + .await + .expect("Failed to connect to postgres") +} + +#[tokio::test] +pub async fn multi_aggregate_transaction_commit_test() { + // Arrange + let account_repo = get_repository().await; + let order_repo = get_order_repository().await; + + let account_id = Uuid::new_v4(); + let order_id = Uuid::new_v4(); + let customer_id = Uuid::new_v4(); + + // Start with account repository transaction + let mut account_tx = account_repo + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Create and store account + let account_event = AccountEvent::Open { + event_id: Uuid::new_v4(), + account_id, + email: "test@example.com".to_string(), + starting_balance: 1000, + }; + let mut account = Account::record_new(account_event).expect("Failed to create account"); + 
account_tx + .store(&mut account) + .await + .expect("Failed to store account"); + + // Get the raw transaction and pass it to order repository + let raw_tx = account_tx.into_inner(); + let mut order_tx = order_repo.transaction_from(raw_tx); + + // Create and store order + let order_event = OrderEvent::Created { + event_id: Uuid::new_v4(), + order_id, + customer_id, + total_amount: 500, + }; + let mut order = Order::record_new(order_event).expect("Failed to create order"); + order_tx + .store(&mut order) + .await + .expect("Failed to store order"); + + // Commit the transaction + order_tx + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert - verify both aggregates were saved + let mut account_load_tx = account_repo + .begin_transaction() + .await + .expect("Failed to begin load transaction"); + let loaded_account = account_load_tx + .get(&account_id) + .await + .expect("Failed to load account"); + assert_eq!(loaded_account.state().account_id, account_id); + assert_eq!(loaded_account.state().balance, 1000); + account_load_tx + .commit() + .await + .expect("Failed to commit load transaction"); + + let mut order_load_tx = order_repo + .begin_transaction() + .await + .expect("Failed to begin order load transaction"); + let loaded_order = order_load_tx + .get(&order_id) + .await + .expect("Failed to load order"); + assert_eq!(loaded_order.state().order_id, order_id); + assert_eq!(loaded_order.state().total_amount, 500); + assert_eq!(loaded_order.state().status, OrderStatus::Pending); + order_load_tx + .commit() + .await + .expect("Failed to commit order load transaction"); +} + +#[tokio::test] +pub async fn multi_aggregate_transaction_rollback_test() { + // Arrange + let account_repo = get_repository().await; + let order_repo = get_order_repository().await; + + let account_id = Uuid::new_v4(); + let order_id = Uuid::new_v4(); + let customer_id = Uuid::new_v4(); + + // Start with account repository transaction + let mut account_tx = account_repo 
+ .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Create and store account + let account_event = AccountEvent::Open { + event_id: Uuid::new_v4(), + account_id, + email: "test@example.com".to_string(), + starting_balance: 1000, + }; + let mut account = Account::record_new(account_event).expect("Failed to create account"); + account_tx + .store(&mut account) + .await + .expect("Failed to store account"); + + // Get the raw transaction and pass it to order repository + let raw_tx = account_tx.into_inner(); + let mut order_tx = order_repo.transaction_from(raw_tx); + + // Create and store order + let order_event = OrderEvent::Created { + event_id: Uuid::new_v4(), + order_id, + customer_id, + total_amount: 500, + }; + let mut order = Order::record_new(order_event).expect("Failed to create order"); + order_tx + .store(&mut order) + .await + .expect("Failed to store order"); + + // Rollback the transaction instead of committing + order_tx + .rollback() + .await + .expect("Failed to rollback transaction"); + + // Assert - verify neither aggregate was saved + let mut account_load_tx = account_repo + .begin_transaction() + .await + .expect("Failed to begin load transaction"); + let account_result = account_load_tx.get(&account_id).await; + assert!( + account_result.is_err(), + "Account should not exist after rollback" + ); + account_load_tx + .rollback() + .await + .expect("Failed to rollback load transaction"); + + let mut order_load_tx = order_repo + .begin_transaction() + .await + .expect("Failed to begin order load transaction"); + let order_result = order_load_tx.get(&order_id).await; + assert!( + order_result.is_err(), + "Order should not exist after rollback" + ); + order_load_tx + .rollback() + .await + .expect("Failed to rollback order load transaction"); +} + +#[tokio::test] +pub async fn multi_aggregate_transaction_with_mixed_side_effects() { + // Arrange + let account_repo = get_repository().await; + let order_repo = 
get_order_repository().await; + + let account_id = Uuid::new_v4(); + let order_id = Uuid::new_v4(); + let customer_id = Uuid::new_v4(); + + // Start with account repository transaction + let mut account_tx = account_repo + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Create account and add money (generates side effects) + let account_open_event = AccountEvent::Open { + event_id: Uuid::new_v4(), + account_id, + email: "test@example.com".to_string(), + starting_balance: 1000, + }; + let mut account = Account::record_new(account_open_event).expect("Failed to create account"); + + let add_event = AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 500, + }; + account + .record_that(add_event) + .expect("Failed to add money to account"); + + account_tx + .store(&mut account) + .await + .expect("Failed to store account"); + + // Get the raw transaction and pass it to order repository + let raw_tx = account_tx.into_inner(); + let mut order_tx = order_repo.transaction_from(raw_tx); + + // Create and confirm order (generates different side effects) + let order_event = OrderEvent::Created { + event_id: Uuid::new_v4(), + order_id, + customer_id, + total_amount: 500, + }; + let mut order = Order::record_new(order_event).expect("Failed to create order"); + + let confirm_event = OrderEvent::Confirmed { + event_id: Uuid::new_v4(), + }; + order + .record_that(confirm_event) + .expect("Failed to confirm order"); + + order_tx + .store(&mut order) + .await + .expect("Failed to store order"); + + // Commit the transaction + order_tx + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert - verify both aggregates were saved with correct states + let mut account_load_tx = account_repo + .begin_transaction() + .await + .expect("Failed to begin load transaction"); + let loaded_account = account_load_tx + .get(&account_id) + .await + .expect("Failed to load account"); + assert_eq!(loaded_account.state().balance, 1500); // 1000 + 
500 + account_load_tx + .commit() + .await + .expect("Failed to commit load transaction"); + + let mut order_load_tx = order_repo + .begin_transaction() + .await + .expect("Failed to begin order load transaction"); + let loaded_order = order_load_tx + .get(&order_id) + .await + .expect("Failed to load order"); + assert_eq!(loaded_order.state().status, OrderStatus::Confirmed); + order_load_tx + .commit() + .await + .expect("Failed to commit order load transaction"); +} diff --git a/eventastic_postgres/tests/postgres.rs b/eventastic_postgres/tests/postgres.rs new file mode 100644 index 0000000..6f0bdf2 --- /dev/null +++ b/eventastic_postgres/tests/postgres.rs @@ -0,0 +1,520 @@ +mod common; + +use common::helpers::{AccountBuilder, get_latest_event_timestamp, get_repository, load_account}; +use common::test_aggregate::AccountEvent; +use eventastic::aggregate::Context; +use eventastic::repository::RepositoryReader; +use futures::StreamExt; +use uuid::Uuid; + +use crate::common::helpers::create_account_with_many_events; +use crate::common::test_aggregate::Account; + +#[tokio::test] +pub async fn aggregate_is_successfully_saved_and_loaded() { + // Arrange + let repository = get_repository().await; + let mut account = AccountBuilder::new().build(); + let account_id = account.state().account_id; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + let created_account = account.state(); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + + let loaded_account = load_account(account_id).await; + let loaded_account = loaded_account.state(); + + assert_eq!(created_account, loaded_account); +} + +#[tokio::test] +pub async fn aggregate_is_not_saved_if_no_events_are_applied() { + // Arrange + let repository = get_repository().await; + let mut account = AccountBuilder::new().save().await; 
+ let account_id = account.state().account_id; + + let event_time_stamp = get_latest_event_timestamp(account_id).await; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + + assert_eq!( + event_time_stamp, + get_latest_event_timestamp(account_id).await + ); +} + +#[tokio::test] +pub async fn transaction_rollback_discards_changes() { + // Arrange + let repository = get_repository().await; + let mut account = AccountBuilder::new().build(); + let account_id = account.state().account_id; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Apply event and save to the transaction but don't commit + account + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 100, + }) + .expect("Failed to apply event"); + + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + // Roll back the transaction instead of committing + transaction + .rollback() + .await + .expect("Failed to rollback transaction"); + + // Act - Try to load the account from the database + // Start a new transaction + let mut load_transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Assert + // The account should not exist in the database since we rolled back + let load_result = + Context::::load(&mut load_transaction, &account_id).await; + assert!( + load_result.is_err(), + "Account should not exist after rollback" + ); + + load_transaction + .commit() + .await + .expect("Failed to commit transaction"); +} + +#[tokio::test] +pub async fn transaction_isolates_changes_until_commit() { + // Arrange + let repository = get_repository().await; + + // Create an account in one transaction + let mut account = 
AccountBuilder::new().build(); + let account_id = account.state().account_id; + + // Record an event that adds funds + account + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 50, + }) + .expect("Failed to apply event"); + + // Begin a transaction to save the account with the event + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Save but don't commit yet + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + // Act - Try to read the account from a different transaction before committing + let mut concurrent_transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // The account should not be visible in the other transaction yet + let load_result = + Context::::load(&mut concurrent_transaction, &account_id) + .await; + + // Assert + assert!( + load_result.is_err(), + "Account should not be visible in another transaction before commit" + ); + + concurrent_transaction + .commit() + .await + .expect("Failed to commit concurrent transaction"); + + // Now commit the original transaction + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Verify the account is now visible + let mut verification_transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + let loaded_account = Context::::load( + &mut verification_transaction, + &account_id, + ) + .await + .expect("Failed to load account after commit"); + + assert_eq!(loaded_account.state().balance, 50); + + verification_transaction + .commit() + .await + .expect("Failed to commit verification transaction"); +} + +#[tokio::test] +pub async fn transaction_handles_invalid_data_gracefully() { + // Arrange + let repository = get_repository().await; + + // Create a valid account first + let mut account = AccountBuilder::new().save().await; + let account_id = account.state().account_id; + + 
// Begin a transaction + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Try to apply an invalid event (Open event on an existing account) + // This should be rejected by the apply() method in the Account aggregate + account + .record_that(AccountEvent::Open { + account_id, + event_id: Uuid::new_v4(), + email: "test@example.com".to_string(), + starting_balance: 50, + }) + .expect_err("Should fail to apply Open event on existing account"); + + // The account should remain unchanged and still be savable + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Verify the account is still in its original state + let loaded_account = load_account(account_id).await; + assert_eq!(account.state().balance, loaded_account.state().balance); +} + +#[tokio::test] +pub async fn transaction_handles_multiple_operations_correctly() { + // Arrange + let repository = get_repository().await; + + // Create an account with initial events + let mut account = AccountBuilder::new().with_add_event(100).save().await; + let account_id = account.state().account_id; + let initial_balance = account.state().balance; + + // Start a transaction that will perform multiple operations + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // First operation: Add funds + account + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 50, + }) + .expect("Failed to apply Add event"); + + // Save the changes but keep the transaction open + account + .save(&mut transaction) + .await + .expect("Failed to save account after first operation"); + + // Second operation: Remove funds + account + .record_that(AccountEvent::Remove { + event_id: Uuid::new_v4(), + amount: 25, + }) + .expect("Failed to apply Remove event"); + + // Save again in the same transaction + 
account + .save(&mut transaction) + .await + .expect("Failed to save account after second operation"); + + // Finally commit the transaction + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Verify that both operations were applied correctly + let loaded_account = load_account(account_id).await; + assert_eq!( + initial_balance + 50 - 25, + loaded_account.state().balance, + "Account balance should reflect all operations in the transaction" + ); +} + +#[tokio::test] +pub async fn transaction_handles_multiple_aggregates_correctly() { + // Arrange + let repository = get_repository().await; + + // Create two separate accounts + let mut account1 = AccountBuilder::new().save().await; + let account_id1 = account1.state().account_id; + + let mut account2 = AccountBuilder::new().save().await; + let account_id2 = account2.state().account_id; + + // Apply changes to both accounts in the same transaction + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Update first account + account1 + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 100, + }) + .expect("Failed to apply event to first account"); + + account1 + .save(&mut transaction) + .await + .expect("Failed to save first account"); + + // Update second account in the same transaction + account2 + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 200, + }) + .expect("Failed to apply event to second account"); + + account2 + .save(&mut transaction) + .await + .expect("Failed to save second account"); + + // Either both accounts should be updated or neither (atomic transaction) + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Verify both accounts were updated correctly + let loaded_account1 = load_account(account_id1).await; + let loaded_account2 = load_account(account_id2).await; + + assert_eq!(loaded_account1.state().balance, 
account1.state().balance); + assert_eq!(loaded_account2.state().balance, account2.state().balance); +} + +#[tokio::test] +pub async fn repository_error_handling_and_recovery() { + // Arrange + let repository = get_repository().await; + + // Create an account + let mut account = AccountBuilder::new().save().await; + let account_id = account.state().account_id; + let initial_balance = account.state().balance; + + // Start a transaction + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Apply an event + account + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 50, + }) + .expect("Failed to apply Add event"); + + // Save changes + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + // Simulate an error by rolling back instead of committing + transaction + .rollback() + .await + .expect("Failed to rollback transaction"); + + // Recovery: create a new transaction and retry the operation + let mut recovery_transaction = repository + .begin_transaction() + .await + .expect("Failed to begin recovery transaction"); + + // Reload the account (should be in original state) + let mut reloaded_account = load_account(account_id).await; + assert_eq!( + reloaded_account.state().balance, + initial_balance, + "Account should be in original state after rollback" + ); + + // Retry the operation + reloaded_account + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 75, // Different amount this time + }) + .expect("Failed to apply event during recovery"); + + // Save and commit + reloaded_account + .save(&mut recovery_transaction) + .await + .expect("Failed to save account during recovery"); + + recovery_transaction + .commit() + .await + .expect("Failed to commit recovery transaction"); + + // Verify the recovery operation was successful + let final_account = load_account(account_id).await; + assert_eq!( + final_account.state().balance, + 
initial_balance + 75, + "Account balance should reflect recovery operation" + ); +} + +#[tokio::test] +pub async fn repository_load_works_without_transaction() { + use common::test_aggregate::Account; + use eventastic::repository::Repository; + + // Arrange + let repository = get_repository().await; + let account = AccountBuilder::new() + .with_add_event(100) + .with_remove_event(20) + .save() + .await; + let account_id = account.state().account_id; + let expected_balance = account.state().balance; + + // Act - Load using the new Repository::load method (no transaction needed) + let loaded_account: Context = repository + .load(&account_id) + .await + .expect("Failed to load account using Repository::load"); + + // Assert + assert_eq!(loaded_account.state().account_id, account_id); + assert_eq!(loaded_account.state().balance, expected_balance); + + // Verify it loads the same data as the transaction-based approach + let transaction_loaded_account = load_account(account_id).await; + assert_eq!(loaded_account.state(), transaction_loaded_account.state()); +} + +#[tokio::test] +async fn streaming_returns_events_in_version_order() { + let repository = get_repository().await; + let account_id = Uuid::new_v4(); + + // Create an account with many events + let mut account = create_account_with_many_events(account_id, 150_000).await; + + // Save the account + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + transaction + .store(&mut account) + .await + .expect("Failed to save account"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + let mut event_count = 0; + + let mut events_stream = + RepositoryReader::::stream_from(&mut transaction, &account_id, 0); + + // Process events one by one to verify streaming behavior + while let Some(event_result) = events_stream.next().await { 
+ let event = event_result.expect("Failed to get event from stream"); + event_count += 1; + + // Verify we're getting events in order (events are 0-indexed) + assert_eq!( + event.version as usize, + event_count - 1, + "Events should be in order" + ); + } +} diff --git a/eventastic_postgres/tests/side_effect.rs b/eventastic_postgres/tests/side_effect.rs new file mode 100644 index 0000000..f73ef61 --- /dev/null +++ b/eventastic_postgres/tests/side_effect.rs @@ -0,0 +1,300 @@ +mod common; + +use common::helpers::{get_repository, get_side_effect}; +use common::test_aggregate::{Account, AccountEvent, SideEffects}; +use eventastic::aggregate::Root; +use eventastic_postgres::NoEncryption; +use uuid::Uuid; + +#[tokio::test] +async fn side_effect_is_correctly_stored() { + // Arrange + let repository = get_repository().await; + + // Create an Open event with a known event ID so we can query for it later + let account_id = Uuid::new_v4(); + let event_id = Uuid::new_v4(); + let email = "test@example.com".to_string(); + let starting_balance = 100; + + let open_event = AccountEvent::Open { + account_id, + event_id, + email: email.clone(), + starting_balance, + }; + + // Create an aggregate from the event + let mut account = Account::record_new(open_event).expect("Failed to create account"); + + // Act - Save the account which should trigger storing the side effect + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert - Verify the side effect was stored in the outbox table + let (side_effect, retries, requeue) = get_side_effect(event_id, NoEncryption) + .await + .expect("Side effect should be stored in outbox table"); + + // Assert the side effect contains the expected data + match side_effect { + SideEffects::SendEmail { + id: side_effect_id, + 
address, + content, + } => { + assert_eq!(side_effect_id, event_id); + assert_eq!(address, email); + assert!(content.contains(&account_id.to_string())); + assert!(content.contains(&starting_balance.to_string())); + } + _ => panic!("Expected SendEmail side effect"), + } + + // Assert the retry settings + assert_eq!(retries, 0, "Initial retries should be 0"); + assert!(requeue, "Side effect should be requeued by default"); +} + +#[tokio::test] +async fn multiple_side_effects_are_stored_correctly() { + // Arrange + let repository = get_repository().await; + + // Create an account with known event IDs + let account_id = Uuid::new_v4(); + let open_event_id = Uuid::new_v4(); + let add_event_id = Uuid::new_v4(); + + // Create an Open event + let open_event = AccountEvent::Open { + account_id, + event_id: open_event_id, + email: "test@example.com".to_string(), + starting_balance: 200, + }; + + // Create an aggregate from the event + let mut account = Account::record_new(open_event).expect("Failed to create account"); + + // Add another event that will generate a different side effect + let add_amount = 50; + let add_event = AccountEvent::Add { + event_id: add_event_id, + amount: add_amount, + }; + + account + .record_that(add_event) + .expect("Failed to apply Add event"); + + // Act - Save the account which should store both side effects + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert - Verify both side effects were stored + + // Check for the Open event's side effect + get_side_effect(open_event_id, NoEncryption) + .await + .expect("Open event side effect should be stored"); + + // Check for the Add event's side effect + let (side_effect, _, _) = get_side_effect(add_event_id, NoEncryption) + .await + .expect("Add event side effect should 
be stored"); + + match side_effect { + SideEffects::PublishMessage { id, message } => { + assert_eq!(id, add_event_id); + assert_eq!(message, add_amount.to_string()); + } + _ => panic!("Expected PublishMessage side effect"), + } +} + +#[tokio::test] +async fn side_effect_regeneration_works_correctly() { + use eventastic::aggregate::Context; + + // Arrange + let repository = get_repository().await; + + // Create an account with multiple events + let account_id = Uuid::new_v4(); + let open_event_id = Uuid::new_v4(); + let add_event_id = Uuid::new_v4(); + let remove_event_id = Uuid::new_v4(); + + // Create events + let open_event = AccountEvent::Open { + account_id, + event_id: open_event_id, + email: "test@example.com".to_string(), + starting_balance: 100, + }; + + let add_event = AccountEvent::Add { + event_id: add_event_id, + amount: 50, + }; + + let remove_event = AccountEvent::Remove { + event_id: remove_event_id, + amount: 25, + }; + + // Create and save the aggregate with all events + let mut account = Account::record_new(open_event.clone()).expect("Failed to create account"); + account + .record_that(add_event.clone()) + .expect("Failed to apply Add event"); + account + .record_that(remove_event.clone()) + .expect("Failed to apply Remove event"); + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Act & Assert - Test regenerating side effects for Open event (should generate SendEmail) + let mut repo_clone = repository.clone(); + let regenerated_open_effects = + Context::::regenerate_side_effects(&mut repo_clone, &account_id, &open_event_id) + .await + .expect("Failed to regenerate side effects for Open event"); + + assert!( + regenerated_open_effects.is_some(), + "Open event should generate side effects" + ); + let open_effects = 
regenerated_open_effects.unwrap(); + assert_eq!( + open_effects.len(), + 1, + "Open event should generate exactly one side effect" + ); + + match &open_effects[0] { + SideEffects::SendEmail { + id, + address, + content, + } => { + assert_eq!(*id, open_event_id, "Side effect ID should match event ID"); + assert_eq!(address, "test@example.com", "Email address should match"); + assert!( + content.contains(&account_id.to_string()), + "Content should contain account ID" + ); + assert!( + content.contains("100"), + "Content should contain starting balance" + ); + } + _ => panic!("Expected SendEmail side effect for Open event"), + } + + // Act & Assert - Test regenerating side effects for Add event (should generate PublishMessage) + let mut repo_clone = repository.clone(); + let regenerated_add_effects = + Context::::regenerate_side_effects(&mut repo_clone, &account_id, &add_event_id) + .await + .expect("Failed to regenerate side effects for Add event"); + + assert!( + regenerated_add_effects.is_some(), + "Add event should generate side effects" + ); + let add_effects = regenerated_add_effects.unwrap(); + assert_eq!( + add_effects.len(), + 1, + "Add event should generate exactly one side effect" + ); + + match &add_effects[0] { + SideEffects::PublishMessage { id, message } => { + assert_eq!(*id, add_event_id, "Side effect ID should match event ID"); + assert_eq!(message, "50", "Message should contain the amount"); + } + _ => panic!("Expected PublishMessage side effect for Add event"), + } + + // Act & Assert - Test regenerating side effects for Remove event (should generate no side effects) + let mut repo_clone = repository.clone(); + let regenerated_remove_effects = + Context::::regenerate_side_effects(&mut repo_clone, &account_id, &remove_event_id) + .await + .expect("Failed to regenerate side effects for Remove event"); + + assert!( + regenerated_remove_effects.is_none(), + "Remove event should not generate side effects" + ); + + // Act & Assert - Test error case: 
non-existent event ID + let non_existent_event_id = Uuid::new_v4(); + let mut repo_clone = repository.clone(); + let non_existent_result = Context::::regenerate_side_effects( + &mut repo_clone, + &account_id, + &non_existent_event_id, + ) + .await + .expect("Should not error for non-existent event ID"); + + assert!( + non_existent_result.is_none(), + "Non-existent event should return None" + ); + + // Act & Assert - Test error case: non-existent aggregate ID + let non_existent_aggregate_id = Uuid::new_v4(); + let mut repo_clone = repository.clone(); + let non_existent_aggregate_result = Context::::regenerate_side_effects( + &mut repo_clone, + &non_existent_aggregate_id, + &open_event_id, + ) + .await + .expect("Should not error for non-existent aggregate ID"); + + assert!( + non_existent_aggregate_result.is_none(), + "Non-existent aggregate should return None" + ); +} diff --git a/eventastic_postgres/tests/snapshots.rs b/eventastic_postgres/tests/snapshots.rs new file mode 100644 index 0000000..8b5d452 --- /dev/null +++ b/eventastic_postgres/tests/snapshots.rs @@ -0,0 +1,308 @@ +mod common; + +use common::helpers::{ + AccountBuilder, count_account_snapshots, delete_snapshot, get_account_snapshot, + get_account_snapshot_with_version, get_repository, insert_snapshot_with_version, load_account, + replace_account_snapshot, +}; +use common::test_aggregate::AccountEvent; +use uuid::Uuid; + +#[tokio::test] +pub async fn snapshots_are_saved_automatically() { + // Arrange + let repository = get_repository().await; + let mut account = AccountBuilder::new().build(); + let account_id = account.state().account_id; + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Act + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + let created_account = account.state(); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + let snapshot = 
get_account_snapshot(account_id) + .await + .expect("Failed to get snapshot"); + + let state = snapshot.aggregate; + + assert_eq!(created_account.clone(), state); +} + +#[tokio::test] +pub async fn aggregate_is_rebuilt_if_snapshots_are_missing() { + // Arrange + let account = AccountBuilder::new() + .with_add_event(100) + .with_remove_event(10) + .with_add_event(10) + .save() + .await; + + let account_id = account.state().account_id; + + delete_snapshot(account_id).await; + + assert!(get_account_snapshot(account_id).await.is_none()); + + // Act + + let rebuilt_account = load_account(account_id).await; + + // Assert + + assert_eq!(rebuilt_account.state(), account.state()); + // Snapshot is not saved again on load, it's only stored on save + assert!(get_account_snapshot(account_id).await.is_none()); +} + +#[tokio::test] +pub async fn snapshots_are_successfully_saved_when_new_event_is_applied() { + // Arrange + let repository = get_repository().await; + let mut account = AccountBuilder::new() + .with_add_event(100) + .with_remove_event(10) + .with_add_event(10) + .save() + .await; + + let account_id = account.state().account_id; + + delete_snapshot(account_id).await; + + assert!(get_account_snapshot(account_id).await.is_none()); + + // Act + + account + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 10, + }) + .expect("Failed to apply event"); + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert + + let saved_snapshot = get_account_snapshot(account_id) + .await + .expect("Failed to get snapshot"); + + let saved_state = saved_snapshot.aggregate; + + assert_eq!(&saved_state, account.state()); +} + +#[tokio::test] +pub async fn snapshots_are_rebuilt_if_snapshot_version_is_different() { + // Arrange + let account = 
AccountBuilder::new() + .with_add_event(100) + .with_remove_event(10) + .with_add_event(10) + .save() + .await; + + let account_id = account.state().account_id; + + let snapshot = get_account_snapshot(account_id) + .await + .expect("Failed to get snapshot"); + + let mut snapshot = snapshot.clone(); + snapshot.snapshot_version = 0; + + snapshot.aggregate.balance = 0; + + // Insert our modified snapshot with a different version and balance + replace_account_snapshot(account_id, snapshot).await; + + // Act + + // Account should be rebuilt and not use the snapshot with the wrong version + let rebuilt_account = load_account(account_id).await; + + // Assert + + assert_eq!(rebuilt_account.state(), account.state()); +} + +#[tokio::test] +pub async fn multiple_snapshot_versions_can_coexist() { + // Arrange + let account = AccountBuilder::new() + .with_add_event(100) + .with_remove_event(10) + .save() + .await; + + let account_id = account.state().account_id; + + // Get the current snapshot (version 2) + let current_snapshot = get_account_snapshot(account_id) + .await + .expect("Failed to get current snapshot"); + + // Create a snapshot with version 1 (different from current version 2) + let mut old_snapshot = current_snapshot.clone(); + old_snapshot.aggregate.balance = 500; // Different balance to verify they coexist + + // Act - Insert snapshot with version 1 + insert_snapshot_with_version(account_id, old_snapshot.clone(), 1).await; + + // Assert - Both snapshots should exist + assert_eq!(count_account_snapshots(account_id).await, 2); + + // Verify we can retrieve each snapshot by version + let version_1_snapshot = get_account_snapshot_with_version(account_id, 1) + .await + .expect("Failed to get version 1 snapshot"); + let version_2_snapshot = get_account_snapshot_with_version(account_id, 2) + .await + .expect("Failed to get version 2 snapshot"); + + assert_eq!(version_1_snapshot.aggregate.balance, 500); + assert_eq!(version_2_snapshot.aggregate.balance, 90); + 
assert_eq!(version_1_snapshot.snapshot_version, 1); + assert_eq!(version_2_snapshot.snapshot_version, 2); + + // Verify loading the aggregate uses the correct current version (2) + let loaded_account = load_account(account_id).await; + assert_eq!(loaded_account.state().balance, 90); +} + +#[tokio::test] +pub async fn same_snapshot_version_updates_existing_snapshot() { + // Arrange + let repository = get_repository().await; + let mut account = AccountBuilder::new().with_add_event(100).build(); + + let account_id = account.state().account_id; + + // Save the initial account (creates snapshot with version 2) + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Verify initial state + assert_eq!(count_account_snapshots(account_id).await, 1); + let initial_snapshot = get_account_snapshot(account_id) + .await + .expect("Failed to get initial snapshot"); + assert_eq!(initial_snapshot.aggregate.balance, 100); + + // Act - Modify the account and save again (same SNAPSHOT_VERSION) + account + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 50, + }) + .expect("Failed to apply event"); + + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Assert - Still only one snapshot, but updated + assert_eq!(count_account_snapshots(account_id).await, 1); + let updated_snapshot = get_account_snapshot(account_id) + .await + .expect("Failed to get updated snapshot"); + assert_eq!(updated_snapshot.aggregate.balance, 150); + assert_eq!(updated_snapshot.snapshot_version, 2); +} + +#[tokio::test] +pub async fn 
old_snapshots_are_ignored_when_loading() { + // Arrange + let account = AccountBuilder::new() + .with_add_event(100) + .with_remove_event(10) + .save() + .await; + + let account_id = account.state().account_id; + + // Get the current snapshot (version 2) and verify balance + let current_snapshot = get_account_snapshot(account_id) + .await + .expect("Failed to get current snapshot"); + assert_eq!(current_snapshot.aggregate.balance, 90); + + // Create snapshots with old versions (0 and 1) with different balances + let mut old_snapshot_v0 = current_snapshot.clone(); + old_snapshot_v0.aggregate.balance = 999; // Wrong balance + + let mut old_snapshot_v1 = current_snapshot.clone(); + old_snapshot_v1.aggregate.balance = 888; // Wrong balance + + // Act - Insert old snapshots + insert_snapshot_with_version(account_id, old_snapshot_v0, 0).await; + insert_snapshot_with_version(account_id, old_snapshot_v1, 1).await; + + // Assert - All snapshots exist in database + assert_eq!(count_account_snapshots(account_id).await, 3); + + // Verify we can retrieve each snapshot by version + let version_0_snapshot = get_account_snapshot_with_version(account_id, 0) + .await + .expect("Failed to get version 0 snapshot"); + let version_1_snapshot = get_account_snapshot_with_version(account_id, 1) + .await + .expect("Failed to get version 1 snapshot"); + let version_2_snapshot = get_account_snapshot_with_version(account_id, 2) + .await + .expect("Failed to get version 2 snapshot"); + + assert_eq!(version_0_snapshot.aggregate.balance, 999); + assert_eq!(version_1_snapshot.aggregate.balance, 888); + assert_eq!(version_2_snapshot.aggregate.balance, 90); + + // Most important: Loading the aggregate should use the correct current version (2) + // and ignore the old snapshots with incorrect balances + let loaded_account = load_account(account_id).await; + assert_eq!(loaded_account.state().balance, 90); +} diff --git a/eventastic_postgres/tests/validation.rs b/eventastic_postgres/tests/validation.rs 
new file mode 100644 index 0000000..b22fe5a --- /dev/null +++ b/eventastic_postgres/tests/validation.rs @@ -0,0 +1,228 @@ +mod common; + +use common::helpers::{AccountBuilder, get_repository}; +use common::test_aggregate::AccountEvent; +use eventastic::aggregate::SaveError; +use uuid::Uuid; + +#[tokio::test] +pub async fn idempotency_error_if_event_with_different_content_is_saved() { + // Arrange + // Create and save a new account to the repository + let mut account = AccountBuilder::new().save().await; + + // Get a repository instance to interact with the database + let repository = get_repository().await; + + // Generate a UUID that will be reused for two different events + // This will create an idempotency conflict since the event ID should uniquely identify the event content + let event_id = Uuid::new_v4(); + + // Create first event with the generated ID and amount 10 + let add_event = AccountEvent::Add { + event_id, + amount: 10, // First event adds 10 to the account + }; + + // Record the event in the aggregate's context, which applies it to the account state + account + .record_that(add_event) + .expect("Failed to apply event"); + + // Begin a transaction to save the event to the repository + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Save the account with the new event + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + // Commit the transaction to persist the changes + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Act + // Create a second event with the SAME ID but DIFFERENT content (amount 20 instead of 10) + // This should violate idempotency since events with the same ID should have the same content + let add_event = AccountEvent::Add { + event_id, // Same event ID as before + amount: 20, // Different amount (20 instead of 10) + }; + + // Record the conflicting event + account + .record_that(add_event) + 
.expect("Failed to apply event"); + + // Begin a new transaction to try to save the conflicting event + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Try to save the account with the conflicting event + // This should fail with an IdempotencyError since an event with this ID + // but different content already exists in the repository + let err = account + .save(&mut transaction) + .await + .expect_err("Failed get error"); + + // Assert + // Verify the error is an IdempotencyError and contains the expected events + // First parameter is the saved event (amount 10), second parameter is the conflicting event (amount 20) + assert!(matches!(err, + SaveError::IdempotencyError( + AccountEvent::Add { amount: 10, event_id: id1 }, + AccountEvent::Add { amount: 20, event_id: id2 } + ) if id1 == event_id && id2 == event_id + )); +} + +#[tokio::test] +pub async fn no_idempotency_error_if_event_with_same_content_is_saved() { + // Arrange + // Create and save a new account to the repository + let mut account = AccountBuilder::new().save().await; + + // Get a repository instance to interact with the database + let repository = get_repository().await; + + // Generate a UUID that will be reused for two different events + // This will create an idempotency conflict since the event ID should uniquely identify the event content + let event_id = Uuid::new_v4(); + + // Create first event with the generated ID and amount 10 + let add_event = AccountEvent::Add { + event_id, + amount: 10, // First event adds 10 to the account + }; + + // Record the event in the aggregate's context, which applies it to the account state + account + .record_that(add_event.clone()) + .expect("Failed to apply event"); + + // Begin a transaction to save the event to the repository + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Save the account with the new event + account + 
.save(&mut transaction) + .await + .expect("Failed to save account"); + + // Commit the transaction to persist the changes + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + // Act + + // Record the conflicting event + account + .record_that(add_event) + .expect("Failed to apply event"); + + // Begin a new transaction to try to save the conflicting event + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Try to save the account with the conflicting event + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + transaction + .commit() + .await + .expect("Failed to commit transaction"); +} + +#[tokio::test] +pub async fn optimistic_concurrency_error_if_aggregate_was_updated_by_another() { + // Arrange + // Create a new account and save it to the repository + let mut account = AccountBuilder::new().save().await; + + // Create a clone of the account to simulate a concurrent access scenario + // Both instances represent the same aggregate but will diverge as changes are made + let mut outdated_account = account.clone(); + let account_id = account.state().account_id; + + // Create an event to add 10 to the account + let add_event = AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 10, + }; + + // Apply the event to the first account instance + account + .record_that(add_event) + .expect("Failed to apply event"); + + // Get a repository instance + let repository = get_repository().await; + + // Begin a transaction to save the updated account + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Save the account with its new event + account + .save(&mut transaction) + .await + .expect("Failed to save account"); + + // Commit the transaction - at this point, the account in the repository has version 1 + transaction + .commit() + .await + .expect("Failed to commit transaction"); + + 
// Act + // Attempt to update the account with an outdated version + // The outdated_account is still at version 0, but the repository has version 1 + + // Apply a different event to the outdated account instance + outdated_account + .record_that(AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 20, + }) + .expect("Failed to apply event"); + + // Begin a new transaction to try to save the outdated account + let mut transaction = repository + .begin_transaction() + .await + .expect("Failed to begin transaction"); + + // Try to save the outdated account - this should fail with an OptimisticConcurrency error + // because the repository version is ahead of what the outdated_account expects + let err = outdated_account + .save(&mut transaction) + .await + .expect_err("Failed to get error while saving account"); + + // Assert + // Verify we got an OptimisticConcurrency error with the correct aggregate ID and version + assert!( + matches!(err, SaveError::OptimisticConcurrency(id, version) if id == account_id && version == 1) + ); +} diff --git a/examples/bank/Cargo.toml b/examples/bank/Cargo.toml index 23346bc..8a2f0bc 100644 --- a/examples/bank/Cargo.toml +++ b/examples/bank/Cargo.toml @@ -1,17 +1,17 @@ [package] name = "bank" version = "0.1.0" -edition = "2021" +edition = "2024" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] eventastic = { path = "../../eventastic" } -eventastic_postgres = { path = "../../eventastic_postgres" } +eventastic_postgres = { path = "../../eventastic_postgres", features = ["serde"] } +eventastic_outbox_postgres = { path = "../../eventastic_outbox_postgres" } thiserror = { workspace = true } uuid = { workspace = true } tokio = { workspace = true } sqlx = { workspace = true } -anyhow = { workspace = true } serde = { workspace = true } async-trait = { workspace = true } diff --git a/examples/bank/src/main.rs b/examples/bank/src/main.rs index 645d525..b1b7d31 100644 --- 
a/examples/bank/src/main.rs +++ b/examples/bank/src/main.rs @@ -1,17 +1,15 @@ use std::str::FromStr; -use async_trait::async_trait; use eventastic::aggregate::Aggregate; use eventastic::aggregate::Context; -use eventastic::aggregate::RecordError; use eventastic::aggregate::Root; +use eventastic::aggregate::SaveError; use eventastic::aggregate::SideEffect; -use eventastic::aggregate::SideEffectHandler; -use eventastic::event::Event; -use eventastic::repository::RepositoryTransaction; -use eventastic_postgres::PostgresRepository; - +use eventastic::event::DomainEvent; +use eventastic::repository::Repository; +use eventastic_outbox_postgres::{RepositoryOutboxExt, SideEffectHandler, TableOutbox}; +use eventastic_postgres::{NoEncryption, PostgresRepository, RootExt, TableConfig}; use serde::Deserialize; use serde::Serialize; use sqlx::{pool::PoolOptions, postgres::PgConnectOptions}; @@ -19,7 +17,7 @@ use thiserror::Error; use uuid::Uuid; #[tokio::main] -async fn main() -> Result<(), anyhow::Error> { +async fn main() -> Result<(), Box<dyn std::error::Error>> { // Setup postgres repo let repository = get_repository().await; @@ -27,13 +25,14 @@ async fn main() -> Result<(), anyhow::Error> { repository.run_migrations().await?; - // Run our side effects handler in a background task - tokio::spawn(async { - let repository = get_repository().await; - - let _ = repository - .start_outbox(SideEffectContext {}, std::time::Duration::from_secs(5)) - .await; + // Run our side effect handler in the background + tokio::spawn({ + let repo = repository.clone(); + async move { + let _ = repo + .start_outbox(SideEffectContext {}, std::time::Duration::from_secs(5)) + .await; + } }); // Start transaction @@ -62,10 +61,7 @@ async fn main() -> Result<(), anyhow::Error> { }; // Record add fund events. - // Record takes in the transaction, as it does idempotency checks with the db.
- account - .record_that(&mut transaction, add_event.clone()) - .await?; + account.record_that(add_event.clone())?; // Save uncommitted events and side effects in the db. transaction.store(&mut account).await?; @@ -73,41 +69,144 @@ // Commit the transaction transaction.commit().await?; - // Get the aggregate from the db + // Get the aggregate from the db using a transaction (read-write access) let mut transaction = repository.begin_transaction().await?; - let mut account: Context<Account> = transaction.get(&account_id).await?; + let mut account = Account::load_with_transaction(&mut transaction, account_id).await?; // Check our balance is correct assert_eq!(account.state().balance, 345); + // Demonstrate loading without a transaction (read-only access, more efficient) + let account_readonly: Context<Account> = repository.load(&account_id).await?; + assert_eq!(account_readonly.state().balance, 345); + println!("Successfully loaded account with non-transactional method"); + // Trying to apply the same event id but with different content gives us an IdempotencyError let changed_add_event = AccountEvent::Add { event_id: add_event_id, amount: 123, }; - let err = account - .record_that(&mut transaction, changed_add_event) - .await - .expect_err("failed to get error"); + account.record_that(changed_add_event)?; - assert!(matches!(err, RecordError::IdempotencyError(_, _))); + // Applying the already applied event with different content should fail with an IdempotencyError + let error = transaction + .store(&mut account) + .await + .expect_err("Failed to get idempotency error"); - // Applying the already applied event, will be ignored and return Ok - account.record_that(&mut transaction, add_event).await?; + assert!(matches!(error, SaveError::IdempotencyError(_, _))); transaction.commit().await?; let mut transaction = repository.begin_transaction().await?; - let account: Context<Account> = transaction.get(&account_id).await?; + let mut transaction_2 =
repository.begin_transaction().await?; + + let mut old_account_version: Context<Account> = transaction_2.get(&account_id).await?; + + let mut account: Context<Account> = transaction.get(&account_id).await?; // Balance hasn't changed since the event wasn't actually applied assert_eq!(account.state().balance, 345); println!("Got account {account:?}"); + // Apply a new add event and save to our db. This should have version number 2 + let add_event = AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 456, + }; + + account.record_that(add_event)?; + transaction.store(&mut account).await?; + transaction.commit().await?; + + // Attempt to apply another event to our aggregate, but with an out of date version number + // This happens normally when two applies are executed concurrently + // This will attempt to apply a different event with a version number 2 + // This should fail with an optimistic concurrency error + let add_event = AccountEvent::Add { + event_id: Uuid::new_v4(), + amount: 789, + }; + + old_account_version.record_that(add_event)?; + + let err = transaction_2 + .store(&mut old_account_version) + .await + .expect_err("Failed to get optimistic concurrency error"); + + assert!(matches!( + err, + SaveError::OptimisticConcurrency(id, version) if id == account_id && version == 2 + )); + + transaction_2.commit().await?; + + // Demonstrate side effect regeneration + + // Regenerate side effects for the account open event + let regenerated_side_effects = Context::<Account>::regenerate_side_effects( + &mut repository.clone(), + &account_id, + &event_id, // This is the Open event ID from the beginning + ) + .await?; + + println!("Original event ID: {event_id}"); + + if let Some(side_effects) = regenerated_side_effects { + println!( + "Successfully regenerated {} side effect(s)", + side_effects.len() + ); + + for effect in side_effects { + match &effect { + SideEffects::SendEmail { + id, + address, + content, + } => { + // Verify this matches what we expect + assert_eq!(id, &event_id); + 
assert_eq!(address, "user@example.com"); + assert!(content.contains(&account_id.to_string())); + assert!(content.contains("21")); // starting balance + } + SideEffects::PublishMessage { .. } => { + println!(" - PublishMessage (unexpected for Open event)"); + } + } + } + } else { + println!("No side effects were regenerated (this shouldn't happen for Open event)"); + } + + // Also demonstrate regenerating for an event that doesn't produce side effects + println!("\nRegenerating side effects for Add event:"); + let no_side_effects = Context::<Account>::regenerate_side_effects( + &mut repository.clone(), + &account_id, + &add_event_id, + ) + .await?; + + match no_side_effects { + Some(effects) => { + println!( + "Unexpected: {} side effects generated for Add event", + effects.len() + ); + } + None => { + println!("No side effects generated for Add event (as expected)"); + } + } + tokio::time::sleep(std::time::Duration::from_secs(30)).await; Ok(()) } @@ -138,7 +237,8 @@ pub enum AccountEvent { }, } -impl Event for AccountEvent { +impl DomainEvent for AccountEvent { + type EventId = Uuid; fn id(&self) -> &Uuid { match self { AccountEvent::Open { event_id, .. } @@ -173,41 +273,25 @@ pub enum SideEffects { impl SideEffect for SideEffects { /// The type used to uniquely identify this side effect. - type Id = Uuid; - /// The error type that can be returned when calling a [`SideEffectHandler::handle`] - type Error = SideEffectError; + type SideEffectId = Uuid; - fn id(&self) -> &Self::Id { + fn id(&self) -> &Self::SideEffectId { match self { SideEffects::PublishMessage { id, .. } | SideEffects::SendEmail { id, .. 
} => id, } } } -// Define our side effect errors -#[derive(Error, Debug)] -pub enum SideEffectError { - #[error("Failed to publish message")] - PublishMessageError, - #[error("Failed to send email")] - SendEmailError, -} - -pub struct SideEffectContext {} +pub struct SideEffectContext; -#[async_trait] +#[async_trait::async_trait] impl SideEffectHandler for SideEffectContext { type SideEffect = SideEffects; + type Error = (); - /// Handle a side effect - /// If Ok(()) is returned, the side effect is complete and it will be deleted from the repository. - /// If Err((true, Error)) is returned, the side effect be will requeued - /// if Err((false, Error)) is returned, the side effect won't be requeued - async fn handle(&self, msg: &SideEffects, retires: u16) -> Result<(), (bool, SideEffectError)> { - println!("Got side effect message {msg:?} with retires {retires}"); - let requeue = retires < 3; - - Err((requeue, SideEffectError::PublishMessageError)) + async fn handle(&self, msg: &SideEffects, retries: u16) -> Result<(), (bool, Self::Error)> { + println!("handling side effect {:?} retries {}", msg, retries); + Ok(()) } } @@ -224,9 +308,6 @@ impl Aggregate for Account { /// Usually, this type should be an `enum`. type DomainEvent = AccountEvent; - /// The type used to uniquely identify the a given domain event. - type DomainEventId = Uuid; - /// The error type that can be returned by [`Aggregate::apply`] when /// mutating the Aggregate state. 
type ApplyError = DomainError; @@ -303,13 +384,21 @@ impl Aggregate for Account { } } -async fn get_repository() -> PostgresRepository { +// Using the default outbox implementation +// You can also implement your own outbox handler by implementing the `SideEffectStorage` trait +async fn get_repository() -> PostgresRepository<TableOutbox<NoEncryption>, NoEncryption> { let connection_options = PgConnectOptions::from_str("postgres://postgres:password@localhost/postgres").unwrap(); let pool_options = PoolOptions::default(); - PostgresRepository::new(connection_options, pool_options) - .await - .unwrap() + PostgresRepository::new( + connection_options, + pool_options, + TableConfig::new("events", "snapshots"), + TableOutbox::new(NoEncryption), + NoEncryption, + ) + .await + .unwrap() } diff --git a/justfile b/justfile new file mode 100644 index 0000000..debb471 --- /dev/null +++ b/justfile @@ -0,0 +1,29 @@ +# List available recipes +default: + @just db + +# Start PostgreSQL database using Docker +db: + @bash scripts/postgres.sh + sqlx migrate run --source eventastic_postgres/migrations/ + +# Build the project +build: + cargo build + +# Build with optimizations +build-release: + cargo build --release + +# Run all tests +test: db + cargo test --workspace --all-features + +# Run clippy lints +lint: + cargo fmt --all + cargo clippy --workspace --all-features -- -D warnings + +# Run example +example example="bank": + cd examples/{{example}} && cargo run diff --git a/scripts/postgres.sh b/scripts/postgres.sh index 0831259..61781cf 100755 --- a/scripts/postgres.sh +++ b/scripts/postgres.sh @@ -1,5 +1,6 @@ -if ! nc -vz 0.0.0.0 5432; then +if ! nc -z 0.0.0.0 5432; then docker run -d -p 5432:5432 -e POSTGRES_PASSWORD=password postgres +fi -fi\ No newline at end of file +echo "Postgres is running on port 5432"\ No newline at end of file