diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ad75d79f..4b131ac9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,6 +13,7 @@ jobs: - test-mysql - test-mysql-async - test-tiberius + - test-duckdb - doc steps: - run: exit 0 @@ -54,6 +55,20 @@ jobs: - run: cargo install --path ./refinery_cli --no-default-features --features=sqlite-bundled - run: cd refinery && cargo test --features rusqlite --test rusqlite + test-duckdb: + name: Test duckdb + runs-on: ubuntu-latest + strategy: + matrix: + rust: [stable, nightly, 1.65.0] + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ matrix.rust }} + - run: cargo install --path ./refinery_cli --no-default-features --features=duckdb + - run: cd refinery && cargo test --features duckdb --test duckdb + test-postgres: name: Test postgres runs-on: ubuntu-latest diff --git a/refinery/Cargo.toml b/refinery/Cargo.toml index a18ecf4c..8d6d3e7c 100644 --- a/refinery/Cargo.toml +++ b/refinery/Cargo.toml @@ -2,7 +2,10 @@ name = "refinery" version = "0.8.11" rust-version = "1.56" -authors = ["Katharina Fey ", "João Oliveira "] +authors = [ + "Katharina Fey ", + "João Oliveira ", +] license = "MIT" description = "Powerful SQL migration toolkit for Rust" readme = "README.md" @@ -13,7 +16,7 @@ categories = ["database"] edition = "2018" [features] -default = [] +default = [] rusqlite-bundled = ["refinery-core/rusqlite-bundled"] rusqlite = ["refinery-core/rusqlite"] postgres = ["refinery-core/postgres"] @@ -22,13 +25,19 @@ tokio-postgres = ["refinery-core/tokio-postgres"] mysql_async = ["refinery-core/mysql_async"] tiberius = ["refinery-core/tiberius"] tiberius-config = ["refinery-core/tiberius", "refinery-core/tiberius-config"] +duckdb = ["refinery-core/duckdb"] [dependencies] refinery-core = { version = "0.8.11", path = "../refinery_core" } refinery-macros = { version = "0.8.11", path = "../refinery_macros" } [dev-dependencies] -barrel = { 
git = "https://github.com/jxs/barrel", features = ["sqlite3", "pg", "mysql", "mssql"] } +barrel = { git = "https://github.com/jxs/barrel", features = [ + "sqlite3", + "pg", + "mysql", + "mssql", +] } futures = "0.3" assert_cmd = "2.0" predicates = "3" diff --git a/refinery/tests/duckdb.rs b/refinery/tests/duckdb.rs new file mode 100644 index 00000000..8f5c6249 --- /dev/null +++ b/refinery/tests/duckdb.rs @@ -0,0 +1,851 @@ +#[cfg(feature = "duckdb")] +mod ddb { + use assert_cmd::prelude::*; + use predicates::str::contains; + use refinery::{ + config::{Config, ConfigDbType}, + embed_migrations, + error::Kind, + Migrate, Migration, Runner, Target, + }; + use refinery_core::duckdb::Error; + use refinery_core::duckdb::{Connection, OptionalExt}; + use std::fs::{self, File}; + use std::process::Command; + use time::OffsetDateTime; + + const DEFAULT_TABLE_NAME: &str = "refinery_schema_history"; + + mod embedded { + use refinery::embed_migrations; + embed_migrations!("./tests/sql_migrations"); + } + + mod broken { + use refinery::embed_migrations; + embed_migrations!("./tests/migrations_broken"); + } + + mod missing { + use refinery::embed_migrations; + embed_migrations!("./tests/migrations_missing"); + } + + fn run_test(test: T) + where + T: FnOnce() + std::panic::UnwindSafe, + { + let filepath = "tests/db.sql"; + File::create(filepath).unwrap(); + + let result = std::panic::catch_unwind(|| test()); + + fs::remove_file(filepath).unwrap(); + + assert!(result.is_ok()) + } + + fn get_migrations() -> Vec { + embed_migrations!("./tests/sql_migrations"); + + let migration1 = Migration::unapplied( + "V1__initial.sql", + include_str!("./sql_migrations/V1-2/V1__initial.sql"), + ) + .unwrap(); + + let migration2 = Migration::unapplied( + "V2__add_cars_and_motos_table.sql", + include_str!("./sql_migrations/V1-2/V2__add_cars_and_motos_table.sql"), + ) + .unwrap(); + + let migration3 = Migration::unapplied( + "V3__add_brand_to_cars_table", + 
include_str!("./sql_migrations/V3/V3__add_brand_to_cars_table.sql"), + ) + .unwrap(); + + let migration4 = Migration::unapplied( + "V4__add_year_to_motos_table.rs", + include_str!("./sql_migrations/V4__add_year_to_motos_table.sql"), + ) + .unwrap(); + + let migration5 = Migration::unapplied( + "V5__add_year_field_to_cars", + "ALTER TABLE cars ADD year INTEGER;", + ) + .unwrap(); + + vec![migration1, migration2, migration3, migration4, migration5] + } + + #[test] + fn report_contains_applied_migrations() { + let mut conn = Connection::open_in_memory().unwrap(); + let report = embedded::migrations::runner().run(&mut conn).unwrap(); + + let migrations = get_migrations(); + let applied_migrations = report.applied_migrations(); + + assert_eq!(4, applied_migrations.len()); + + assert_eq!(migrations[0].version(), applied_migrations[0].version()); + assert_eq!(migrations[1].version(), applied_migrations[1].version()); + assert_eq!(migrations[2].version(), applied_migrations[2].version()); + assert_eq!(migrations[3].version(), applied_migrations[3].version()); + + assert_eq!(migrations[0].name(), migrations[0].name()); + assert_eq!(migrations[1].name(), applied_migrations[1].name()); + assert_eq!(migrations[2].name(), applied_migrations[2].name()); + assert_eq!(migrations[3].name(), applied_migrations[3].name()); + + assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum()); + assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum()); + assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum()); + assert_eq!(migrations[3].checksum(), applied_migrations[3].checksum()); + } + + #[test] + fn report_contains_applied_migrations_iter() { + let mut conn = Connection::open_in_memory().unwrap(); + let applied_migrations = embedded::migrations::runner() + .run_iter(&mut conn) + .collect::, _>>() + .unwrap(); + + let migrations = get_migrations(); + + assert_eq!(4, applied_migrations.len()); + + assert_eq!(migrations[0].version(), 
applied_migrations[0].version()); + assert_eq!(migrations[1].version(), applied_migrations[1].version()); + assert_eq!(migrations[2].version(), applied_migrations[2].version()); + assert_eq!(migrations[3].version(), applied_migrations[3].version()); + + assert_eq!(migrations[0].name(), migrations[0].name()); + assert_eq!(migrations[1].name(), applied_migrations[1].name()); + assert_eq!(migrations[2].name(), applied_migrations[2].name()); + assert_eq!(migrations[3].name(), applied_migrations[3].name()); + + assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum()); + assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum()); + assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum()); + assert_eq!(migrations[3].checksum(), applied_migrations[3].checksum()); + } + + #[test] + fn creates_migration_table() { + let mut conn = Connection::open_in_memory().unwrap(); + embedded::migrations::runner().run(&mut conn).unwrap(); + let table_name: String = conn + .query_row( + &format!( + "SELECT table_name FROM information_schema.tables WHERE table_name='{}'", + DEFAULT_TABLE_NAME + ), + [], + |row| row.get(0), + ) + .unwrap(); + assert_eq!(DEFAULT_TABLE_NAME, table_name); + } + + #[test] + fn creates_migration_table_iter() { + let mut conn = Connection::open_in_memory().unwrap(); + embedded::migrations::runner() + .run_iter(&mut conn) + .collect::, _>>() + .unwrap(); + let table_name: String = conn + .query_row( + &format!( + "SELECT table_name FROM information_schema.tables WHERE table_name='{}'", + DEFAULT_TABLE_NAME + ), + [], + |row| row.get(0), + ) + .unwrap(); + assert_eq!(DEFAULT_TABLE_NAME, table_name); + } + + #[test] + fn creates_migration_table_grouped_transaction() { + let mut conn = Connection::open_in_memory().unwrap(); + embedded::migrations::runner() + .set_grouped(true) + .run(&mut conn) + .unwrap(); + let table_name: String = conn + .query_row( + &format!( + "SELECT table_name FROM information_schema.tables WHERE 
table_name='{}'", + DEFAULT_TABLE_NAME + ), + [], + |row| row.get(0), + ) + .unwrap(); + assert_eq!(DEFAULT_TABLE_NAME, table_name); + } + + #[test] + fn applies_migration() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner().run(&mut conn).unwrap(); + + conn.execute( + "INSERT INTO persons (name, city) VALUES (?, ?)", + &["John Legend", "New York"], + ) + .unwrap(); + let (name, city): (String, String) = conn + .query_row("SELECT name, city FROM persons", [], |row| { + Ok((row.get(0).unwrap(), row.get(1).unwrap())) + }) + .unwrap(); + assert_eq!("John Legend", name); + assert_eq!("New York", city); + } + + #[test] + fn applies_migration_iter() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner() + .run_iter(&mut conn) + .collect::, _>>() + .unwrap(); + + conn.execute( + "INSERT INTO persons (name, city) VALUES (?, ?)", + &["John Legend", "New York"], + ) + .unwrap(); + let (name, city): (String, String) = conn + .query_row("SELECT name, city FROM persons", [], |row| { + Ok((row.get(0).unwrap(), row.get(1).unwrap())) + }) + .unwrap(); + assert_eq!("John Legend", name); + assert_eq!("New York", city); + } + + #[test] + fn applies_migration_grouped_transaction() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner() + .set_grouped(true) + .run(&mut conn) + .unwrap(); + + conn.execute( + "INSERT INTO persons (name, city) VALUES (?, ?)", + &["John Legend", "New York"], + ) + .unwrap(); + let (name, city): (String, String) = conn + .query_row("SELECT name, city FROM persons", [], |row| { + Ok((row.get(0).unwrap(), row.get(1).unwrap())) + }) + .unwrap(); + assert_eq!("John Legend", name); + assert_eq!("New York", city); + } + + #[test] + fn updates_schema_history() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner().run(&mut conn).unwrap(); + + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + 
.unwrap() + .unwrap(); + + assert_eq!(4, current.version()); + + assert_eq!( + OffsetDateTime::now_utc().date(), + current.applied_on().unwrap().date() + ); + } + + #[test] + fn updates_schema_history_iter() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner() + .run_iter(&mut conn) + .collect::, _>>() + .unwrap(); + + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + + assert_eq!(4, current.version()); + + assert_eq!( + OffsetDateTime::now_utc().date(), + current.applied_on().unwrap().date() + ); + } + + #[test] + fn updates_schema_history_grouped_transaction() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner() + .set_grouped(true) + .run(&mut conn) + .unwrap(); + + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + + assert_eq!(4, current.version()); + + assert_eq!( + OffsetDateTime::now_utc().date(), + current.applied_on().unwrap().date() + ); + } + + #[test] + fn updates_to_last_working_if_not_grouped() { + let mut conn = Connection::open_in_memory().unwrap(); + + let result = broken::migrations::runner().run(&mut conn); + + assert!(result.is_err()); + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + + let err = result.unwrap_err(); + let migrations = get_migrations(); + let applied_migrations = err.report().unwrap().applied_migrations(); + + assert_eq!( + OffsetDateTime::now_utc().date(), + current.applied_on().unwrap().date() + ); + assert_eq!(2, current.version()); + assert_eq!(2, applied_migrations.len()); + + assert_eq!(1, applied_migrations[0].version()); + assert_eq!(2, applied_migrations[1].version()); + + assert_eq!("initial", migrations[0].name()); + assert_eq!("add_cars_table", applied_migrations[1].name()); + + assert_eq!(2959965718684201605, applied_migrations[0].checksum()); + assert_eq!(8238603820526370208, 
applied_migrations[1].checksum()); + } + #[test] + + fn updates_to_last_working_if_iter() { + let mut conn = Connection::open_in_memory().unwrap(); + + let result: Result, _> = broken::migrations::runner().run_iter(&mut conn).collect(); + + assert!(result.is_err()); + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + + let err = result.unwrap_err(); + let migrations = get_migrations(); + let applied_migrations = broken::migrations::runner() + .get_applied_migrations(&mut conn) + .unwrap(); + + assert_eq!( + OffsetDateTime::now_utc().date(), + current.applied_on().unwrap().date() + ); + assert_eq!(2, current.version()); + assert!(err.report().unwrap().applied_migrations().is_empty()); + assert_eq!(2, applied_migrations.len()); + + assert_eq!(1, applied_migrations[0].version()); + assert_eq!(2, applied_migrations[1].version()); + + assert_eq!("initial", migrations[0].name()); + assert_eq!("add_cars_table", applied_migrations[1].name()); + + assert_eq!(2959965718684201605, applied_migrations[0].checksum()); + assert_eq!(8238603820526370208, applied_migrations[1].checksum()); + } + + #[test] + fn doesnt_update_to_last_working_if_grouped() { + let mut conn = Connection::open_in_memory().unwrap(); + + let result = broken::migrations::runner() + .set_grouped(true) + .run(&mut conn); + + assert!(result.is_err()); + let query: Option = conn + .query_row("SELECT version FROM refinery_schema_history", [], |row| { + row.get(0) + }) + .optional() + .unwrap(); + assert!(query.is_none()); + } + + #[test] + fn gets_applied_migrations() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner().run(&mut conn).unwrap(); + + let migrations = get_migrations(); + let applied_migrations = conn.get_applied_migrations(DEFAULT_TABLE_NAME).unwrap(); + assert_eq!(4, applied_migrations.len()); + + assert_eq!(migrations[0].version(), applied_migrations[0].version()); + assert_eq!(migrations[1].version(), 
applied_migrations[1].version()); + assert_eq!(migrations[2].version(), applied_migrations[2].version()); + assert_eq!(migrations[3].version(), applied_migrations[3].version()); + + assert_eq!(migrations[0].name(), migrations[0].name()); + assert_eq!(migrations[1].name(), applied_migrations[1].name()); + assert_eq!(migrations[2].name(), applied_migrations[2].name()); + assert_eq!(migrations[3].name(), applied_migrations[3].name()); + + assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum()); + assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum()); + assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum()); + assert_eq!(migrations[3].checksum(), applied_migrations[3].checksum()); + } + + #[test] + fn applies_new_migration() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner().run(&mut conn).unwrap(); + + let migrations = get_migrations(); + + let mchecksum = migrations[4].checksum(); + conn.migrate( + &migrations, + true, + true, + false, + Target::Latest, + DEFAULT_TABLE_NAME, + ) + .unwrap(); + + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + + assert_eq!(5, current.version()); + assert_eq!(mchecksum, current.checksum()); + } + + #[test] + fn migrates_to_target_migration() { + let mut conn = Connection::open_in_memory().unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::Version(3)) + .run(&mut conn) + .unwrap(); + + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + + let applied_migrations = report.applied_migrations(); + let migrations = get_migrations(); + + assert_eq!(3, current.version()); + + assert_eq!(3, applied_migrations.len()); + + assert_eq!(migrations[0].version(), applied_migrations[0].version()); + assert_eq!(migrations[1].version(), applied_migrations[1].version()); + assert_eq!(migrations[2].version(), applied_migrations[2].version()); + + 
assert_eq!(migrations[0].name(), migrations[0].name()); + assert_eq!(migrations[1].name(), applied_migrations[1].name()); + assert_eq!(migrations[2].name(), applied_migrations[2].name()); + + assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum()); + assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum()); + assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum()); + } + + #[test] + fn migrates_to_target_migration_iter() { + let mut conn = Connection::open_in_memory().unwrap(); + + let applied_migrations = embedded::migrations::runner() + .set_target(Target::Version(3)) + .run_iter(&mut conn) + .collect::, _>>() + .unwrap(); + + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + + let migrations = get_migrations(); + + assert_eq!(3, current.version()); + + assert_eq!(3, applied_migrations.len()); + + assert_eq!(migrations[0].version(), applied_migrations[0].version()); + assert_eq!(migrations[1].version(), applied_migrations[1].version()); + assert_eq!(migrations[2].version(), applied_migrations[2].version()); + + assert_eq!(migrations[0].name(), migrations[0].name()); + assert_eq!(migrations[1].name(), applied_migrations[1].name()); + assert_eq!(migrations[2].name(), applied_migrations[2].name()); + + assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum()); + assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum()); + assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum()); + } + + #[test] + fn migrates_to_target_migration_grouped() { + let mut conn = Connection::open_in_memory().unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::Version(3)) + .set_grouped(true) + .run(&mut conn) + .unwrap(); + + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + + let applied_migrations = report.applied_migrations(); + let migrations = get_migrations(); + + assert_eq!(3, 
current.version()); + + assert_eq!(3, applied_migrations.len()); + + assert_eq!(migrations[0].version(), applied_migrations[0].version()); + assert_eq!(migrations[1].version(), applied_migrations[1].version()); + assert_eq!(migrations[2].version(), applied_migrations[2].version()); + + assert_eq!(migrations[0].name(), migrations[0].name()); + assert_eq!(migrations[1].name(), applied_migrations[1].name()); + assert_eq!(migrations[2].name(), applied_migrations[2].name()); + + assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum()); + assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum()); + assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum()); + } + + #[test] + fn aborts_on_missing_migration_on_filesystem() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner().run(&mut conn).unwrap(); + + let migration = Migration::unapplied( + "V4__add_year_field_to_cars", + "ALTER TABLE cars ADD year INTEGER;", + ) + .unwrap(); + let err = conn + .migrate( + &[migration], + true, + true, + false, + Target::Latest, + DEFAULT_TABLE_NAME, + ) + .unwrap_err(); + + match err.kind() { + Kind::MissingVersion(missing) => { + assert_eq!(1, missing.version()); + assert_eq!("initial", missing.name()); + } + _ => panic!("failed test"), + } + } + + #[test] + fn aborts_on_divergent_migration() { + let mut conn = Connection::open_in_memory().unwrap(); + + embedded::migrations::runner().run(&mut conn).unwrap(); + + let migration = Migration::unapplied( + "V2__add_year_field_to_cars", + "ALTER TABLE cars ADD year INTEGER;", + ) + .unwrap(); + let err = conn + .migrate( + &[migration.clone()], + true, + false, + false, + Target::Latest, + DEFAULT_TABLE_NAME, + ) + .unwrap_err(); + + match err.kind() { + Kind::DivergentVersion(applied, divergent) => { + assert_eq!(&migration, divergent); + assert_eq!(2, applied.version()); + assert_eq!("add_cars_and_motos_table", applied.name()); + } + _ => panic!("failed test"), 
+ } + } + + #[test] + fn aborts_on_missing_migration_on_database() { + let mut conn = Connection::open_in_memory().unwrap(); + + missing::migrations::runner().run(&mut conn).unwrap(); + + let migration1 = Migration::unapplied( + "V1__initial", + concat!( + "CREATE TABLE persons (", + "id int,", + "name varchar(255),", + "city varchar(255)", + ");" + ), + ) + .unwrap(); + + let migration2 = Migration::unapplied( + "V2__add_cars_table", + include_str!("./migrations_missing/V2__add_cars_table.sql"), + ) + .unwrap(); + let err = conn + .migrate( + &[migration1, migration2], + true, + true, + false, + Target::Latest, + DEFAULT_TABLE_NAME, + ) + .unwrap_err(); + match err.kind() { + Kind::MissingVersion(missing) => { + assert_eq!(1, missing.version()); + assert_eq!("initial", missing.name()); + } + _ => panic!("failed test"), + } + } + + #[test] + fn migrates_from_config() { + let db = tempfile::tempdir().unwrap(); + let path = db.path().join("test.db"); + let mut config = Config::new(ConfigDbType::Duckdb).set_db_path(path.to_str().unwrap()); + + let migrations = get_migrations(); + let runner = Runner::new(&migrations) + .set_grouped(false) + .set_abort_divergent(true) + .set_abort_missing(true); + + runner.run(&mut config).unwrap(); + + let applied_migrations = runner.get_applied_migrations(&mut config).unwrap(); + assert_eq!(5, applied_migrations.len()); + + assert_eq!(migrations[0].version(), applied_migrations[0].version()); + assert_eq!(migrations[1].version(), applied_migrations[1].version()); + assert_eq!(migrations[2].version(), applied_migrations[2].version()); + assert_eq!(migrations[3].version(), applied_migrations[3].version()); + assert_eq!(migrations[4].version(), applied_migrations[4].version()); + + assert_eq!(migrations[0].name(), migrations[0].name()); + assert_eq!(migrations[1].name(), applied_migrations[1].name()); + assert_eq!(migrations[2].name(), applied_migrations[2].name()); + assert_eq!(migrations[3].name(), applied_migrations[3].name()); + 
assert_eq!(migrations[4].name(), applied_migrations[4].name()); + + assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum()); + assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum()); + assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum()); + assert_eq!(migrations[3].checksum(), applied_migrations[3].checksum()); + assert_eq!(migrations[4].checksum(), applied_migrations[4].checksum()); + } + + #[test] + fn migrate_from_config_report_contains_migrations() { + let db = tempfile::tempdir().unwrap(); + let path = db.path().join("test.db"); + let mut config = Config::new(ConfigDbType::Duckdb).set_db_path(path.to_str().unwrap()); + + let migrations = get_migrations(); + let runner = Runner::new(&migrations) + .set_grouped(false) + .set_abort_divergent(true) + .set_abort_missing(true); + + let report = runner.run(&mut config).unwrap(); + + let applied_migrations = report.applied_migrations(); + assert_eq!(5, applied_migrations.len()); + + assert_eq!(migrations[0].version(), applied_migrations[0].version()); + assert_eq!(migrations[1].version(), applied_migrations[1].version()); + assert_eq!(migrations[2].version(), applied_migrations[2].version()); + assert_eq!(migrations[3].version(), applied_migrations[3].version()); + assert_eq!(migrations[4].version(), applied_migrations[4].version()); + + assert_eq!(migrations[0].name(), migrations[0].name()); + assert_eq!(migrations[1].name(), applied_migrations[1].name()); + assert_eq!(migrations[2].name(), applied_migrations[2].name()); + assert_eq!(migrations[3].name(), applied_migrations[3].name()); + assert_eq!(migrations[4].name(), applied_migrations[4].name()); + + assert_eq!(migrations[0].checksum(), applied_migrations[0].checksum()); + assert_eq!(migrations[1].checksum(), applied_migrations[1].checksum()); + assert_eq!(migrations[2].checksum(), applied_migrations[2].checksum()); + assert_eq!(migrations[3].checksum(), applied_migrations[3].checksum()); + 
assert_eq!(migrations[4].checksum(), applied_migrations[4].checksum()); + } + + #[test] + fn migrate_from_config_report_returns_last_applied_migration() { + let db = tempfile::tempdir().unwrap(); + let path = db.path().join("test.db"); + let mut config = Config::new(ConfigDbType::Duckdb).set_db_path(path.to_str().unwrap()); + + let migrations = get_migrations(); + let runner = Runner::new(&migrations) + .set_grouped(false) + .set_abort_divergent(true) + .set_abort_missing(true); + + runner.run(&mut config).unwrap(); + + let applied_migration = runner + .get_last_applied_migration(&mut config) + .unwrap() + .unwrap(); + assert_eq!(5, applied_migration.version()); + + assert_eq!(migrations[4].version(), applied_migration.version()); + assert_eq!(migrations[4].name(), applied_migration.name()); + assert_eq!(migrations[4].checksum(), applied_migration.checksum()); + } + + #[test] + fn doesnt_run_migrations_if_fake_version() { + let mut conn = Connection::open_in_memory().unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::FakeVersion(2)) + .run(&mut conn) + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + assert!(applied_migrations.is_empty()); + + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[1].checksum(); + + assert_eq!(2, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let err: Result = conn.query_row( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + [], + |row| row.get(0), + ); + + assert!(matches!(err.unwrap_err(), Error::QueryReturnedNoRows)); + } + + #[test] + fn doesnt_run_migrations_if_fake() { + let mut conn = Connection::open_in_memory().unwrap(); + + let report = embedded::migrations::runner() + .set_target(Target::Fake) + .run(&mut conn) + .unwrap(); + + let applied_migrations = report.applied_migrations(); + + 
assert!(applied_migrations.is_empty()); + + let current = conn + .get_last_applied_migration(DEFAULT_TABLE_NAME) + .unwrap() + .unwrap(); + let migrations = get_migrations(); + let mchecksum = migrations[3].checksum(); + + assert_eq!(4, current.version()); + assert_eq!(mchecksum, current.checksum()); + + let err: Result = conn.query_row( + "SELECT table_name FROM information_schema.tables WHERE table_name='persons'", + [], + |row| row.get(0), + ); + + assert!(matches!(err.unwrap_err(), Error::QueryReturnedNoRows)); + } + + #[test] + fn migrates_from_cli() { + run_test(|| { + Command::new("refinery") + .args(&[ + "migrate", + "-c", + "tests/duckdb_refinery.toml", + "-p", + "tests/migrations", + ]) + .unwrap() + .assert() + .stdout(contains("applying migration: V2__add_cars_and_motos_table")) + .stdout(contains("applying migration: V3__add_brand_to_cars_table")); + }) + } +} diff --git a/refinery/tests/duckdb_refinery.toml b/refinery/tests/duckdb_refinery.toml new file mode 100644 index 00000000..b17d99f0 --- /dev/null +++ b/refinery/tests/duckdb_refinery.toml @@ -0,0 +1,4 @@ +[main] +envix = "Develop" +db_type = "Duckdb" +db_path = "./db.sql" diff --git a/refinery/tests/sql_migrations/V1-2/V1__initial.sql b/refinery/tests/sql_migrations/V1-2/V1__initial.sql new file mode 100644 index 00000000..f056ee8b --- /dev/null +++ b/refinery/tests/sql_migrations/V1-2/V1__initial.sql @@ -0,0 +1,7 @@ +CREATE SEQUENCE persons_id_seq; + +CREATE TABLE persons( + id integer PRIMARY KEY DEFAULT nextval('persons_id_seq'), + name varchar(255), + city varchar(255) +); diff --git a/refinery/tests/sql_migrations/V1-2/V2__add_cars_and_motos_table.sql b/refinery/tests/sql_migrations/V1-2/V2__add_cars_and_motos_table.sql new file mode 100644 index 00000000..13390796 --- /dev/null +++ b/refinery/tests/sql_migrations/V1-2/V2__add_cars_and_motos_table.sql @@ -0,0 +1,8 @@ +CREATE TABLE cars ( + id int, + name varchar(255) +); +CREATE TABLE motos ( + id int, + name varchar(255) +); diff --git 
a/refinery/tests/sql_migrations/V3/V3__add_brand_to_cars_table.sql b/refinery/tests/sql_migrations/V3/V3__add_brand_to_cars_table.sql new file mode 100644 index 00000000..c689a6aa --- /dev/null +++ b/refinery/tests/sql_migrations/V3/V3__add_brand_to_cars_table.sql @@ -0,0 +1,2 @@ +ALTER TABLE cars +ADD brand varchar(255); diff --git a/refinery/tests/sql_migrations/V4__add_year_to_motos_table.sql b/refinery/tests/sql_migrations/V4__add_year_to_motos_table.sql new file mode 100644 index 00000000..964d1bf3 --- /dev/null +++ b/refinery/tests/sql_migrations/V4__add_year_to_motos_table.sql @@ -0,0 +1,2 @@ +ALTER TABLE motos + ADD COLUMN brand VARCHAR(255); diff --git a/refinery_cli/Cargo.toml b/refinery_cli/Cargo.toml index 285a3098..93663e7b 100644 --- a/refinery_cli/Cargo.toml +++ b/refinery_cli/Cargo.toml @@ -15,12 +15,13 @@ name = "refinery" path = "src/main.rs" [features] -default = ["mysql", "postgresql", "sqlite-bundled", "mssql"] +default = ["mysql", "postgresql", "sqlite-bundled", "mssql", "duckdb"] postgresql = ["refinery-core/postgres"] mysql = ["refinery-core/mysql"] sqlite = ["refinery-core/rusqlite"] sqlite-bundled = ["sqlite", "refinery-core/rusqlite-bundled"] mssql = ["refinery-core/tiberius-config", "tokio"] +duckdb = ["refinery-core/duckdb"] [dependencies] refinery-core = { version = "0.8.11", path = "../refinery_core", default-features = false } diff --git a/refinery_cli/src/migrate.rs b/refinery_cli/src/migrate.rs index f35c5d64..5948d946 100644 --- a/refinery_cli/src/migrate.rs +++ b/refinery_cli/src/migrate.rs @@ -86,9 +86,12 @@ fn run_migrations( } } } - _db_type @ (ConfigDbType::Mysql | ConfigDbType::Postgres | ConfigDbType::Sqlite) => { + _db_type @ (ConfigDbType::Mysql + | ConfigDbType::Postgres + | ConfigDbType::Sqlite + | ConfigDbType::Duckdb) => { cfg_if::cfg_if! 
{ - if #[cfg(any(feature = "mysql", feature = "postgresql", feature = "sqlite"))] { + if #[cfg(any(feature = "mysql", feature = "postgresql", feature = "sqlite", feature = "duckdb"))] { Runner::new(&migrations) .set_grouped(grouped) .set_abort_divergent(divergent) diff --git a/refinery_core/Cargo.toml b/refinery_core/Cargo.toml index e32f4325..732631cc 100644 --- a/refinery_core/Cargo.toml +++ b/refinery_core/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "refinery-core" version = "0.8.11" -authors = ["Katharina Fey ", "João Oliveira "] +authors = [ + "Katharina Fey ", + "João Oliveira ", +] description = "This crate should not be used directly, it is internally related to Refinery" license = "MIT OR Apache-2.0" documentation = "https://docs.rs/refinery/" @@ -9,12 +12,13 @@ repository = "https://github.com/rust-db/refinery" edition = "2018" [features] -default = [] +default = [] rusqlite-bundled = ["rusqlite", "rusqlite/bundled"] tiberius = ["dep:tiberius", "futures", "tokio", "tokio/net"] tiberius-config = ["tiberius", "tokio", "tokio-util"] tokio-postgres = ["dep:tokio-postgres", "tokio", "tokio/rt"] mysql_async = ["dep:mysql_async"] +duckdb = ["dep:duckdb"] [dependencies] async-trait = "0.1" @@ -32,16 +36,26 @@ walkdir = "2.3.1" rusqlite = { version = ">= 0.23, <= 0.30", optional = true } postgres = { version = ">=0.17, <= 0.19", optional = true } tokio-postgres = { version = ">= 0.5, <= 0.7", optional = true } -mysql = { version = ">= 21.0.0, <= 24", optional = true, default-features = false, features = ["minimal"] } -mysql_async = { version = ">= 0.28, <= 0.33", optional = true, default-features = false, features = ["minimal"] } +mysql = { version = ">= 21.0.0, <= 24", optional = true, default-features = false, features = [ + "minimal", +] } +mysql_async = { version = ">= 0.28, <= 0.33", optional = true, default-features = false, features = [ + "minimal", +] } tiberius = { version = ">= 0.7, <= 0.12", optional = true, default-features = false } +duckdb = 
{ version = "0.9.2", optional = true, features = [ "bundled" ] } tokio = { version = "1.0", optional = true } futures = { version = "0.3.16", optional = true, features = ["async-await"] } tokio-util = { version = "0.7.7", features = ["compat"], optional = true } time = { version = "0.3.5", features = ["parsing", "formatting"] } [dev-dependencies] -barrel = { git = "https://github.com/jxs/barrel", features = ["sqlite3", "pg", "mysql", "mssql"] } +barrel = { git = "https://github.com/jxs/barrel", features = [ + "sqlite3", + "pg", + "mysql", + "mssql", +] } tempfile = "3.1.0" [package.metadata.docs.rs] diff --git a/refinery_core/src/config.rs b/refinery_core/src/config.rs index c1fac221..8bd0607c 100644 --- a/refinery_core/src/config.rs +++ b/refinery_core/src/config.rs @@ -20,6 +20,7 @@ pub enum ConfigDbType { Postgres, Sqlite, Mssql, + Duckdb, } impl Config { @@ -102,7 +103,7 @@ impl Config { } cfg_if::cfg_if! { - if #[cfg(feature = "rusqlite")] { + if #[cfg(any(feature = "rusqlite", feature = "duckdb"))] { pub(crate) fn db_path(&self) -> Option<&Path> { self.main.db_path.as_deref() } @@ -282,24 +283,28 @@ struct Main { pub(crate) fn build_db_url(name: &str, config: &Config) -> String { let mut url: String = name.to_string() + "://"; - if let Some(user) = &config.main.db_user { - url = url + user; + if let Some(user) = config.main.db_user.as_deref() { + url.push_str(user); } - if let Some(pass) = &config.main.db_pass { - url = url + ":" + pass; + if let Some(pass) = config.main.db_pass.as_deref() { + url.push(':'); + url.push_str(pass); } - if let Some(host) = &config.main.db_host { + if let Some(host) = config.main.db_host.as_deref() { if config.main.db_user.is_some() { - url = url + "@" + host; + url.push('@'); + url.push_str(host); } else { - url = url + host; + url.push_str(host); } } - if let Some(port) = &config.main.db_port { - url = url + ":" + port; + if let Some(port) = config.main.db_port.as_deref() { + url.push(':'); + url.push_str(port); } - if let 
Some(name) = &config.main.db_name { - url = url + "/" + name; + if let Some(name) = config.main.db_name.as_deref() { + url.push('/'); + url.push_str(name); } url } diff --git a/refinery_core/src/drivers/config.rs b/refinery_core/src/drivers/config.rs index 92a00582..b97b4557 100644 --- a/refinery_core/src/drivers/config.rs +++ b/refinery_core/src/drivers/config.rs @@ -48,7 +48,12 @@ impl AsyncQuery> for Config { } } // this is written as macro so that we don't have to deal with type signatures -#[cfg(any(feature = "mysql", feature = "postgres", feature = "rusqlite"))] +#[cfg(any( + feature = "mysql", + feature = "postgres", + feature = "rusqlite", + feature = "duckdb" +))] macro_rules! with_connection { ($config:ident, $op: expr) => { match $config.db_type() { @@ -90,6 +95,18 @@ macro_rules! with_connection { ConfigDbType::Mssql => { panic!("tried to synchronously migrate from config for a mssql database, but tiberius is an async driver"); } + ConfigDbType::Duckdb => { + cfg_if::cfg_if! { + if #[cfg(feature = "duckdb")] { + //may have been checked earlier on config parsing, even if not let it fail with a Rusqlite db file not found error + let path = $config.db_path().map(|p| p.to_path_buf()).unwrap_or_default(); + let conn = duckdb::Connection::open(path).migration_err("could not open database", None)?; + $op(conn) + } else { + panic!("tried to migrate from config for a sqlite database, but feature rusqlite not enabled!"); + } + } + } } } } @@ -154,12 +171,20 @@ macro_rules! 
with_connection_async { } } } + ConfigDbType::Duckdb => { + panic!("tried to migrate async from config for a duckdb database, but this feature is not implemented yet"); + } } } } // rewrite all the default methods as we overrode Transaction and Query -#[cfg(any(feature = "mysql", feature = "postgres", feature = "rusqlite"))] +#[cfg(any( + feature = "mysql", + feature = "postgres", + feature = "rusqlite", + feature = "duckdb" +))] impl crate::Migrate for Config { fn get_last_applied_migration( &mut self, diff --git a/refinery_core/src/drivers/ducdb.rs b/refinery_core/src/drivers/ducdb.rs new file mode 100644 index 00000000..406defc1 --- /dev/null +++ b/refinery_core/src/drivers/ducdb.rs @@ -0,0 +1,56 @@ +use crate::traits::sync::{Migrate, Query, Transaction}; +use crate::Migration; +use duckdb::{Connection as DqlConnection, Error as DqlError}; +use time::format_description::well_known::Rfc3339; +use time::OffsetDateTime; + +fn query_applied_migrations( + transaction: &DqlConnection, + query: &str, +) -> Result, DqlError> { + let mut stmt = transaction.prepare(query)?; + let mut rows = stmt.query([])?; + let mut applied = Vec::new(); + while let Some(row) = rows.next()? 
{ + let version = row.get(0)?; + let applied_on: String = row.get(2)?; + // Safe to call unwrap, as we stored it in RFC3339 format on the database + let applied_on = OffsetDateTime::parse(&applied_on, &Rfc3339).unwrap(); + + let checksum: String = row.get(3)?; + applied.push(Migration::applied( + version, + row.get(1)?, + applied_on, + checksum + .parse::() + .expect("checksum must be a valid u64"), + )); + } + Ok(applied) +} + +impl Transaction for DqlConnection { + type Error = DqlError; + fn execute(&mut self, queries: &[&str]) -> Result { + let transaction = self.transaction()?; + let mut count = 0; + for query in queries.iter() { + transaction.execute_batch(query)?; + count += 1; + } + transaction.commit()?; + Ok(count) + } +} + +impl Query> for DqlConnection { + fn query(&mut self, query: &str) -> Result, Self::Error> { + let transaction = self.transaction()?; + let applied = query_applied_migrations(&transaction, query)?; + transaction.commit()?; + Ok(applied) + } +} + +impl Migrate for DqlConnection {} diff --git a/refinery_core/src/drivers/mod.rs b/refinery_core/src/drivers/mod.rs index 867d4c4d..65dc8aff 100644 --- a/refinery_core/src/drivers/mod.rs +++ b/refinery_core/src/drivers/mod.rs @@ -16,4 +16,7 @@ pub mod mysql; #[cfg(feature = "tiberius")] pub mod tiberius; +#[cfg(feature = "duckdb")] +pub mod ducdb; + mod config; diff --git a/refinery_core/src/lib.rs b/refinery_core/src/lib.rs index 35d02a47..3a834320 100644 --- a/refinery_core/src/lib.rs +++ b/refinery_core/src/lib.rs @@ -28,3 +28,6 @@ pub use mysql_async; #[cfg(feature = "tiberius")] pub use tiberius; + +#[cfg(feature = "duckdb")] +pub use duckdb;