diff --git a/CHANGELOG.md b/CHANGELOG.md index 3fadc3d093..65ac125096 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,25 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 0.9.0-alpha.1 - 2025-05-19 + +Accumulated changes since the beginning of the alpha cycle. Effectively a draft CHANGELOG for the 0.9.0 release. + +This section will be replaced in subsequent alpha releases. See the Git history of this file for previous alphas. + +### Breaking + +* [[#3821]] Groundwork for 0.9.0-alpha.1 + * Increased MSRV to 1.86 and set rust-version [@abonander] + * Deleted deprecated combination runtime+TLS features (e.g. `runtime-tokio-native-tls`) + * Deleted re-export of unstable `TransactionManager` trait in `sqlx`. + * Not technically a breaking change because it's `#[doc(hidden)]`, + but [it _will_ break SeaORM][seaorm-2600] if not proactively fixed. + +[seaorm-2600]: https://github.com/SeaQL/sea-orm/issues/2600 + +[#3821]: https://github.com/launchbadge/sqlx/pull/3821 + ## 0.8.6 - 2025-05-19 9 pull requests were merged this release cycle. 
diff --git a/Cargo.lock b/Cargo.lock index 64634c69da..bb4bf14198 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3394,7 +3394,7 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.8.6" +version = "0.9.0-alpha.1" dependencies = [ "anyhow", "async-std", @@ -3424,7 +3424,7 @@ dependencies = [ [[package]] name = "sqlx-cli" -version = "0.8.6" +version = "0.9.0-alpha.1" dependencies = [ "anyhow", "assert_cmd", @@ -3448,7 +3448,7 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.6" +version = "0.9.0-alpha.1" dependencies = [ "async-io 1.13.0", "async-std", @@ -3624,7 +3624,7 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.6" +version = "0.9.0-alpha.1" dependencies = [ "proc-macro2", "quote", @@ -3635,7 +3635,7 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.8.6" +version = "0.9.0-alpha.1" dependencies = [ "async-std", "dotenvy", @@ -3659,7 +3659,7 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.6" +version = "0.9.0-alpha.1" dependencies = [ "atoi", "base64 0.22.1", @@ -3705,7 +3705,7 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.6" +version = "0.9.0-alpha.1" dependencies = [ "atoi", "base64 0.22.1", @@ -3751,7 +3751,7 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.6" +version = "0.9.0-alpha.1" dependencies = [ "atoi", "chrono", diff --git a/Cargo.toml b/Cargo.toml index 2419b50d97..111ee86f9c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,8 +23,9 @@ members = [ ] [workspace.package] -version = "0.8.6" +version = "0.9.0-alpha.1" license = "MIT OR Apache-2.0" +# TODO: upgrade to edition 2024 (after merging all pending PRs) edition = "2021" repository = "https://github.com/launchbadge/sqlx" keywords = ["database", "async", "postgres", "mysql", "sqlite"] @@ -35,8 +36,7 @@ authors = [ "Chloe Ross ", "Daniel Akhterov ", ] -# TODO: enable this for 0.9.0 -# rust-version = "1.80.0" +rust-version = "1.86.0" [package] name = "sqlx" @@ -48,6 
+48,7 @@ license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true +rust-version.workspace = true [package.metadata.docs.rs] features = ["all-databases", "_unstable-all-types", "sqlite-preupdate-hook"] @@ -91,14 +92,6 @@ tls-rustls-ring-native-roots = ["sqlx-core/_tls-rustls-ring-native-roots", "sqlx # No-op feature used by the workflows to compile without TLS enabled. Not meant for general use. tls-none = [] -# Legacy Runtime + TLS features - -runtime-async-std-native-tls = ["runtime-async-std", "tls-native-tls"] -runtime-async-std-rustls = ["runtime-async-std", "tls-rustls-ring"] - -runtime-tokio-native-tls = ["runtime-tokio", "tls-native-tls"] -runtime-tokio-rustls = ["runtime-tokio", "tls-rustls-ring"] - # for conditional compilation _rt-async-std = [] _rt-tokio = [] @@ -129,17 +122,17 @@ bstr = ["sqlx-core/bstr"] [workspace.dependencies] # Core Crates -sqlx-core = { version = "=0.8.6", path = "sqlx-core" } -sqlx-macros-core = { version = "=0.8.6", path = "sqlx-macros-core" } -sqlx-macros = { version = "=0.8.6", path = "sqlx-macros" } +sqlx-core = { version = "=0.9.0-alpha.1", path = "sqlx-core" } +sqlx-macros-core = { version = "=0.9.0-alpha.1", path = "sqlx-macros-core" } +sqlx-macros = { version = "=0.9.0-alpha.1", path = "sqlx-macros" } # Driver crates -sqlx-mysql = { version = "=0.8.6", path = "sqlx-mysql" } -sqlx-postgres = { version = "=0.8.6", path = "sqlx-postgres" } -sqlx-sqlite = { version = "=0.8.6", path = "sqlx-sqlite" } +sqlx-mysql = { version = "=0.9.0-alpha.1", path = "sqlx-mysql" } +sqlx-postgres = { version = "=0.9.0-alpha.1", path = "sqlx-postgres" } +sqlx-sqlite = { version = "=0.9.0-alpha.1", path = "sqlx-sqlite" } # Facade crate (for reference from sqlx-cli) -sqlx = { version = "=0.8.6", path = ".", default-features = false } +sqlx = { version = "=0.9.0-alpha.1", path = ".", default-features = false } # Common type integrations shared by multiple driver crates. 
# These are optional unless enabled in a workspace crate. diff --git a/FAQ.md b/FAQ.md index cf13cf73ee..4b9a4ac59d 100644 --- a/FAQ.md +++ b/FAQ.md @@ -18,6 +18,7 @@ As a rule, however, we only officially support the range of versions for each da For each database and where applicable, we test against the latest and oldest versions that we intend to support. You can see the current versions being tested against by looking at our CI config: https://github.com/launchbadge/sqlx/blob/main/.github/workflows/sqlx.yml#L168 ------------------------------------------------------------------- + ### What versions of Rust does SQLx support? What is SQLx's MSRV\*? SQLx's MSRV is the second-to-latest stable release as of the beginning of the current release cycle (`0.x.0`). diff --git a/README.md b/README.md index cc0ecf2e66..747ad079f0 100644 --- a/README.md +++ b/README.md @@ -165,16 +165,8 @@ be removed in the future. - `runtime-async-std`: Use the `async-std` runtime without enabling a TLS backend. -- `runtime-async-std-native-tls`: Use the `async-std` runtime and `native-tls` TLS backend (SOFT-DEPRECATED). - -- `runtime-async-std-rustls`: Use the `async-std` runtime and `rustls` TLS backend (SOFT-DEPRECATED). - - `runtime-tokio`: Use the `tokio` runtime without enabling a TLS backend. -- `runtime-tokio-native-tls`: Use the `tokio` runtime and `native-tls` TLS backend (SOFT-DEPRECATED). - -- `runtime-tokio-rustls`: Use the `tokio` runtime and `rustls` TLS backend (SOFT-DEPRECATED). - - Actix-web is fully compatible with Tokio and so a separate runtime feature is no longer needed. - `tls-native-tls`: Use the `native-tls` TLS backend (OpenSSL on *nix, SChannel on Windows, Secure Transport on macOS). 
diff --git a/benches/sqlite/describe.rs b/benches/sqlite/describe.rs index 470c1f7362..5d353b3d3c 100644 --- a/benches/sqlite/describe.rs +++ b/benches/sqlite/describe.rs @@ -3,7 +3,7 @@ use criterion::Criterion; use criterion::{criterion_group, criterion_main}; use sqlx::sqlite::{Sqlite, SqliteConnection}; -use sqlx::{Connection, Executor}; +use sqlx::Executor; use sqlx_test::new; // Here we have an async function to benchmark diff --git a/examples/postgres/listen/src/main.rs b/examples/postgres/listen/src/main.rs index b9ed35ba4b..49d97a466d 100644 --- a/examples/postgres/listen/src/main.rs +++ b/examples/postgres/listen/src/main.rs @@ -98,9 +98,9 @@ from ( ) notifies(chan, payload) "#, ) - .bind(&COUNTER.fetch_add(1, Ordering::SeqCst)) - .bind(&COUNTER.fetch_add(1, Ordering::SeqCst)) - .bind(&COUNTER.fetch_add(1, Ordering::SeqCst)) + .bind(COUNTER.fetch_add(1, Ordering::SeqCst)) + .bind(COUNTER.fetch_add(1, Ordering::SeqCst)) + .bind(COUNTER.fetch_add(1, Ordering::SeqCst)) .execute(pool) .await; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 29f0b09695..f406d73736 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ # Note: should NOT increase during a minor/patch release cycle [toolchain] -channel = "1.78" +channel = "1.86" profile = "minimal" diff --git a/sqlx-bench/Cargo.toml b/sqlx-bench/Cargo.toml deleted file mode 100644 index 0aa9532034..0000000000 --- a/sqlx-bench/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "sqlx-bench" -version = "0.1.0" -authors = ["Austin Bonander "] -edition = "2021" -publish = false - -[features] -runtime-actix-native-tls = ["runtime-tokio-native-tls"] -runtime-async-std-native-tls = [ - "sqlx/runtime-async-std-native-tls", -] -runtime-tokio-native-tls = [ - "sqlx/runtime-tokio-native-tls", -] - -runtime-actix-rustls = ["runtime-tokio-rustls"] -runtime-async-std-rustls = [ - "sqlx/runtime-async-std-rustls", -] -runtime-tokio-rustls = [ - "sqlx/runtime-tokio-rustls", -] - -postgres 
= ["sqlx/postgres"] -sqlite = ["sqlx/sqlite"] - -[dependencies] -criterion = "0.5.1" -dotenvy = "0.15.0" -once_cell = "1.4" -sqlx = { workspace = true, default-features = false, features = ["macros"] } - -chrono = "0.4.19" - -[[bench]] -name = "pg_pool" -harness = false -required-features = ["postgres"] - -[[bench]] -name = "sqlite_fetch_all" -harness = false -required-features = ["sqlite"] - -[lints] -workspace = true diff --git a/sqlx-bench/README.md b/sqlx-bench/README.md deleted file mode 100644 index f9903c748e..0000000000 --- a/sqlx-bench/README.md +++ /dev/null @@ -1,39 +0,0 @@ -SQLx Self-Benchmarks -==================== - -This Cargo project implements various benchmarks for SQLx using -[Criterion](https://crates.io/crates/criterion). - -### Available Benchmarks - -* Group `pg_pool`: benchmarks `sqlx::Pool` against a PostgreSQL server. - * `DATABASE_URL` must be set (or in `.env`) pointing to a PostgreSQL server. - It should preferably be running on the same machine as the benchmarks to reduce latency. - * The `postgres` feature must be enabled for this benchmark to run. - * Benchmarks: - * `bench_pgpool_acquire`: benchmarks `Pool::acquire()` when many concurrent tasks are also using - the pool, with or without the pool being fair. Concurrently to the benchmark iteration - function calling and blocking on `Pool::acquire()`, a varying number of background tasks are - also calling `acquire()` and holding the acquired connection for 500µs each before releasing - it back to the pool. The pool is created with `.min_connections(50).max_connections(50)` so we shouldn't - be measuring anything but the actual overhead of `Pool`'s bookkeeping. - -### Running - -You must choose a runtime to execute the benchmarks on; the feature flags are the same as the `sqlx` crate: - -```bash -cargo bench --features runtime-tokio-native-tls -cargo bench --features runtime-async-std-rustls -``` - -When complete, the benchmark results will be in `target/criterion/`. 
-Open `target/criterion/report/index.html` or pick one of the benchmark subfolders and open -`report/index.html` there to view the results. - -Benchmark Results -------- - -If you want to share the results here, please follow the format below. - -* [2020/07/01: `pg_pool` benchmark added to test pool fairness changes](results/2020-07-01-bench_pgpool_acquire/REPORT.md) diff --git a/sqlx-bench/benches/pg_pool.rs b/sqlx-bench/benches/pg_pool.rs deleted file mode 100644 index ccea9bc6a9..0000000000 --- a/sqlx-bench/benches/pg_pool.rs +++ /dev/null @@ -1,80 +0,0 @@ -use criterion::{criterion_group, criterion_main, Bencher, Criterion}; -use sqlx::PgPool; - -use sqlx::postgres::PgPoolOptions; -use std::time::{Duration, Instant}; - -fn bench_pgpool_acquire(c: &mut Criterion) { - let mut group = c.benchmark_group("bench_pgpool_acquire"); - - for &concurrent in [5u32, 10, 50, 100, 500, 1000, 5000 /*, 10_000, 50_000*/].iter() { - for &fair in [false, true].iter() { - let fairness = if fair { "(fair)" } else { "(unfair)" }; - - group.bench_with_input( - format!("{concurrent} concurrent {fairness}"), - &(concurrent, fair), - |b, &(concurrent, fair)| do_bench_acquire(b, concurrent, fair), - ); - } - } - - group.finish(); -} - -fn do_bench_acquire(b: &mut Bencher, concurrent: u32, fair: bool) { - let pool = sqlx::__rt::block_on( - PgPoolOptions::new() - // we don't want timeouts because we want to see how the pool degrades - .acquire_timeout(Duration::from_secs(3600)) - // force the pool to start full - .min_connections(50) - .max_connections(50) - // we're not benchmarking `ping()` - .test_before_acquire(false) - .__fair(fair) - .connect( - &dotenvy::var("DATABASE_URL").expect("DATABASE_URL must be set to run benchmarks"), - ), - ) - .expect("failed to open PgPool"); - - for _ in 0..concurrent { - let pool = pool.clone(); - sqlx::__rt::enter_runtime(|| { - sqlx::__rt::spawn(async move { - while !pool.is_closed() { - let conn = match pool.acquire().await { - Ok(conn) => conn, - 
Err(sqlx::Error::PoolClosed) => break, - Err(e) => panic!("failed to acquire concurrent connection: {e}"), - }; - - // pretend we're using the connection - sqlx::__rt::sleep(Duration::from_micros(500)).await; - drop(criterion::black_box(conn)); - } - }) - }); - } - - b.iter_custom(|iters| { - sqlx::__rt::block_on(async { - // take the start time inside the future to make sure we only count once it's running - let start = Instant::now(); - for _ in 0..iters { - criterion::black_box( - pool.acquire() - .await - .expect("failed to acquire connection for benchmark"), - ); - } - start.elapsed() - }) - }); - - sqlx::__rt::block_on(pool.close()); -} - -criterion_group!(pg_pool, bench_pgpool_acquire); -criterion_main!(pg_pool); diff --git a/sqlx-bench/benches/sqlite_fetch_all.rs b/sqlx-bench/benches/sqlite_fetch_all.rs deleted file mode 100644 index 8be770a213..0000000000 --- a/sqlx-bench/benches/sqlite_fetch_all.rs +++ /dev/null @@ -1,45 +0,0 @@ -use sqlx::{Connection, Executor}; - -use std::time::Instant; - -#[derive(sqlx::FromRow)] -struct Test { - id: i32, -} - -fn main() -> sqlx::Result<()> { - sqlx::__rt::block_on(async { - let mut conn = sqlx::SqliteConnection::connect("sqlite://test.db?mode=rwc").await?; - let delete_sql = "DROP TABLE IF EXISTS test"; - conn.execute(delete_sql).await?; - - let create_sql = "CREATE TABLE IF NOT EXISTS test (id INTEGER PRIMARY KEY NOT NULL)"; - conn.execute(create_sql).await?; - - let mut tx = conn.begin().await?; - for entry in 0i32..100000 { - sqlx::query("INSERT INTO test (id) VALUES ($1)") - .bind(entry) - .execute(&mut tx) - .await?; - } - tx.commit().await?; - - for _ in 0..10i8 { - let start = chrono::Utc::now(); - - println!( - "total: {}", - sqlx::query!("SELECT id from test") - .fetch_all(&mut conn) - .await? 
- .len() - ); - - let elapsed = chrono::Utc::now() - start; - println!("elapsed {elapsed}"); - } - - Ok(()) - }) -} diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/1000_fair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/1000_fair_pdf_small.svg deleted file mode 100644 index 6446c35b8b..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/1000_fair_pdf_small.svg +++ /dev/null @@ -1,64 +0,0 @@ - - -Density (a.u.) - - -Average Time (ms) - - - -0.05 - - - -0.1 - - - -0.15 - - - -0.2 - - - -0.25 - - - -0.3 - - - -0.35 - - - -0.4 - - - -0.45 - - - -0.5 - - - - -30 - - - -32 - - - -34 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/1000_unfair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/1000_unfair_pdf_small.svg deleted file mode 100644 index 309a62b2ec..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/1000_unfair_pdf_small.svg +++ /dev/null @@ -1,68 +0,0 @@ - - -Density (a.u.) - - -Average Time (ms) - - - -5e-4 - - - -0.001 - - - -0.0015 - - - -0.002 - - - -0.0025 - - - -0.003 - - - -0.0035 - - - -0.004 - - - -0.0045 - - - -0.005 - - - -0.0055 - - - - -0 - - - -200 - - - -400 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/100_fair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/100_fair_pdf_small.svg deleted file mode 100644 index 65f2e6206e..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/100_fair_pdf_small.svg +++ /dev/null @@ -1,60 +0,0 @@ - - -Density (a.u.) 
- - -Average Time (ms) - - - -0.1 - - - -0.2 - - - -0.3 - - - -0.4 - - - -0.5 - - - -0.6 - - - -0.7 - - - -0.8 - - - - -0 - - - -1 - - - -2 - - - -3 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/100_unfair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/100_unfair_pdf_small.svg deleted file mode 100644 index db708916a8..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/100_unfair_pdf_small.svg +++ /dev/null @@ -1,60 +0,0 @@ - - -Density (a.u.) - - -Average Time (ms) - - - -0.02 - - - -0.04 - - - -0.06 - - - -0.08 - - - -0.1 - - - -0.12 - - - -0.14 - - - -0.16 - - - -0.18 - - - - -0 - - - -5 - - - -10 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/10_fair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/10_fair_pdf_small.svg deleted file mode 100644 index b887f78e35..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/10_fair_pdf_small.svg +++ /dev/null @@ -1,56 +0,0 @@ - - -Density (a.u.) - - -Average Time (ns) - - - -0.01 - - - -0.02 - - - -0.03 - - - -0.04 - - - -0.05 - - - -0.06 - - - -0.07 - - - - -340 - - - -360 - - - -380 - - - -400 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/10_unfair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/10_unfair_pdf_small.svg deleted file mode 100644 index 884f443d1a..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/10_unfair_pdf_small.svg +++ /dev/null @@ -1,48 +0,0 @@ - - -Density (a.u.) 
- - -Average Time (ns) - - - -0.02 - - - -0.04 - - - -0.06 - - - -0.08 - - - -0.1 - - - - -340 - - - -350 - - - -360 - - - -370 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5000_fair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5000_fair_pdf_small.svg deleted file mode 100644 index 41bb9dd94c..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5000_fair_pdf_small.svg +++ /dev/null @@ -1,56 +0,0 @@ - - -Density (a.u.) - - -Average Time (ms) - - - -0.01 - - - -0.02 - - - -0.03 - - - -0.04 - - - -0.05 - - - -0.06 - - - -0.07 - - - - -160 - - - -170 - - - -180 - - - -190 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5000_unfair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5000_unfair_pdf_small.svg deleted file mode 100644 index 51c712ae53..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5000_unfair_pdf_small.svg +++ /dev/null @@ -1,68 +0,0 @@ - - -Density (a.u.) - - -Average Time (s) - - - -0.05 - - - -0.1 - - - -0.15 - - - -0.2 - - - -0.25 - - - -0.3 - - - -0.35 - - - -0.4 - - - -0.45 - - - -0.5 - - - - -0 - - - -2 - - - -4 - - - -6 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/500_fair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/500_fair_pdf_small.svg deleted file mode 100644 index 249f59b459..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/500_fair_pdf_small.svg +++ /dev/null @@ -1,44 +0,0 @@ - - -Density (a.u.) 
- - -Average Time (ms) - - - -0.2 - - - -0.4 - - - -0.6 - - - -0.8 - - - -1 - - - - -15 - - - -16 - - - -17 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/500_unfair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/500_unfair_pdf_small.svg deleted file mode 100644 index b9387fc3f0..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/500_unfair_pdf_small.svg +++ /dev/null @@ -1,52 +0,0 @@ - - -Density (a.u.) - - -Average Time (ms) - - - -0.002 - - - -0.004 - - - -0.006 - - - -0.008 - - - -0.01 - - - - -0 - - - -50 - - - -100 - - - -150 - - - -200 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/50_fair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/50_fair_pdf_small.svg deleted file mode 100644 index c8958b9e63..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/50_fair_pdf_small.svg +++ /dev/null @@ -1,52 +0,0 @@ - - -Density (a.u.) - - -Average Time (us) - - - -0.005 - - - -0.01 - - - -0.015 - - - -0.02 - - - -0.025 - - - - -0 - - - -50 - - - -100 - - - -150 - - - -200 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/50_unfair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/50_unfair_pdf_small.svg deleted file mode 100644 index e16502cccc..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/50_unfair_pdf_small.svg +++ /dev/null @@ -1,52 +0,0 @@ - - -Density (a.u.) - - -Average Time (us) - - - -0.005 - - - -0.01 - - - -0.015 - - - -0.02 - - - -0.025 - - - -0.03 - - - -0.035 - - - - -0 - - - -50 - - - -100 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5_fair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5_fair_pdf_small.svg deleted file mode 100644 index 3ebb0258bb..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5_fair_pdf_small.svg +++ /dev/null @@ -1,60 +0,0 @@ - - -Density (a.u.) 
- - -Average Time (ns) - - - -0.01 - - - -0.02 - - - -0.03 - - - -0.04 - - - -0.05 - - - -0.06 - - - -0.07 - - - - -340 - - - -360 - - - -380 - - - -400 - - - -420 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5_unfair_pdf_small.svg b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5_unfair_pdf_small.svg deleted file mode 100644 index c33fee3e09..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/5_unfair_pdf_small.svg +++ /dev/null @@ -1,60 +0,0 @@ - - -Density (a.u.) - - -Average Time (ns) - - - -0.01 - - - -0.02 - - - -0.03 - - - -0.04 - - - -0.05 - - - -0.06 - - - -0.07 - - - -0.08 - - - -0.09 - - - - -340 - - - -360 - - - -380 - - - - - diff --git a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/REPORT.md b/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/REPORT.md deleted file mode 100644 index 0fb71b14a7..0000000000 --- a/sqlx-bench/results/2020-07-01-bench_pgpool_acquire/REPORT.md +++ /dev/null @@ -1,32 +0,0 @@ -### 2020/07/01: `pg_pool` benchmark added to test pool fairness changes - -* Commit: 7c8ef602661c9cb62c8cf43aaf0f8faaf5b0aed5 -* Machine specs: - * (`lscpu`) Intel(R) Core(TM) i9-9900K CPU @ 3.60GHz - * 64 GB DDR4 RAM - * (Distro / `uname -a`) Arch Linux / kernel `5.6.15-zen2-1-zen` - * (`rustc -V`) `rustc 1.43.0-nightly (834bc5650 2020-02-24)` -* Command: `cargo bench --features runtime-tokio,postgres -- --measurement-time 30` - -| Connections | Unfair Pool | Fair Pool| -|-------------|-------------|----------| -| 5 | ![](5_unfair_pdf_small.svg) | ![](5_fair_pdf_small.svg) | -| 10 | ![](10_unfair_pdf_small.svg) | ![](10_fair_pdf_small.svg) | -| 50 | ![](50_unfair_pdf_small.svg) | ![](50_fair_pdf_small.svg) | -| 100 | ![](100_unfair_pdf_small.svg) | ![](100_fair_pdf_small.svg) | -| 500 | ![](500_unfair_pdf_small.svg) | ![](500_fair_pdf_small.svg) | -| 1000 | ![](1000_unfair_pdf_small.svg) | ![](1000_fair_pdf_small.svg) | -| 5000 | ![](5000_unfair_pdf_small.svg) | ![](5000_fair_pdf_small.svg) | - 
- -When the pool is set to be fair it appears the average time for `acquire()` at high contention -(500+ tasks using 50 pool connections) actually goes down by up to an order of magnitude since tasks -in the waiter queue aren't getting preempted. - -This appears to be the solution to our problem of tasks timing out waiting for a connection at high -load. At very low contention (5-10 tasks using 50 pool connections), the average time for `acqure()` -increases by perhaps 10% but a difference between 80µs and 90µs is hardly noticeable in real -applications. - -A normal MPMC channel may care about that kind of overhead, though, which is why most implementations -aren't perfectly fair. diff --git a/sqlx-bench/test.db b/sqlx-bench/test.db deleted file mode 100644 index 6d62cec842..0000000000 Binary files a/sqlx-bench/test.db and /dev/null differ diff --git a/sqlx-cli/Cargo.toml b/sqlx-cli/Cargo.toml index de87b38ba0..f8c821a8f8 100644 --- a/sqlx-cli/Cargo.toml +++ b/sqlx-cli/Cargo.toml @@ -14,6 +14,7 @@ authors = [ "Jesper Axelsson ", "Austin Bonander ", ] +rust-version.workspace = true [[bin]] name = "sqlx" @@ -49,20 +50,23 @@ backoff = { version = "0.4.0", features = ["futures", "tokio"] } [features] default = ["postgres", "sqlite", "mysql", "native-tls", "completions"] -rustls = ["sqlx/runtime-tokio-rustls"] -native-tls = ["sqlx/runtime-tokio-native-tls"] +rustls = ["sqlx/tls-rustls"] +native-tls = ["sqlx/tls-native-tls"] # databases mysql = ["sqlx/mysql"] postgres = ["sqlx/postgres"] -sqlite = ["sqlx/sqlite"] -sqlite-unbundled = ["sqlx/sqlite-unbundled"] +sqlite = ["sqlx/sqlite", "_sqlite"] +sqlite-unbundled = ["sqlx/sqlite-unbundled", "_sqlite"] # workaround for musl + openssl issues openssl-vendored = ["openssl/vendored"] completions = ["dep:clap_complete"] +# Conditional compilation only +_sqlite = [] + [dev-dependencies] assert_cmd = "2.0.11" tempfile = "3.10.1" diff --git a/sqlx-core/Cargo.toml b/sqlx-core/Cargo.toml index f6017a9fee..51b82fa68e 100644 --- 
a/sqlx-core/Cargo.toml +++ b/sqlx-core/Cargo.toml @@ -6,6 +6,7 @@ license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true +rust-version.workspace = true [package.metadata.docs.rs] features = ["offline"] diff --git a/sqlx-core/src/any/arguments.rs b/sqlx-core/src/any/arguments.rs index 59a0c0f765..2c05e3fd5b 100644 --- a/sqlx-core/src/any/arguments.rs +++ b/sqlx-core/src/any/arguments.rs @@ -32,7 +32,7 @@ impl<'q> Arguments<'q> for AnyArguments<'q> { pub struct AnyArgumentBuffer<'q>(#[doc(hidden)] pub Vec>); -impl<'q> Default for AnyArguments<'q> { +impl Default for AnyArguments<'_> { fn default() -> Self { AnyArguments { values: AnyArgumentBuffer(vec![]), diff --git a/sqlx-core/src/any/row.rs b/sqlx-core/src/any/row.rs index 310881da14..57b8590b5f 100644 --- a/sqlx-core/src/any/row.rs +++ b/sqlx-core/src/any/row.rs @@ -63,7 +63,7 @@ impl Row for AnyRow { } } -impl<'i> ColumnIndex for &'i str { +impl ColumnIndex for &'_ str { fn index(&self, row: &AnyRow) -> Result { row.column_names .get(*self) diff --git a/sqlx-core/src/any/statement.rs b/sqlx-core/src/any/statement.rs index 1fbb11895c..6d513e9a06 100644 --- a/sqlx-core/src/any/statement.rs +++ b/sqlx-core/src/any/statement.rs @@ -51,7 +51,7 @@ impl<'q> Statement<'q> for AnyStatement<'q> { impl_statement_query!(AnyArguments<'_>); } -impl<'i> ColumnIndex> for &'i str { +impl ColumnIndex> for &'_ str { fn index(&self, statement: &AnyStatement<'_>) -> Result { statement .column_names diff --git a/sqlx-core/src/ext/async_stream.rs b/sqlx-core/src/ext/async_stream.rs index 56777ca4db..c41d940981 100644 --- a/sqlx-core/src/ext/async_stream.rs +++ b/sqlx-core/src/ext/async_stream.rs @@ -95,7 +95,7 @@ impl Yielder { } } -impl<'a, T> Stream for TryAsyncStream<'a, T> { +impl Stream for TryAsyncStream<'_, T> { type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { diff --git a/sqlx-core/src/io/encode.rs b/sqlx-core/src/io/encode.rs index 
a603ea9325..ba032d294d 100644 --- a/sqlx-core/src/io/encode.rs +++ b/sqlx-core/src/io/encode.rs @@ -9,7 +9,7 @@ pub trait ProtocolEncode<'en, Context = ()> { fn encode_with(&self, buf: &mut Vec, context: Context) -> Result<(), crate::Error>; } -impl<'en, C> ProtocolEncode<'en, C> for &'_ [u8] { +impl ProtocolEncode<'_, C> for &'_ [u8] { fn encode_with(&self, buf: &mut Vec, _context: C) -> Result<(), crate::Error> { buf.extend_from_slice(self); Ok(()) diff --git a/sqlx-core/src/logger.rs b/sqlx-core/src/logger.rs index cf6dd533bd..18d5843d38 100644 --- a/sqlx-core/src/logger.rs +++ b/sqlx-core/src/logger.rs @@ -158,7 +158,7 @@ impl<'q> QueryLogger<'q> { } } -impl<'q> Drop for QueryLogger<'q> { +impl Drop for QueryLogger<'_> { fn drop(&mut self) { self.finish(); } diff --git a/sqlx-core/src/net/socket/mod.rs b/sqlx-core/src/net/socket/mod.rs index d11f15884e..1f24da8c40 100644 --- a/sqlx-core/src/net/socket/mod.rs +++ b/sqlx-core/src/net/socket/mod.rs @@ -62,7 +62,7 @@ pub struct Read<'a, S: ?Sized, B> { buf: &'a mut B, } -impl<'a, S: ?Sized, B> Future for Read<'a, S, B> +impl Future for Read<'_, S, B> where S: Socket, B: ReadBuf, @@ -90,7 +90,7 @@ pub struct Write<'a, S: ?Sized> { buf: &'a [u8], } -impl<'a, S: ?Sized> Future for Write<'a, S> +impl Future for Write<'_, S> where S: Socket, { @@ -116,7 +116,7 @@ pub struct Flush<'a, S: ?Sized> { socket: &'a mut S, } -impl<'a, S: Socket + ?Sized> Future for Flush<'a, S> { +impl Future for Flush<'_, S> { type Output = io::Result<()>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { @@ -128,7 +128,7 @@ pub struct Shutdown<'a, S: ?Sized> { socket: &'a mut S, } -impl<'a, S: ?Sized> Future for Shutdown<'a, S> +impl Future for Shutdown<'_, S> where S: Socket, { diff --git a/sqlx-core/src/net/tls/tls_rustls.rs b/sqlx-core/src/net/tls/tls_rustls.rs index 1a85cf0ff9..726df324aa 100644 --- a/sqlx-core/src/net/tls/tls_rustls.rs +++ b/sqlx-core/src/net/tls/tls_rustls.rs @@ -137,10 +137,7 @@ where 
.with_no_client_auth() } } else { - #[cfg(any(feature = "_tls-rustls-aws-lc-rs", feature = "_tls-rustls-ring-webpki"))] - let mut cert_store = certs_from_webpki(); - #[cfg(feature = "_tls-rustls-ring-native-roots")] - let mut cert_store = certs_from_native_store(); + let mut cert_store = import_root_certs(); if let Some(ca) = tls_config.root_cert_path { let data = ca.data().await?; @@ -211,13 +208,13 @@ fn private_key_from_pem(pem: Vec) -> Result, Error> { } } -#[cfg(any(feature = "_tls-rustls-aws-lc-rs", feature = "_tls-rustls-ring-webpki"))] -fn certs_from_webpki() -> RootCertStore { +#[cfg(all(feature = "webpki-roots", not(feature = "rustls-native-certs")))] +fn import_root_certs() -> RootCertStore { RootCertStore::from_iter(webpki_roots::TLS_SERVER_ROOTS.iter().cloned()) } -#[cfg(feature = "_tls-rustls-ring-native-roots")] -fn certs_from_native_store() -> RootCertStore { +#[cfg(feature = "rustls-native-certs")] +fn import_root_certs() -> RootCertStore { let mut root_cert_store = RootCertStore::empty(); let load_results = rustls_native_certs::load_native_certs(); @@ -225,7 +222,7 @@ fn certs_from_native_store() -> RootCertStore { log::warn!("Error loading native certificates: {e:?}"); } for cert in load_results.certs { - if let Err(e) = root_cert_store.add(cert.into()) { + if let Err(e) = root_cert_store.add(cert) { log::warn!("rustls failed to parse native certificate: {e:?}"); } } @@ -233,6 +230,12 @@ fn certs_from_native_store() -> RootCertStore { root_cert_store } +// Not currently used but allows for a "tls-rustls-no-roots" feature. 
+#[cfg(not(any(feature = "rustls-native-certs", feature = "webpki-roots")))] +fn import_root_certs() -> RootCertStore { + RootCertStore::empty() +} + #[derive(Debug)] struct DummyTlsVerifier { provider: Arc, diff --git a/sqlx-core/src/pool/executor.rs b/sqlx-core/src/pool/executor.rs index ba27b44316..0eda818a5b 100644 --- a/sqlx-core/src/pool/executor.rs +++ b/sqlx-core/src/pool/executor.rs @@ -9,7 +9,7 @@ use crate::error::Error; use crate::executor::{Execute, Executor}; use crate::pool::Pool; -impl<'p, DB: Database> Executor<'p> for &'_ Pool +impl Executor<'_> for &'_ Pool where for<'c> &'c mut DB::Connection: Executor<'c, Database = DB>, { diff --git a/sqlx-core/src/pool/inner.rs b/sqlx-core/src/pool/inner.rs index 2066364a8e..55accfd95d 100644 --- a/sqlx-core/src/pool/inner.rs +++ b/sqlx-core/src/pool/inner.rs @@ -94,7 +94,7 @@ impl PoolInner { self.on_closed.notify(usize::MAX); } - pub(super) fn close<'a>(self: &'a Arc) -> impl Future + 'a { + pub(super) fn close(self: &Arc) -> impl Future + '_ { self.mark_closed(); async move { @@ -124,7 +124,7 @@ impl PoolInner { /// /// If we steal a permit from the parent but *don't* open a connection, /// it should be returned to the parent. - async fn acquire_permit<'a>(self: &'a Arc) -> Result, Error> { + async fn acquire_permit(self: &Arc) -> Result, Error> { let parent = self .parent() // If we're already at the max size, we shouldn't try to steal from the parent. @@ -452,14 +452,14 @@ pub(super) fn is_beyond_max_lifetime( ) -> bool { options .max_lifetime - .map_or(false, |max| live.created_at.elapsed() > max) + .is_some_and(|max| live.created_at.elapsed() > max) } /// Returns `true` if the connection has exceeded `options.idle_timeout` if set, `false` otherwise. 
fn is_beyond_idle_timeout(idle: &Idle, options: &PoolOptions) -> bool { options .idle_timeout - .map_or(false, |timeout| idle.idle_since.elapsed() > timeout) + .is_some_and(|timeout| idle.idle_since.elapsed() > timeout) } async fn check_idle_conn( diff --git a/sqlx-core/src/pool/maybe.rs b/sqlx-core/src/pool/maybe.rs index f9f16c41a5..71a48728a2 100644 --- a/sqlx-core/src/pool/maybe.rs +++ b/sqlx-core/src/pool/maybe.rs @@ -8,7 +8,7 @@ pub enum MaybePoolConnection<'c, DB: Database> { PoolConnection(PoolConnection), } -impl<'c, DB: Database> Deref for MaybePoolConnection<'c, DB> { +impl Deref for MaybePoolConnection<'_, DB> { type Target = DB::Connection; #[inline] @@ -20,7 +20,7 @@ impl<'c, DB: Database> Deref for MaybePoolConnection<'c, DB> { } } -impl<'c, DB: Database> DerefMut for MaybePoolConnection<'c, DB> { +impl DerefMut for MaybePoolConnection<'_, DB> { #[inline] fn deref_mut(&mut self) -> &mut Self::Target { match self { @@ -30,7 +30,7 @@ impl<'c, DB: Database> DerefMut for MaybePoolConnection<'c, DB> { } } -impl<'c, DB: Database> From> for MaybePoolConnection<'c, DB> { +impl From> for MaybePoolConnection<'_, DB> { fn from(v: PoolConnection) -> Self { MaybePoolConnection::PoolConnection(v) } diff --git a/sqlx-core/src/query.rs b/sqlx-core/src/query.rs index 60f509c342..3a982e7e79 100644 --- a/sqlx-core/src/query.rs +++ b/sqlx-core/src/query.rs @@ -120,7 +120,7 @@ impl<'q, DB: Database> Query<'q, DB, ::Arguments<'q>> { } } -impl<'q, DB, A> Query<'q, DB, A> +impl Query<'_, DB, A> where DB: Database + HasStatementCache, { @@ -499,7 +499,7 @@ where /// Execute a single SQL query as a prepared statement (explicitly created). 
pub fn query_statement<'q, DB>( statement: &'q DB::Statement<'q>, -) -> Query<'q, DB, ::Arguments<'_>> +) -> Query<'q, DB, ::Arguments<'q>> where DB: Database, { diff --git a/sqlx-core/src/query_as.rs b/sqlx-core/src/query_as.rs index 9f28fe41e9..5a5ae4c73e 100644 --- a/sqlx-core/src/query_as.rs +++ b/sqlx-core/src/query_as.rs @@ -57,7 +57,7 @@ impl<'q, DB: Database, O> QueryAs<'q, DB, O, ::Arguments<'q>> { } } -impl<'q, DB, O, A> QueryAs<'q, DB, O, A> +impl QueryAs<'_, DB, O, A> where DB: Database + HasStatementCache, { @@ -386,7 +386,7 @@ where // Make a SQL query from a statement, that is mapped to a concrete type. pub fn query_statement_as<'q, DB, O>( statement: &'q DB::Statement<'q>, -) -> QueryAs<'q, DB, O, ::Arguments<'_>> +) -> QueryAs<'q, DB, O, ::Arguments<'q>> where DB: Database, O: for<'r> FromRow<'r, DB::Row>, diff --git a/sqlx-core/src/query_builder.rs b/sqlx-core/src/query_builder.rs index b242bf7b2a..b14d19adb2 100644 --- a/sqlx-core/src/query_builder.rs +++ b/sqlx-core/src/query_builder.rs @@ -30,7 +30,7 @@ where arguments: Option<::Arguments<'args>>, } -impl<'args, DB: Database> Default for QueryBuilder<'args, DB> { +impl Default for QueryBuilder<'_, DB> { fn default() -> Self { QueryBuilder { init_len: 0, @@ -191,7 +191,6 @@ where /// assert!(sql.ends_with("in (?, ?) ")); /// # } /// ``` - pub fn separated<'qb, Sep>(&'qb mut self, separator: Sep) -> Separated<'qb, 'args, DB, Sep> where 'args: 'qb, diff --git a/sqlx-core/src/query_scalar.rs b/sqlx-core/src/query_scalar.rs index c131adcca3..f3fcfb403a 100644 --- a/sqlx-core/src/query_scalar.rs +++ b/sqlx-core/src/query_scalar.rs @@ -54,7 +54,7 @@ impl<'q, DB: Database, O> QueryScalar<'q, DB, O, ::Arguments<'q> } } -impl<'q, DB, O, A> QueryScalar<'q, DB, O, A> +impl QueryScalar<'_, DB, O, A> where DB: Database + HasStatementCache, { @@ -365,7 +365,7 @@ where // Make a SQL query from a statement, that is mapped to a concrete value. 
pub fn query_statement_scalar<'q, DB, O>( statement: &'q DB::Statement<'q>, -) -> QueryScalar<'q, DB, O, ::Arguments<'_>> +) -> QueryScalar<'q, DB, O, ::Arguments<'q>> where DB: Database, (O,): for<'r> FromRow<'r, DB::Row>, diff --git a/sqlx-core/src/rt/mod.rs b/sqlx-core/src/rt/mod.rs index 43409073ab..d495994866 100644 --- a/sqlx-core/src/rt/mod.rs +++ b/sqlx-core/src/rt/mod.rs @@ -116,11 +116,11 @@ pub async fn yield_now() { pub fn test_block_on(f: F) -> F::Output { #[cfg(feature = "_rt-tokio")] { - return tokio::runtime::Builder::new_current_thread() + tokio::runtime::Builder::new_current_thread() .enable_all() .build() .expect("failed to start Tokio runtime") - .block_on(f); + .block_on(f) } #[cfg(all(feature = "_rt-async-std", not(feature = "_rt-tokio")))] diff --git a/sqlx-core/src/transaction.rs b/sqlx-core/src/transaction.rs index 2a84ff6555..0606fbee31 100644 --- a/sqlx-core/src/transaction.rs +++ b/sqlx-core/src/transaction.rs @@ -11,7 +11,6 @@ use crate::pool::MaybePoolConnection; /// Generic management of database transactions. /// /// This trait should not be used, except when implementing [`Connection`]. -#[doc(hidden)] pub trait TransactionManager { type Database: Database; @@ -199,7 +198,7 @@ where // } // } -impl<'c, DB> Debug for Transaction<'c, DB> +impl Debug for Transaction<'_, DB> where DB: Database, { @@ -209,7 +208,7 @@ where } } -impl<'c, DB> Deref for Transaction<'c, DB> +impl Deref for Transaction<'_, DB> where DB: Database, { @@ -221,7 +220,7 @@ where } } -impl<'c, DB> DerefMut for Transaction<'c, DB> +impl DerefMut for Transaction<'_, DB> where DB: Database, { @@ -235,13 +234,13 @@ where // `PgAdvisoryLockGuard`. 
// // See: https://github.com/launchbadge/sqlx/issues/2520 -impl<'c, DB: Database> AsMut for Transaction<'c, DB> { +impl AsMut for Transaction<'_, DB> { fn as_mut(&mut self) -> &mut DB::Connection { &mut self.connection } } -impl<'c, 't, DB: Database> crate::acquire::Acquire<'t> for &'t mut Transaction<'c, DB> { +impl<'t, DB: Database> crate::acquire::Acquire<'t> for &'t mut Transaction<'_, DB> { type Database = DB; type Connection = &'t mut ::Connection; @@ -257,7 +256,7 @@ impl<'c, 't, DB: Database> crate::acquire::Acquire<'t> for &'t mut Transaction<' } } -impl<'c, DB> Drop for Transaction<'c, DB> +impl Drop for Transaction<'_, DB> where DB: Database, { diff --git a/sqlx-core/src/type_checking.rs b/sqlx-core/src/type_checking.rs index 5766124530..1da6b7ab3f 100644 --- a/sqlx-core/src/type_checking.rs +++ b/sqlx-core/src/type_checking.rs @@ -112,7 +112,7 @@ where } } -impl<'v, DB> Debug for FmtValue<'v, DB> +impl Debug for FmtValue<'_, DB> where DB: Database, { diff --git a/sqlx-macros-core/Cargo.toml b/sqlx-macros-core/Cargo.toml index 07d9d78862..d78cbe3d63 100644 --- a/sqlx-macros-core/Cargo.toml +++ b/sqlx-macros-core/Cargo.toml @@ -6,6 +6,7 @@ license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true +rust-version.workspace = true [features] default = [] diff --git a/sqlx-macros-core/src/common.rs b/sqlx-macros-core/src/common.rs index fab09b7cae..b195a9ffd0 100644 --- a/sqlx-macros-core/src/common.rs +++ b/sqlx-macros-core/src/common.rs @@ -15,9 +15,9 @@ pub(crate) fn resolve_path(path: impl AsRef, err_span: Span) -> syn::Resul // requires `proc_macro::SourceFile::path()` to be stable // https://github.com/rust-lang/rust/issues/54725 if path.is_relative() - && !path + && path .parent() - .map_or(false, |parent| !parent.as_os_str().is_empty()) + .is_none_or(|parent| parent.as_os_str().is_empty()) { return Err(syn::Error::new( err_span, diff --git a/sqlx-macros-core/src/test_attr.rs 
b/sqlx-macros-core/src/test_attr.rs index d7c6eb0486..3104a0e743 100644 --- a/sqlx-macros-core/src/test_attr.rs +++ b/sqlx-macros-core/src/test_attr.rs @@ -246,13 +246,9 @@ fn parse_args(attr_args: AttributeArgs) -> syn::Result { fn recurse_lit_lookup(expr: Expr) -> Option { match expr { - Expr::Lit(syn::ExprLit { lit, .. }) => { - return Some(lit); - } - Expr::Group(syn::ExprGroup { expr, .. }) => { - return recurse_lit_lookup(*expr); - } - _ => return None, + Expr::Lit(syn::ExprLit { lit, .. }) => Some(lit), + Expr::Group(syn::ExprGroup { expr, .. }) => recurse_lit_lookup(*expr), + _ => None, } } diff --git a/sqlx-macros/Cargo.toml b/sqlx-macros/Cargo.toml index b513c3e808..032a190dd1 100644 --- a/sqlx-macros/Cargo.toml +++ b/sqlx-macros/Cargo.toml @@ -6,6 +6,7 @@ license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true +rust-version.workspace = true [lib] proc-macro = true diff --git a/sqlx-mysql/Cargo.toml b/sqlx-mysql/Cargo.toml index 3971c2ff87..9bba7aa0a9 100644 --- a/sqlx-mysql/Cargo.toml +++ b/sqlx-mysql/Cargo.toml @@ -7,7 +7,7 @@ license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +rust-version.workspace = true [features] json = ["sqlx-core/json", "serde"] diff --git a/sqlx-mysql/src/connection/establish.rs b/sqlx-mysql/src/connection/establish.rs index 85a9d84f96..ec7d8e4c2c 100644 --- a/sqlx-mysql/src/connection/establish.rs +++ b/sqlx-mysql/src/connection/establish.rs @@ -186,7 +186,7 @@ impl<'a> DoHandshake<'a> { } } -impl<'a> WithSocket for DoHandshake<'a> { +impl WithSocket for DoHandshake<'_> { type Output = Result; async fn with_socket(self, socket: S) -> Self::Output { diff --git a/sqlx-mysql/src/connection/executor.rs b/sqlx-mysql/src/connection/executor.rs index 4f5af4bf6d..44cb523f56 100644 --- a/sqlx-mysql/src/connection/executor.rs +++ 
b/sqlx-mysql/src/connection/executor.rs @@ -25,7 +25,7 @@ use futures_util::TryStreamExt; use std::{borrow::Cow, pin::pin, sync::Arc}; impl MySqlConnection { - async fn prepare_statement<'c>( + async fn prepare_statement( &mut self, sql: &str, ) -> Result<(u32, MySqlStatementMetadata), Error> { @@ -72,7 +72,7 @@ impl MySqlConnection { Ok((id, metadata)) } - async fn get_or_prepare_statement<'c>( + async fn get_or_prepare_statement( &mut self, sql: &str, ) -> Result<(u32, MySqlStatementMetadata), Error> { diff --git a/sqlx-mysql/src/io/buf_mut.rs b/sqlx-mysql/src/io/buf_mut.rs index e40148e7b5..11bd2da5a2 100644 --- a/sqlx-mysql/src/io/buf_mut.rs +++ b/sqlx-mysql/src/io/buf_mut.rs @@ -45,7 +45,7 @@ impl MySqlBufMutExt for Vec { #[test] fn test_encodes_int_lenenc_u8() { let mut buf = Vec::with_capacity(1024); - buf.put_uint_lenenc(0xFA as u64); + buf.put_uint_lenenc(0xFA_u64); assert_eq!(&buf[..], b"\xFA"); } @@ -53,7 +53,7 @@ fn test_encodes_int_lenenc_u8() { #[test] fn test_encodes_int_lenenc_u16() { let mut buf = Vec::with_capacity(1024); - buf.put_uint_lenenc(std::u16::MAX as u64); + buf.put_uint_lenenc(u16::MAX as u64); assert_eq!(&buf[..], b"\xFC\xFF\xFF"); } @@ -61,7 +61,7 @@ fn test_encodes_int_lenenc_u16() { #[test] fn test_encodes_int_lenenc_u24() { let mut buf = Vec::with_capacity(1024); - buf.put_uint_lenenc(0xFF_FF_FF as u64); + buf.put_uint_lenenc(0xFF_FF_FF_u64); assert_eq!(&buf[..], b"\xFD\xFF\xFF\xFF"); } @@ -69,7 +69,7 @@ fn test_encodes_int_lenenc_u24() { #[test] fn test_encodes_int_lenenc_u64() { let mut buf = Vec::with_capacity(1024); - buf.put_uint_lenenc(std::u64::MAX); + buf.put_uint_lenenc(u64::MAX); assert_eq!(&buf[..], b"\xFE\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"); } @@ -77,7 +77,7 @@ fn test_encodes_int_lenenc_u64() { #[test] fn test_encodes_int_lenenc_fb() { let mut buf = Vec::with_capacity(1024); - buf.put_uint_lenenc(0xFB as u64); + buf.put_uint_lenenc(0xFB_u64); assert_eq!(&buf[..], b"\xFC\xFB\x00"); } @@ -85,7 +85,7 @@ fn 
test_encodes_int_lenenc_fb() { #[test] fn test_encodes_int_lenenc_fc() { let mut buf = Vec::with_capacity(1024); - buf.put_uint_lenenc(0xFC as u64); + buf.put_uint_lenenc(0xFC_u64); assert_eq!(&buf[..], b"\xFC\xFC\x00"); } @@ -93,7 +93,7 @@ fn test_encodes_int_lenenc_fc() { #[test] fn test_encodes_int_lenenc_fd() { let mut buf = Vec::with_capacity(1024); - buf.put_uint_lenenc(0xFD as u64); + buf.put_uint_lenenc(0xFD_u64); assert_eq!(&buf[..], b"\xFC\xFD\x00"); } @@ -101,7 +101,7 @@ fn test_encodes_int_lenenc_fd() { #[test] fn test_encodes_int_lenenc_fe() { let mut buf = Vec::with_capacity(1024); - buf.put_uint_lenenc(0xFE as u64); + buf.put_uint_lenenc(0xFE_u64); assert_eq!(&buf[..], b"\xFC\xFE\x00"); } @@ -109,7 +109,7 @@ fn test_encodes_int_lenenc_fe() { #[test] fn test_encodes_int_lenenc_ff() { let mut buf = Vec::with_capacity(1024); - buf.put_uint_lenenc(0xFF as u64); + buf.put_uint_lenenc(0xFF_u64); assert_eq!(&buf[..], b"\xFC\xFF\x00"); } diff --git a/sqlx-mysql/src/protocol/statement/execute.rs b/sqlx-mysql/src/protocol/statement/execute.rs index 6e51e7b564..89010315bb 100644 --- a/sqlx-mysql/src/protocol/statement/execute.rs +++ b/sqlx-mysql/src/protocol/statement/execute.rs @@ -11,7 +11,7 @@ pub struct Execute<'q> { pub arguments: &'q MySqlArguments, } -impl<'q> ProtocolEncode<'_, Capabilities> for Execute<'q> { +impl ProtocolEncode<'_, Capabilities> for Execute<'_> { fn encode_with(&self, buf: &mut Vec, _: Capabilities) -> Result<(), crate::Error> { buf.push(0x17); // COM_STMT_EXECUTE buf.extend(&self.statement.to_le_bytes()); diff --git a/sqlx-mysql/src/types/text.rs b/sqlx-mysql/src/types/text.rs index ad61c1bee8..363ec02439 100644 --- a/sqlx-mysql/src/types/text.rs +++ b/sqlx-mysql/src/types/text.rs @@ -16,7 +16,7 @@ impl Type for Text { } } -impl<'q, T> Encode<'q, MySql> for Text +impl Encode<'_, MySql> for Text where T: Display, { diff --git a/sqlx-postgres/Cargo.toml b/sqlx-postgres/Cargo.toml index 818aadbab7..f9328d03ca 100644 --- 
a/sqlx-postgres/Cargo.toml +++ b/sqlx-postgres/Cargo.toml @@ -7,7 +7,7 @@ license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +rust-version.workspace = true [features] any = ["sqlx-core/any"] diff --git a/sqlx-postgres/src/advisory_lock.rs b/sqlx-postgres/src/advisory_lock.rs index d1aef176fb..047ede6be6 100644 --- a/sqlx-postgres/src/advisory_lock.rs +++ b/sqlx-postgres/src/advisory_lock.rs @@ -362,7 +362,7 @@ impl<'lock, C: AsMut> PgAdvisoryLockGuard<'lock, C> { } } -impl<'lock, C: AsMut + AsRef> Deref for PgAdvisoryLockGuard<'lock, C> { +impl + AsRef> Deref for PgAdvisoryLockGuard<'_, C> { type Target = PgConnection; fn deref(&self) -> &Self::Target { @@ -376,16 +376,14 @@ impl<'lock, C: AsMut + AsRef> Deref for PgAdvisoryLo /// However, replacing the connection with a different one using, e.g. [`std::mem::replace()`] /// is a logic error and will cause a warning to be logged by the PostgreSQL server when this /// guard attempts to release the lock. -impl<'lock, C: AsMut + AsRef> DerefMut - for PgAdvisoryLockGuard<'lock, C> -{ +impl + AsRef> DerefMut for PgAdvisoryLockGuard<'_, C> { fn deref_mut(&mut self) -> &mut Self::Target { self.conn.as_mut().expect(NONE_ERR).as_mut() } } -impl<'lock, C: AsMut + AsRef> AsRef - for PgAdvisoryLockGuard<'lock, C> +impl + AsRef> AsRef + for PgAdvisoryLockGuard<'_, C> { fn as_ref(&self) -> &PgConnection { self.conn.as_ref().expect(NONE_ERR).as_ref() @@ -398,7 +396,7 @@ impl<'lock, C: AsMut + AsRef> AsRef /// However, replacing the connection with a different one using, e.g. [`std::mem::replace()`] /// is a logic error and will cause a warning to be logged by the PostgreSQL server when this /// guard attempts to release the lock. 
-impl<'lock, C: AsMut> AsMut for PgAdvisoryLockGuard<'lock, C> { +impl> AsMut for PgAdvisoryLockGuard<'_, C> { fn as_mut(&mut self) -> &mut PgConnection { self.conn.as_mut().expect(NONE_ERR).as_mut() } @@ -407,7 +405,7 @@ impl<'lock, C: AsMut> AsMut for PgAdvisoryLockGuard< /// Queues a `pg_advisory_unlock()` call on the wrapped connection which will be flushed /// to the server the next time it is used, or when it is returned to [`PgPool`][crate::PgPool] /// in the case of [`PoolConnection`][crate::pool::PoolConnection]. -impl<'lock, C: AsMut> Drop for PgAdvisoryLockGuard<'lock, C> { +impl> Drop for PgAdvisoryLockGuard<'_, C> { fn drop(&mut self) { if let Some(mut conn) = self.conn.take() { // Queue a simple query message to execute next time the connection is used. diff --git a/sqlx-postgres/src/connection/executor.rs b/sqlx-postgres/src/connection/executor.rs index 3fe4f402d8..d0596aacee 100644 --- a/sqlx-postgres/src/connection/executor.rs +++ b/sqlx-postgres/src/connection/executor.rs @@ -165,7 +165,7 @@ impl PgConnection { self.inner.pending_ready_for_query_count += 1; } - async fn get_or_prepare<'a>( + async fn get_or_prepare( &mut self, sql: &str, parameters: &[PgTypeInfo], diff --git a/sqlx-postgres/src/connection/tls.rs b/sqlx-postgres/src/connection/tls.rs index 16b7333bf5..a49c9caa8c 100644 --- a/sqlx-postgres/src/connection/tls.rs +++ b/sqlx-postgres/src/connection/tls.rs @@ -7,7 +7,7 @@ use crate::{PgConnectOptions, PgSslMode}; pub struct MaybeUpgradeTls<'a>(pub &'a PgConnectOptions); -impl<'a> WithSocket for MaybeUpgradeTls<'a> { +impl WithSocket for MaybeUpgradeTls<'_> { type Output = crate::Result>; async fn with_socket(self, socket: S) -> Self::Output { diff --git a/sqlx-postgres/src/listener.rs b/sqlx-postgres/src/listener.rs index 17a46a916f..32658534c4 100644 --- a/sqlx-postgres/src/listener.rs +++ b/sqlx-postgres/src/listener.rs @@ -506,12 +506,12 @@ fn build_listen_all_query(channels: impl IntoIterator>) - #[test] fn 
test_build_listen_all_query_with_single_channel() { - let output = build_listen_all_query(&["test"]); + let output = build_listen_all_query(["test"]); assert_eq!(output.as_str(), r#"LISTEN "test";"#); } #[test] fn test_build_listen_all_query_with_multiple_channels() { - let output = build_listen_all_query(&["channel.0", "channel.1"]); + let output = build_listen_all_query(["channel.0", "channel.1"]); assert_eq!(output.as_str(), r#"LISTEN "channel.0";LISTEN "channel.1";"#); } diff --git a/sqlx-postgres/src/message/response.rs b/sqlx-postgres/src/message/response.rs index d6e43e0871..a7c09cfa34 100644 --- a/sqlx-postgres/src/message/response.rs +++ b/sqlx-postgres/src/message/response.rs @@ -195,7 +195,7 @@ struct Fields<'a> { offset: usize, } -impl<'a> Iterator for Fields<'a> { +impl Iterator for Fields<'_> { type Item = (u8, Range); fn next(&mut self) -> Option { diff --git a/sqlx-postgres/src/options/parse.rs b/sqlx-postgres/src/options/parse.rs index efbf85d8f6..e911305698 100644 --- a/sqlx-postgres/src/options/parse.rs +++ b/sqlx-postgres/src/options/parse.rs @@ -336,7 +336,7 @@ fn built_url_can_be_parsed() { let url = "postgres://username:p@ssw0rd@hostname:5432/database"; let opts = PgConnectOptions::from_str(url).unwrap(); - let parsed = PgConnectOptions::from_str(&opts.build_url().to_string()); + let parsed = PgConnectOptions::from_str(opts.build_url().as_ref()); assert!(parsed.is_ok()); } diff --git a/sqlx-postgres/src/types/bigdecimal.rs b/sqlx-postgres/src/types/bigdecimal.rs index 869f850797..cb9ce0a948 100644 --- a/sqlx-postgres/src/types/bigdecimal.rs +++ b/sqlx-postgres/src/types/bigdecimal.rs @@ -213,7 +213,8 @@ fn sign_to_pg(sign: Sign) -> PgNumericSign { } #[cfg(test)] -mod bigdecimal_to_pgnumeric { +#[allow(clippy::zero_prefixed_literal)] // Used for clarity +mod tests { use super::{BigDecimal, PgNumeric, PgNumericSign}; use std::convert::TryFrom; diff --git a/sqlx-postgres/src/types/bit_vec.rs b/sqlx-postgres/src/types/bit_vec.rs index 
b519a5f24c..92eb47b210 100644 --- a/sqlx-postgres/src/types/bit_vec.rs +++ b/sqlx-postgres/src/types/bit_vec.rs @@ -55,7 +55,7 @@ impl Decode<'_, Postgres> for BitVec { let len = usize::try_from(len).map_err(|_| format!("invalid VARBIT len: {len}"))?; // The smallest amount of data we can read is one byte - let bytes_len = (len + 7) / 8; + let bytes_len = len.div_ceil(8); if bytes.remaining() != bytes_len { Err(io::Error::new( diff --git a/sqlx-postgres/src/types/cube.rs b/sqlx-postgres/src/types/cube.rs index cc2a016090..d7ddbd1723 100644 --- a/sqlx-postgres/src/types/cube.rs +++ b/sqlx-postgres/src/types/cube.rs @@ -71,7 +71,7 @@ impl<'r> Decode<'r, Postgres> for PgCube { } } -impl<'q> Encode<'q, Postgres> for PgCube { +impl Encode<'_, Postgres> for PgCube { fn produces(&self) -> Option { Some(PgTypeInfo::with_name("cube")) } diff --git a/sqlx-postgres/src/types/geometry/box.rs b/sqlx-postgres/src/types/geometry/box.rs index 28016b2786..ad4fa39ef7 100644 --- a/sqlx-postgres/src/types/geometry/box.rs +++ b/sqlx-postgres/src/types/geometry/box.rs @@ -56,7 +56,7 @@ impl<'r> Decode<'r, Postgres> for PgBox { } } -impl<'q> Encode<'q, Postgres> for PgBox { +impl Encode<'_, Postgres> for PgBox { fn produces(&self) -> Option { Some(PgTypeInfo::with_name("box")) } diff --git a/sqlx-postgres/src/types/geometry/circle.rs b/sqlx-postgres/src/types/geometry/circle.rs index dde54dd276..3f374ea74e 100644 --- a/sqlx-postgres/src/types/geometry/circle.rs +++ b/sqlx-postgres/src/types/geometry/circle.rs @@ -54,7 +54,7 @@ impl<'r> Decode<'r, Postgres> for PgCircle { } } -impl<'q> Encode<'q, Postgres> for PgCircle { +impl Encode<'_, Postgres> for PgCircle { fn produces(&self) -> Option { Some(PgTypeInfo::with_name("circle")) } diff --git a/sqlx-postgres/src/types/geometry/line.rs b/sqlx-postgres/src/types/geometry/line.rs index 8f08c949ef..6bc90676ed 100644 --- a/sqlx-postgres/src/types/geometry/line.rs +++ b/sqlx-postgres/src/types/geometry/line.rs @@ -47,7 +47,7 @@ impl<'r> 
Decode<'r, Postgres> for PgLine { } } -impl<'q> Encode<'q, Postgres> for PgLine { +impl Encode<'_, Postgres> for PgLine { fn produces(&self) -> Option { Some(PgTypeInfo::with_name("line")) } diff --git a/sqlx-postgres/src/types/geometry/line_segment.rs b/sqlx-postgres/src/types/geometry/line_segment.rs index cd08e4da4a..486d2ba07d 100644 --- a/sqlx-postgres/src/types/geometry/line_segment.rs +++ b/sqlx-postgres/src/types/geometry/line_segment.rs @@ -57,7 +57,7 @@ impl<'r> Decode<'r, Postgres> for PgLSeg { } } -impl<'q> Encode<'q, Postgres> for PgLSeg { +impl Encode<'_, Postgres> for PgLSeg { fn produces(&self) -> Option { Some(PgTypeInfo::with_name("lseg")) } diff --git a/sqlx-postgres/src/types/geometry/path.rs b/sqlx-postgres/src/types/geometry/path.rs index 6799289fac..4f99e7e983 100644 --- a/sqlx-postgres/src/types/geometry/path.rs +++ b/sqlx-postgres/src/types/geometry/path.rs @@ -64,7 +64,7 @@ impl<'r> Decode<'r, Postgres> for PgPath { } } -impl<'q> Encode<'q, Postgres> for PgPath { +impl Encode<'_, Postgres> for PgPath { fn produces(&self) -> Option { Some(PgTypeInfo::with_name("path")) } diff --git a/sqlx-postgres/src/types/geometry/point.rs b/sqlx-postgres/src/types/geometry/point.rs index 5078ce1ee4..1a57403810 100644 --- a/sqlx-postgres/src/types/geometry/point.rs +++ b/sqlx-postgres/src/types/geometry/point.rs @@ -50,7 +50,7 @@ impl<'r> Decode<'r, Postgres> for PgPoint { } } -impl<'q> Encode<'q, Postgres> for PgPoint { +impl Encode<'_, Postgres> for PgPoint { fn produces(&self) -> Option { Some(PgTypeInfo::with_name("point")) } diff --git a/sqlx-postgres/src/types/geometry/polygon.rs b/sqlx-postgres/src/types/geometry/polygon.rs index a5a203c680..e612b93499 100644 --- a/sqlx-postgres/src/types/geometry/polygon.rs +++ b/sqlx-postgres/src/types/geometry/polygon.rs @@ -63,7 +63,7 @@ impl<'r> Decode<'r, Postgres> for PgPolygon { } } -impl<'q> Encode<'q, Postgres> for PgPolygon { +impl Encode<'_, Postgres> for PgPolygon { fn produces(&self) -> Option { 
Some(PgTypeInfo::with_name("polygon")) } diff --git a/sqlx-postgres/src/types/interval.rs b/sqlx-postgres/src/types/interval.rs index 02b1faa67a..af4810323d 100644 --- a/sqlx-postgres/src/types/interval.rs +++ b/sqlx-postgres/src/types/interval.rs @@ -231,7 +231,7 @@ fn test_encode_interval() { microseconds: 0, }; assert!(matches!( - Encode::::encode(&interval, &mut buf), + Encode::::encode(interval, &mut buf), Ok(IsNull::No) )); assert_eq!(&**buf, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); @@ -243,7 +243,7 @@ fn test_encode_interval() { microseconds: 1_000, }; assert!(matches!( - Encode::::encode(&interval, &mut buf), + Encode::::encode(interval, &mut buf), Ok(IsNull::No) )); assert_eq!(&**buf, [0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 0, 0, 0, 0, 0, 0]); @@ -255,7 +255,7 @@ fn test_encode_interval() { microseconds: 1_000_000, }; assert!(matches!( - Encode::::encode(&interval, &mut buf), + Encode::::encode(interval, &mut buf), Ok(IsNull::No) )); assert_eq!(&**buf, [0, 0, 0, 0, 0, 15, 66, 64, 0, 0, 0, 0, 0, 0, 0, 0]); @@ -267,7 +267,7 @@ fn test_encode_interval() { microseconds: 3_600_000_000, }; assert!(matches!( - Encode::::encode(&interval, &mut buf), + Encode::::encode(interval, &mut buf), Ok(IsNull::No) )); assert_eq!( @@ -282,7 +282,7 @@ fn test_encode_interval() { microseconds: 0, }; assert!(matches!( - Encode::::encode(&interval, &mut buf), + Encode::::encode(interval, &mut buf), Ok(IsNull::No) )); assert_eq!(&**buf, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]); @@ -294,7 +294,7 @@ fn test_encode_interval() { microseconds: 0, }; assert!(matches!( - Encode::::encode(&interval, &mut buf), + Encode::::encode(interval, &mut buf), Ok(IsNull::No) )); assert_eq!(&**buf, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]); diff --git a/sqlx-postgres/src/types/json.rs b/sqlx-postgres/src/types/json.rs index 567e48015e..32f886c781 100644 --- a/sqlx-postgres/src/types/json.rs +++ b/sqlx-postgres/src/types/json.rs @@ -54,7 +54,7 @@ impl PgHasArrayType for 
JsonRawValue { } } -impl<'q, T> Encode<'q, Postgres> for Json +impl Encode<'_, Postgres> for Json where T: Serialize, { diff --git a/sqlx-postgres/src/types/rust_decimal.rs b/sqlx-postgres/src/types/rust_decimal.rs index 8321e82811..549043d98d 100644 --- a/sqlx-postgres/src/types/rust_decimal.rs +++ b/sqlx-postgres/src/types/rust_decimal.rs @@ -188,6 +188,7 @@ impl Decode<'_, Postgres> for Decimal { } #[cfg(test)] +#[allow(clippy::zero_prefixed_literal)] // Used for clarity mod tests { use super::{Decimal, PgNumeric, PgNumericSign}; use std::convert::TryFrom; @@ -205,7 +206,7 @@ mod tests { fn one() { let one: Decimal = "1".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&one).unwrap(), + PgNumeric::from(&one), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 0, @@ -219,7 +220,7 @@ mod tests { fn ten() { let ten: Decimal = "10".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&ten).unwrap(), + PgNumeric::from(&ten), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 0, @@ -233,7 +234,7 @@ mod tests { fn one_hundred() { let one_hundred: Decimal = "100".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&one_hundred).unwrap(), + PgNumeric::from(&one_hundred), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 0, @@ -248,7 +249,7 @@ mod tests { // Decimal doesn't normalize here let ten_thousand: Decimal = "10000".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&ten_thousand).unwrap(), + PgNumeric::from(&ten_thousand), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 0, @@ -262,7 +263,7 @@ mod tests { fn two_digits() { let two_digits: Decimal = "12345".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&two_digits).unwrap(), + PgNumeric::from(&two_digits), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 0, @@ -276,7 +277,7 @@ mod tests { fn one_tenth() { let one_tenth: Decimal = "0.1".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&one_tenth).unwrap(), + PgNumeric::from(&one_tenth), PgNumeric::Number { sign: 
PgNumericSign::Positive, scale: 1, @@ -290,7 +291,7 @@ mod tests { fn decimal_1() { let decimal: Decimal = "1.2345".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&decimal).unwrap(), + PgNumeric::from(&decimal), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 4, @@ -304,7 +305,7 @@ mod tests { fn decimal_2() { let decimal: Decimal = "0.12345".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&decimal).unwrap(), + PgNumeric::from(&decimal), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 5, @@ -318,7 +319,7 @@ mod tests { fn decimal_3() { let decimal: Decimal = "0.01234".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&decimal).unwrap(), + PgNumeric::from(&decimal), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 5, @@ -337,7 +338,7 @@ mod tests { weight: 1, digits: vec![1, 2345, 6789], }; - assert_eq!(PgNumeric::try_from(&decimal).unwrap(), expected_numeric); + assert_eq!(PgNumeric::from(&decimal), expected_numeric); let actual_decimal = Decimal::try_from(expected_numeric).unwrap(); assert_eq!(actual_decimal, decimal); @@ -354,10 +355,7 @@ mod tests { weight: -2, digits: vec![1234], }; - assert_eq!( - PgNumeric::try_from(&one_digit_decimal).unwrap(), - expected_numeric - ); + assert_eq!(PgNumeric::from(&one_digit_decimal), expected_numeric); let actual_decimal = Decimal::try_from(expected_numeric).unwrap(); assert_eq!(actual_decimal, one_digit_decimal); @@ -373,10 +371,7 @@ mod tests { weight: 7, digits: vec![7, 9228, 1625, 1426, 4337, 5935, 4395, 0335], }; - assert_eq!( - PgNumeric::try_from(&Decimal::MAX).unwrap(), - expected_numeric - ); + assert_eq!(PgNumeric::from(&Decimal::MAX), expected_numeric); let actual_decimal = Decimal::try_from(expected_numeric).unwrap(); assert_eq!(actual_decimal, Decimal::MAX); @@ -399,10 +394,7 @@ mod tests { weight: 0, digits: vec![7, 9228, 1625, 1426, 4337, 5935, 4395, 0335], }; - assert_eq!( - PgNumeric::try_from(&max_value_max_scale).unwrap(), - expected_numeric - ); + 
assert_eq!(PgNumeric::from(&max_value_max_scale), expected_numeric); let actual_decimal = Decimal::try_from(expected_numeric).unwrap(); assert_eq!(actual_decimal, max_value_max_scale); @@ -418,7 +410,7 @@ mod tests { // This is a regression test for https://github.com/launchbadge/sqlx/issues/423 let four_digit: Decimal = "1234".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&four_digit).unwrap(), + PgNumeric::from(&four_digit), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 0, @@ -433,7 +425,7 @@ mod tests { // This is a regression test for https://github.com/launchbadge/sqlx/issues/423 let negative_four_digit: Decimal = "-1234".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&negative_four_digit).unwrap(), + PgNumeric::from(&negative_four_digit), PgNumeric::Number { sign: PgNumericSign::Negative, scale: 0, @@ -448,7 +440,7 @@ mod tests { // This is a regression test for https://github.com/launchbadge/sqlx/issues/423 let eight_digit: Decimal = "12345678".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&eight_digit).unwrap(), + PgNumeric::from(&eight_digit), PgNumeric::Number { sign: PgNumericSign::Positive, scale: 0, @@ -463,7 +455,7 @@ mod tests { // This is a regression test for https://github.com/launchbadge/sqlx/issues/423 let negative_eight_digit: Decimal = "-12345678".parse().unwrap(); assert_eq!( - PgNumeric::try_from(&negative_eight_digit).unwrap(), + PgNumeric::from(&negative_eight_digit), PgNumeric::Number { sign: PgNumericSign::Negative, scale: 0, @@ -483,7 +475,7 @@ mod tests { weight: 0, digits: vec![100], }; - assert_eq!(PgNumeric::try_from(&one_hundred).unwrap(), expected_numeric); + assert_eq!(PgNumeric::from(&one_hundred), expected_numeric); let actual_decimal = Decimal::try_from(expected_numeric).unwrap(); assert_eq!(actual_decimal, one_hundred); diff --git a/sqlx-postgres/src/types/text.rs b/sqlx-postgres/src/types/text.rs index b5b0a5ed7b..12d92d4b2a 100644 --- a/sqlx-postgres/src/types/text.rs +++ 
b/sqlx-postgres/src/types/text.rs @@ -18,7 +18,7 @@ impl Type for Text { } } -impl<'q, T> Encode<'q, Postgres> for Text +impl Encode<'_, Postgres> for Text where T: Display, { diff --git a/sqlx-sqlite/Cargo.toml b/sqlx-sqlite/Cargo.toml index ca4c84c958..151283deda 100644 --- a/sqlx-sqlite/Cargo.toml +++ b/sqlx-sqlite/Cargo.toml @@ -7,8 +7,7 @@ license.workspace = true edition.workspace = true authors.workspace = true repository.workspace = true - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +rust-version.workspace = true [features] any = ["sqlx-core/any"] diff --git a/sqlx-sqlite/src/connection/intmap.rs b/sqlx-sqlite/src/connection/intmap.rs index dc09162f64..fb0553fb30 100644 --- a/sqlx-sqlite/src/connection/intmap.rs +++ b/sqlx-sqlite/src/connection/intmap.rs @@ -24,9 +24,11 @@ impl IntMap { } pub(crate) fn expand(&mut self, size: i64) -> usize { - let idx = size.try_into().expect("negative column index unsupported"); - while self.0.len() <= idx { - self.0.push(None); + let idx = usize::try_from(size).expect("negative column index unsupported"); + if idx >= self.0.len() { + let new_len = idx.checked_add(1).expect("idx + 1 overflowed"); + + self.0.resize_with(new_len, || None); } idx } @@ -95,15 +97,9 @@ impl IntMap { } impl IntMap { - pub(crate) fn get_mut_or_default<'a>(&'a mut self, idx: &i64) -> &'a mut V { + pub(crate) fn get_mut_or_default(&mut self, idx: &i64) -> &mut V { let idx: usize = self.expand(*idx); - - let item: &mut Option = &mut self.0[idx]; - if item.is_none() { - *item = Some(V::default()); - } - - return self.0[idx].as_mut().unwrap(); + self.0[idx].get_or_insert_default() } } @@ -132,7 +128,7 @@ impl IntMap { 0 }; self.iter() - .chain(std::iter::repeat(None).take(self_pad)) + .chain(std::iter::repeat_n(None, self_pad)) .zip(prev.iter().chain(std::iter::repeat(None))) .enumerate() .filter(|(_i, (n, p))| n != p) diff --git a/sqlx-sqlite/src/connection/worker.rs 
b/sqlx-sqlite/src/connection/worker.rs index 00a4c2999c..ec8b38f0f6 100644 --- a/sqlx-sqlite/src/connection/worker.rs +++ b/sqlx-sqlite/src/connection/worker.rs @@ -145,14 +145,14 @@ impl ConnectionWorker { let _guard = span.enter(); match cmd { Command::Prepare { query, tx } => { - tx.send(prepare(&mut conn, &query).map(|prepared| { - update_cached_statements_size( - &conn, - &shared.cached_statements_size, - ); - prepared - })) - .ok(); + tx.send(prepare(&mut conn, &query)).ok(); + + // This may issue an unnecessary write on failure, + // but it doesn't matter in the grand scheme of things. + update_cached_statements_size( + &conn, + &shared.cached_statements_size, + ); } Command::Describe { query, tx } => { tx.send(describe(&mut conn, &query)).ok(); diff --git a/sqlx-sqlite/src/logger.rs b/sqlx-sqlite/src/logger.rs index 3abed8cebc..1464a730c7 100644 --- a/sqlx-sqlite/src/logger.rs +++ b/sqlx-sqlite/src/logger.rs @@ -436,7 +436,7 @@ impl<'q, R: Debug, S: Debug + DebugDiff, P: Debug> QueryPlanLogger<'q, R, S, P> { } } -impl<'q, R: Debug, S: Debug + DebugDiff, P: Debug> Drop for QueryPlanLogger<'q, R, S, P> { +impl<R: Debug, S: Debug + DebugDiff, P: Debug> Drop for QueryPlanLogger<'_, R, S, P> { fn drop(&mut self) { self.finish(); } diff --git a/sqlx-sqlite/src/value.rs b/sqlx-sqlite/src/value.rs index 469c4e70d5..dc40f29ccb 100644 --- a/sqlx-sqlite/src/value.rs +++ b/sqlx-sqlite/src/value.rs @@ -108,8 +108,8 @@ pub(crate) struct ValueHandle<'a> { } // SAFE: only protected value objects are stored in SqliteValue -unsafe impl<'a> Send for ValueHandle<'a> {} -unsafe impl<'a> Sync for ValueHandle<'a> {} +unsafe impl Send for ValueHandle<'_> {} +unsafe impl Sync for ValueHandle<'_> {} impl ValueHandle<'static> { fn new_owned(value: NonNull<sqlite3_value>, type_info: SqliteTypeInfo) -> Self { @@ -122,7 +122,7 @@ impl ValueHandle<'static> { } } -impl<'a> ValueHandle<'a> { +impl ValueHandle<'_> { fn new_borrowed(value: NonNull<sqlite3_value>, type_info: SqliteTypeInfo) -> Self { Self { value, @@ -185,7 +185,7 @@ impl<'a> ValueHandle<'a> { } }
-impl<'a> Drop for ValueHandle<'a> { +impl Drop for ValueHandle<'_> { fn drop(&mut self) { if self.free_on_drop { unsafe { diff --git a/sqlx-test/Cargo.toml b/sqlx-test/Cargo.toml index af76d5562f..32a341adcb 100644 --- a/sqlx-test/Cargo.toml +++ b/sqlx-test/Cargo.toml @@ -1,8 +1,9 @@ [package] name = "sqlx-test" version = "0.1.0" -edition = "2021" +edition.workspace = true publish = false +rust-version.workspace = true [dependencies] sqlx = { default-features = false, path = ".." } diff --git a/src/lib.md b/src/lib.md index aa45b15730..7fc5b899a7 100644 --- a/src/lib.md +++ b/src/lib.md @@ -12,8 +12,6 @@ You choose which runtime SQLx uses by default by enabling one of the following f * `runtime-async-std` * `runtime-tokio` -The `runtime-actix` feature also exists but is an alias of `runtime-tokio`. - If more than one runtime feature is enabled, the Tokio runtime is used if a Tokio context exists on the current thread, i.e. [`tokio::runtime::Handle::try_current()`] returns `Ok`; `async-std` is used otherwise. @@ -53,9 +51,6 @@ Consult the user manual for your database to find the TLS versions it supports. If your connection configuration requires a TLS upgrade but TLS support was not enabled, the connection attempt will return an error. -The legacy runtime+TLS combination feature flags are still supported, but for forward-compatibility, use of the separate -runtime and TLS feature flags is recommended. 
- [the LaunchBadge team]: https://www.launchbadge.com [README]: https://www.github.com/launchbadge/sqlx/tree/main/README.md [browse our example projects]: https://www.github.com/launchbadge/sqlx/tree/main/examples diff --git a/src/lib.rs b/src/lib.rs index ed76c5f5ee..e55dc26e36 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -30,7 +30,7 @@ pub use sqlx_core::query_scalar::{query_scalar, query_scalar_with}; pub use sqlx_core::raw_sql::{raw_sql, RawSql}; pub use sqlx_core::row::Row; pub use sqlx_core::statement::Statement; -pub use sqlx_core::transaction::{Transaction, TransactionManager}; +pub use sqlx_core::transaction::Transaction; pub use sqlx_core::type_info::TypeInfo; pub use sqlx_core::types::Type; pub use sqlx_core::value::{Value, ValueRef}; diff --git a/tests/mysql/mysql.rs b/tests/mysql/mysql.rs index fe0f24050d..8af6c91de2 100644 --- a/tests/mysql/mysql.rs +++ b/tests/mysql/mysql.rs @@ -580,7 +580,7 @@ async fn test_shrink_buffers() -> anyhow::Result<()> { conn.shrink_buffers(); let ret: i64 = sqlx::query_scalar("SELECT ?") - .bind(&12345678i64) + .bind(12345678i64) .fetch_one(&mut conn) .await?; diff --git a/tests/mysql/types.rs b/tests/mysql/types.rs index e837a53f75..7eff6882e2 100644 --- a/tests/mysql/types.rs +++ b/tests/mysql/types.rs @@ -316,8 +316,8 @@ CREATE TEMPORARY TABLE with_bits ( .await?; sqlx::query("INSERT INTO with_bits (value_1, value_n) VALUES (?, ?)") - .bind(&1_u8) - .bind(&510202_u32) + .bind(1_u8) + .bind(510202_u32) .execute(&mut conn) .await?; diff --git a/tests/postgres/query_builder.rs b/tests/postgres/query_builder.rs index 08ed7d11a3..cdec136976 100644 --- a/tests/postgres/query_builder.rs +++ b/tests/postgres/query_builder.rs @@ -55,7 +55,7 @@ fn test_build() { let query = qb.build(); assert_eq!(query.sql(), "SELECT * FROM users WHERE id = $1"); - assert_eq!(Execute::persistent(&query), true); + assert!(Execute::persistent(&query)); } #[test] diff --git a/tests/postgres/test-attr.rs b/tests/postgres/test-attr.rs index 
d662024379..78a8b1f59a 100644 --- a/tests/postgres/test-attr.rs +++ b/tests/postgres/test-attr.rs @@ -158,7 +158,7 @@ async fn it_gets_comments(pool: PgPool) -> sqlx::Result<()> { let post_1_comments: Vec = sqlx::query_scalar( "SELECT content FROM comment WHERE post_id = $1::uuid ORDER BY created_at", ) - .bind(&"252c1d98-a9b0-4f18-8298-e59058bdfe16") + .bind("252c1d98-a9b0-4f18-8298-e59058bdfe16") .fetch_all(&pool) .await?; @@ -170,7 +170,7 @@ async fn it_gets_comments(pool: PgPool) -> sqlx::Result<()> { let post_2_comments: Vec = sqlx::query_scalar( "SELECT content FROM comment WHERE post_id = $1::uuid ORDER BY created_at", ) - .bind(&"844265f7-2472-4689-9a2e-b21f40dbf401") + .bind("844265f7-2472-4689-9a2e-b21f40dbf401") .fetch_all(&pool) .await?; diff --git a/tests/sqlite/any.rs b/tests/sqlite/any.rs index 856db70c05..b71c3ba43d 100644 --- a/tests/sqlite/any.rs +++ b/tests/sqlite/any.rs @@ -1,4 +1,4 @@ -use sqlx::{Any, Sqlite}; +use sqlx::Any; use sqlx_test::new; #[sqlx_macros::test] diff --git a/tests/sqlite/sqlite.rs b/tests/sqlite/sqlite.rs index c23c4fc9ef..4d24b07412 100644 --- a/tests/sqlite/sqlite.rs +++ b/tests/sqlite/sqlite.rs @@ -639,7 +639,7 @@ async fn issue_1467() -> anyhow::Result<()> { // Random seed: let seed: [u8; 32] = rand::random(); - println!("RNG seed: {}", hex::encode(&seed)); + println!("RNG seed: {}", hex::encode(seed)); // Pre-determined seed: // let mut seed: [u8; 32] = [0u8; 32]; @@ -734,7 +734,7 @@ async fn test_query_with_progress_handler() -> anyhow::Result<()> { let mut conn = new::().await?; // Using this string as a canary to ensure the callback doesn't get called with the wrong data pointer. 
- let state = format!("test"); + let state = "test".to_string(); conn.lock_handle().await?.set_progress_handler(1, move || { assert_eq!(state, "test"); false @@ -802,7 +802,7 @@ async fn test_query_with_update_hook() -> anyhow::Result<()> { let mut conn = new::().await?; static CALLED: AtomicBool = AtomicBool::new(false); // Using this string as a canary to ensure the callback doesn't get called with the wrong data pointer. - let state = format!("test"); + let state = "test".to_string(); conn.lock_handle().await?.set_update_hook(move |result| { assert_eq!(state, "test"); assert_eq!(result.operation, SqliteOperation::Insert); @@ -858,7 +858,7 @@ async fn test_query_with_commit_hook() -> anyhow::Result<()> { let mut conn = new::().await?; static CALLED: AtomicBool = AtomicBool::new(false); // Using this string as a canary to ensure the callback doesn't get called with the wrong data pointer. - let state = format!("test"); + let state = "test".to_string(); conn.lock_handle().await?.set_commit_hook(move || { CALLED.store(true, Ordering::Relaxed); assert_eq!(state, "test"); @@ -920,7 +920,7 @@ async fn test_query_with_rollback_hook() -> anyhow::Result<()> { let mut conn = new::().await?; // Using this string as a canary to ensure the callback doesn't get called with the wrong data pointer. - let state = format!("test"); + let state = "test".to_string(); static CALLED: AtomicBool = AtomicBool::new(false); conn.lock_handle().await?.set_rollback_hook(move || { assert_eq!(state, "test"); @@ -977,7 +977,7 @@ async fn test_query_with_preupdate_hook_insert() -> anyhow::Result<()> { let mut conn = new::().await?; static CALLED: AtomicBool = AtomicBool::new(false); // Using this string as a canary to ensure the callback doesn't get called with the wrong data pointer. 
- let state = format!("test"); + let state = "test".to_string(); conn.lock_handle().await?.set_preupdate_hook({ move |result| { assert_eq!(state, "test"); @@ -1030,7 +1030,7 @@ async fn test_query_with_preupdate_hook_delete() -> anyhow::Result<()> { .await?; static CALLED: AtomicBool = AtomicBool::new(false); // Using this string as a canary to ensure the callback doesn't get called with the wrong data pointer. - let state = format!("test"); + let state = "test".to_string(); conn.lock_handle().await?.set_preupdate_hook(move |result| { assert_eq!(state, "test"); assert_eq!(result.operation, SqliteOperation::Delete); @@ -1077,7 +1077,7 @@ async fn test_query_with_preupdate_hook_update() -> anyhow::Result<()> { static CALLED: AtomicBool = AtomicBool::new(false); let sqlite_value_stored: Arc>> = Default::default(); // Using this string as a canary to ensure the callback doesn't get called with the wrong data pointer. - let state = format!("test"); + let state = "test".to_string(); conn.lock_handle().await?.set_preupdate_hook({ let sqlite_value_stored = sqlite_value_stored.clone(); move |result| { diff --git a/tests/sqlite/types.rs b/tests/sqlite/types.rs index 2497e406cc..008d04dd86 100644 --- a/tests/sqlite/types.rs +++ b/tests/sqlite/types.rs @@ -88,7 +88,7 @@ mod json_tests { .fetch_one(&mut conn) .await?; - assert_eq!(true, value); + assert!(value); Ok(()) }