From 6730b653c15a08c7c314e35f64da5d833a4a0201 Mon Sep 17 00:00:00 2001 From: leiysky Date: Tue, 23 Jan 2024 21:51:19 +0800 Subject: [PATCH] bump rust toolchain --- .github/actions/build_linux/action.yml | 10 ++-- .../actions/build_linux_sanitizer/action.yml | 2 +- .github/actions/build_macos/action.yml | 10 ++-- .github/actions/check/action.yml | 2 +- .github/actions/test_unit/action.yml | 2 +- rust-toolchain.toml | 2 +- .../arrow/src/arrow/array/boolean/mod.rs | 1 - .../src/arrow/array/dictionary/value_map.rs | 43 +++++++------- .../src/arrow/array/fixed_size_list/mod.rs | 1 - src/common/arrow/src/arrow/array/map/mod.rs | 1 - .../arrow/src/arrow/array/primitive/mod.rs | 1 - src/common/arrow/src/arrow/buffer/mod.rs | 1 + .../src/arrow/compute/sort/row/variable.rs | 2 +- .../parquet/read/deserialize/nested_utils.rs | 1 - .../read/deserialize/primitive/basic.rs | 1 - .../read/deserialize/primitive/dictionary.rs | 2 - .../read/deserialize/primitive/integer.rs | 1 - .../read/deserialize/primitive/nested.rs | 3 - .../arrow/src/arrow/types/simd/packed.rs | 2 +- src/common/arrow/src/lib.rs | 1 + .../tests/it/arrow/array/binary/mutable.rs | 2 +- .../base/src/runtime/runtime_tracker.rs | 7 ++- src/common/hashtable/src/lib.rs | 2 + src/common/io/src/bitmap.rs | 25 ++++---- .../src/cursor_ext/cursor_read_bytes_ext.rs | 4 +- src/common/io/src/lib.rs | 1 + src/meta/api/src/lib.rs | 1 - src/meta/embedded/src/kv_api_impl.rs | 1 - src/meta/kvapi/src/kvapi/test_suite.rs | 2 +- .../service/tests/it/grpc/metasrv_grpc_api.rs | 2 +- .../tests/it/meta_node/meta_node_lifecycle.rs | 6 +- src/meta/sled-store/tests/it/sled_iter.rs | 2 +- src/meta/types/src/grpc_helper.rs | 4 +- src/query/ast/src/ast/format/mod.rs | 2 - .../ast/src/ast/statements/merge_into.rs | 4 +- src/query/datavalues/src/lib.rs | 1 - .../it/aggregating_index/index_refresh.rs | 6 +- .../tests/it/aggregating_index/index_scan.rs | 6 +- .../src/converts/meta/index_scalar.rs | 1 + src/query/expression/src/filter/selector.rs | 4 +- src/query/expression/src/kernels/mod.rs | 1 - src/query/expression/src/lib.rs | 2 +- src/query/expression/src/types/boolean.rs | 2 +- src/query/expression/src/types/date.rs | 2 +- src/query/expression/src/types/string.rs | 2 +- src/query/expression/src/types/timestamp.rs | 2 +- src/query/expression/src/utils/block_debug.rs | 2 +- .../formats/src/field_decoder/json_ast.rs | 2 +- src/query/functions/src/lib.rs | 2 + src/query/pipeline/core/src/lib.rs | 1 + .../impls/input_format_parquet.rs | 2 +- .../input_formats/impls/input_format_tsv.rs | 2 +- src/query/pipeline/transforms/src/lib.rs | 2 + .../src/processors/transforms/sort/utils.rs | 4 +- .../src/api/rpc/exchange/exchange_manager.rs | 8 ++- .../interpreters/interpreter_index_refresh.rs | 19 ++++--- src/query/service/src/lib.rs | 3 +- .../processors/transforms/aggregator/mod.rs | 1 - .../group_by/aggregator_groups_builder.rs | 4 +- .../processors/transforms/group_by/mod.rs | 1 - .../processors/transforms/hash_join/mod.rs | 1 - .../hash_join/transform_hash_join_build.rs | 48 +++++++++++----- .../pipelines/processors/transforms/mod.rs | 3 - .../service/src/servers/federated_helper.rs | 4 +- .../service/src/servers/http/v1/query/mod.rs | 4 -- .../mysql/writers/query_result_writer.rs | 2 +- src/query/service/src/test_kits/fixture.rs | 8 +-- src/query/service/src/test_kits/fuse.rs | 4 +- .../service/tests/it/interpreters/union.rs | 8 +-- src/query/service/tests/it/sql/exec/mod.rs | 2 +- .../optimizer/agg_index_query_rewrite.rs | 2 +- .../storages/fuse/operations/alter_table.rs 
| 4 +- .../it/storages/fuse/operations/clustering.rs | 6 +- .../clustering_information_table.rs | 4 +- .../fuse/table_functions/fuse_block_table.rs | 6 +- .../src/accessor/share_table_meta_accessor.rs | 9 +-- .../sharing_endpoint/src/configs/inner.rs | 4 +- .../physical_plans/physical_hash_join.rs | 21 ++++--- .../sql/src/planner/binder/copy_into_table.rs | 12 ++-- src/query/sql/src/planner/binder/ddl/task.rs | 5 +- src/query/sql/src/planner/binder/replace.rs | 2 +- src/query/sql/src/planner/binder/table.rs | 12 ++-- .../sql/src/planner/binder/table_args.rs | 4 +- src/query/sql/src/planner/binder/values.rs | 4 +- .../planner/optimizer/property/histogram.rs | 4 +- .../rule/transform/rule_eager_aggregation.rs | 31 +++++----- .../sql/src/planner/semantic/type_check.rs | 57 +++++++++---------- .../common/cache/src/providers/mod.rs | 2 - .../index/tests/it/filters/bloom_filter.rs | 6 +- .../common/table_meta/src/meta/v1/mod.rs | 1 - src/query/storages/delta/src/dal.rs | 14 ++--- src/query/storages/fuse/src/fuse_table.rs | 6 +- .../fuse/src/io/write/block_writer.rs | 2 +- src/query/storages/fuse/src/io/write/mod.rs | 1 - src/query/storages/fuse/src/lib.rs | 2 +- .../mutation/mutator/block_compact_mutator.rs | 5 +- .../operations/read/runtime_filter_prunner.rs | 2 +- .../mutator/deletion_accumulator.rs | 6 +- src/query/storages/parquet/src/lib.rs | 4 +- .../storages/parquet/src/parquet_rs/mod.rs | 1 - .../parquet_reader/read_policy/mod.rs | 3 - .../parquet/src/parquet_rs/pruning.rs | 4 +- .../storages/parquet/tests/it/merge_io.rs | 6 +- src/query/storages/system/src/lib.rs | 1 + src/query/users/src/lib.rs | 1 - .../mode/cluster/memo/aggregate_property.test | 7 ++- .../mode/cluster/memo/join_property.test | 13 +++-- .../mode/cluster/memo/mix_property.test | 5 +- 108 files changed, 311 insertions(+), 288 deletions(-) diff --git a/.github/actions/build_linux/action.yml b/.github/actions/build_linux/action.yml index b5b3be03f74ae..68c768ebf97b7 100644 --- a/.github/actions/build_linux/action.yml +++ b/.github/actions/build_linux/action.yml @@ -70,9 +70,9 @@ runs: artifacts="meta,metactl,query,sqllogictests" for artifact in ${artifacts//,/ }; do echo "==> building databend-$artifact ..." - cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --bin databend-$artifact + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --bin databend-$artifact done - cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --bin open-sharing + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --bin open-sharing ls -lh ./target/${{ inputs.target }}/${{ env.BUILD_PROFILE }}/databend-* - name: Build Debug for specific artifacts @@ -82,7 +82,7 @@ runs: artifacts="${{ inputs.artifacts }}" for artifact in ${artifacts//,/ }; do echo "==> building databend-$artifact ..." 
- cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --bin databend-$artifact + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --bin databend-$artifact done ls -lh ./target/${{ inputs.target }}/${{ env.BUILD_PROFILE }}/databend-$artifact @@ -93,7 +93,7 @@ runs: artifacts="meta,metactl,query,sqllogictests" for artifact in ${artifacts//,/ }; do echo "==> building databend-$artifact ..." - cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --release --bin databend-$artifact + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --release --bin databend-$artifact done ls -lh ./target/${{ inputs.target }}/${{ env.BUILD_PROFILE }}/databend-$artifact @@ -104,7 +104,7 @@ runs: artifacts="${{ inputs.artifacts }}" for artifact in ${artifacts//,/ }; do echo "==> building databend-$artifact ..." - cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --release --bin databend-$artifact + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --release --bin databend-$artifact done ls -lh ./target/${{ inputs.target }}/${{ env.BUILD_PROFILE }}/databend-$artifact diff --git a/.github/actions/build_linux_sanitizer/action.yml b/.github/actions/build_linux_sanitizer/action.yml index ebc7db8da49c7..7799a3e6d1426 100644 --- a/.github/actions/build_linux_sanitizer/action.yml +++ b/.github/actions/build_linux_sanitizer/action.yml @@ -53,7 +53,7 @@ runs: artifacts="meta,metactl,query" for artifact in ${artifacts//,/ }; do echo "==> building databend-$artifact with sanitizer ..." - cargo -Zbuild-std -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --manifest-path src/binaries/Cargo.toml --bin databend-$artifact + cargo -Zbuild-std -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --features ${{ inputs.features }} --manifest-path src/binaries/Cargo.toml --bin databend-$artifact done ls -lh ./target/${{ inputs.target }}/${{ env.BUILD_PROFILE }}/databend-* diff --git a/.github/actions/build_macos/action.yml b/.github/actions/build_macos/action.yml index b1d762a72834f..838e7e97db957 100644 --- a/.github/actions/build_macos/action.yml +++ b/.github/actions/build_macos/action.yml @@ -44,7 +44,7 @@ runs: - name: Build Debug for all artifacts if: env.BUILD_PROFILE == 'debug' && inputs.artifacts == 'all' shell: bash - run: cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} + run: cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} - name: Build Debug for specific artifacts if: env.BUILD_PROFILE == 'debug' && inputs.artifacts != 'all' @@ -52,10 +52,10 @@ runs: run: | artifacts="${{ inputs.artifacts }}" echo "==> building libs ..." - cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --lib + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --lib for artifact in ${artifacts//,/ }; do echo "==> building databend-$artifact ..." 
- cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --bin databend-$artifact + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --bin databend-$artifact done - name: Build Release if: env.BUILD_PROFILE == 'release' shell: bash run: | artifacts="${{ inputs.artifacts }}" echo "==> building libs ..." - cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --release --lib + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --release --lib for artifact in ${artifacts//,/ }; do echo "==> building databend-$artifact ..." - cargo -Zgitoxide=fetch,shallow-index,shallow-deps build --target ${{ inputs.target }} --release --bin databend-$artifact + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps build --target ${{ inputs.target }} --release --bin databend-$artifact done - name: Upload artifact diff --git a/.github/actions/check/action.yml b/.github/actions/check/action.yml index d49dfda4f76cf..161e844958e70 100644 --- a/.github/actions/check/action.yml +++ b/.github/actions/check/action.yml @@ -47,4 +47,4 @@ runs: - name: Clippy shell: bash - run: cargo -Zgitoxide=fetch,shallow-index,shallow-deps clippy --workspace --all-targets --all-features -- -D warnings + run: cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps clippy --workspace --all-targets --all-features -- -D warnings diff --git a/.github/actions/test_unit/action.yml b/.github/actions/test_unit/action.yml index d79751f76d0d3..01e0940fbf40c 100644 --- a/.github/actions/test_unit/action.yml +++ b/.github/actions/test_unit/action.yml @@ -11,7 +11,7 @@ runs: - shell: bash run: | - cargo -Zgitoxide=fetch,shallow-index,shallow-deps nextest run --no-fail-fast --hide-progress-bar + cargo -Zgitoxide=fetch -Zgit=shallow-index,shallow-deps nextest run --no-fail-fast --hide-progress-bar env: RUST_TEST_THREADS: "8" RUST_LOG: ERROR diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 6fe5ce579dc79..dde5ac00ce8e3 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2023-10-23" +channel = "nightly-2024-01-23" components = ["rustfmt", "clippy", "rust-src", "miri", "rust-analyzer"] diff --git a/src/common/arrow/src/arrow/array/boolean/mod.rs b/src/common/arrow/src/arrow/array/boolean/mod.rs index c858214d1ba0c..e8bf0b462f8c0 100644 --- a/src/common/arrow/src/arrow/array/boolean/mod.rs +++ b/src/common/arrow/src/arrow/array/boolean/mod.rs @@ -33,7 +33,6 @@ mod from; mod iterator; mod mutable; -pub use iterator::*; pub use mutable::*; /// A [`BooleanArray`] is Arrow's semantically equivalent of an immutable `Vec<Option<bool>>`.
diff --git a/src/common/arrow/src/arrow/array/dictionary/value_map.rs b/src/common/arrow/src/arrow/array/dictionary/value_map.rs index e3e84126387e1..d97370f8107e6 100644 --- a/src/common/arrow/src/arrow/array/dictionary/value_map.rs +++ b/src/common/arrow/src/arrow/array/dictionary/value_map.rs @@ -108,11 +108,12 @@ impl<K: DictionaryKey, M: MutableArray> ValueMap<K, M> { // safety: we only iterate within bounds let value = unsafe { values.value_unchecked_at(index) }; let hash = ahash_hash(value.borrow()); - match map.raw_entry_mut().from_hash(hash, |item| { + let entry = map.raw_entry_mut().from_hash(hash, |item| { // safety: invariant of the struct, it's always in bounds since we maintain it let stored_value = unsafe { values.value_unchecked_at(item.key.as_usize()) }; stored_value.borrow() == value.borrow() - }) { + }); + match entry { RawEntryMut::Occupied(_) => { return Err(Error::InvalidArgumentError( "duplicate value in dictionary values array".into(), @@ -158,25 +159,25 @@ impl<K: DictionaryKey, M: MutableArray> ValueMap<K, M> { M::Type: Eq + Hash, { let hash = ahash_hash(value.as_indexed()); - Ok( - match self.map.raw_entry_mut().from_hash(hash, |item| { - // safety: we've already checked (the inverse) when we pushed it, so it should be ok? - let index = unsafe { item.key.as_usize() }; - // safety: invariant of the struct, it's always in bounds since we maintain it - let stored_value = unsafe { self.values.value_unchecked_at(index) }; - stored_value.borrow() == value.as_indexed() - }) { - RawEntryMut::Occupied(entry) => entry.key().key, - RawEntryMut::Vacant(entry) => { - let index = self.values.len(); - let key = K::try_from(index).map_err(|_| Error::Overflow)?; - entry.insert_hashed_nocheck(hash, Hashed { hash, key }, ()); // NB: don't use .insert() here! - push(&mut self.values, value)?; - debug_assert_eq!(self.values.len(), index + 1); - key - } - }, - ) + let entry = self.map.raw_entry_mut().from_hash(hash, |item| { + // safety: we've already checked (the inverse) when we pushed it, so it should be ok? + let index = unsafe { item.key.as_usize() }; + // safety: invariant of the struct, it's always in bounds since we maintain it + let stored_value = unsafe { self.values.value_unchecked_at(index) }; + stored_value.borrow() == value.as_indexed() + }); + let result = match entry { + RawEntryMut::Occupied(entry) => entry.key().key, + RawEntryMut::Vacant(entry) => { + let index = self.values.len(); + let key = K::try_from(index).map_err(|_| Error::Overflow)?; + entry.insert_hashed_nocheck(hash, Hashed { hash, key }, ()); // NB: don't use .insert() here! + push(&mut self.values, value)?; + debug_assert_eq!(self.values.len(), index + 1); + key + } + }; + Ok(result) } pub fn shrink_to_fit(&mut self) { diff --git a/src/common/arrow/src/arrow/array/fixed_size_list/mod.rs b/src/common/arrow/src/arrow/array/fixed_size_list/mod.rs index 503fae3f8845e..7a212b879b620 100644 --- a/src/common/arrow/src/arrow/array/fixed_size_list/mod.rs +++ b/src/common/arrow/src/arrow/array/fixed_size_list/mod.rs @@ -26,7 +26,6 @@ mod data; mod ffi; pub(super) mod fmt; mod iterator; -pub use iterator::*; mod mutable; pub use mutable::*; diff --git a/src/common/arrow/src/arrow/array/map/mod.rs b/src/common/arrow/src/arrow/array/map/mod.rs index a14ec66722909..71d1fd4b8d250 100644 --- a/src/common/arrow/src/arrow/array/map/mod.rs +++ b/src/common/arrow/src/arrow/array/map/mod.rs @@ -27,7 +27,6 @@ mod data; mod ffi; pub(super) mod fmt; mod iterator; -pub use iterator::*; /// An array representing a (key, value), both of arbitrary logical types.
#[derive(Clone)] diff --git a/src/common/arrow/src/arrow/array/primitive/mod.rs b/src/common/arrow/src/arrow/array/primitive/mod.rs index 990698a4bb4cb..1ea10ab6689da 100644 --- a/src/common/arrow/src/arrow/array/primitive/mod.rs +++ b/src/common/arrow/src/arrow/array/primitive/mod.rs @@ -35,7 +35,6 @@ mod ffi; pub(super) mod fmt; mod from_natural; mod iterator; -pub use iterator::*; mod mutable; pub use mutable::*; diff --git a/src/common/arrow/src/arrow/buffer/mod.rs b/src/common/arrow/src/arrow/buffer/mod.rs index 5e93bcae216ba..d3e82ce25bcc1 100644 --- a/src/common/arrow/src/arrow/buffer/mod.rs +++ b/src/common/arrow/src/arrow/buffer/mod.rs @@ -22,6 +22,7 @@ use std::ops::Deref; use crate::arrow::ffi::InternalArrowArray; +#[allow(dead_code)] pub(crate) enum BytesAllocator { InternalArrowArray(InternalArrowArray), diff --git a/src/common/arrow/src/arrow/compute/sort/row/variable.rs b/src/common/arrow/src/arrow/compute/sort/row/variable.rs index b68cf68457894..cf0a06f09f02b 100644 --- a/src/common/arrow/src/arrow/compute/sort/row/variable.rs +++ b/src/common/arrow/src/arrow/compute/sort/row/variable.rs @@ -59,7 +59,7 @@ pub fn encoded_len(a: Option<&[u8]>) -> usize { pub fn encode<'a, I: Iterator<Item = Option<&'a [u8]>>>(out: &mut Rows, i: I, opts: SortOptions) { for (offset, maybe_val) in out.offsets.iter_mut().skip(1).zip(i) { match maybe_val { - Some(val) if val.is_empty() => { + Some([]) => { out.buffer[*offset] = match opts.descending { true => !EMPTY_SENTINEL, false => EMPTY_SENTINEL, diff --git a/src/common/arrow/src/arrow/io/parquet/read/deserialize/nested_utils.rs b/src/common/arrow/src/arrow/io/parquet/read/deserialize/nested_utils.rs index 9699dbc82e8ad..1af156c52a569 100644 --- a/src/common/arrow/src/arrow/io/parquet/read/deserialize/nested_utils.rs +++ b/src/common/arrow/src/arrow/io/parquet/read/deserialize/nested_utils.rs @@ -26,7 +26,6 @@ use super::super::Pages; use super::utils::DecodedState; use super::utils::MaybeNext; use super::utils::PageState; -pub use super::utils::Zip; use crate::arrow::array::Array; use crate::arrow::bitmap::MutableBitmap; use crate::arrow::error::Result; diff --git a/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/basic.rs b/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/basic.rs index 33fb9b99c281d..b1631a13cc3dd 100644 --- a/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/basic.rs +++ b/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/basic.rs @@ -321,7 +321,6 @@ impl<T, I, P, F> Iter<T, I, P, F> where I: Pages, T: NativeType, - P: ParquetNativeType, F: Copy + Fn(P) -> T, { diff --git a/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/dictionary.rs b/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/dictionary.rs index 58e6c4e5de7f8..e055afb9579a5 100644 --- a/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/dictionary.rs +++ b/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/dictionary.rs @@ -74,7 +74,6 @@ where K: DictionaryKey, I: Pages, T: NativeType, - P: ParquetNativeType, F: Copy + Fn(P) -> T, { @@ -153,7 +152,6 @@ where K: DictionaryKey, I: Pages, T: NativeType, - P: ParquetNativeType, F: Copy + Fn(P) -> T, { diff --git a/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/integer.rs b/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/integer.rs index 7412eae1ae459..fc529156e2296 100644 --- a/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/integer.rs +++
b/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/integer.rs @@ -226,7 +226,6 @@ impl<T, I, P, F> IntegerIter<T, I, P, F> where I: Pages, T: NativeType, - P: ParquetNativeType, F: Copy + Fn(P) -> T, { diff --git a/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/nested.rs b/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/nested.rs index 04d5bb3dd84a1..5323dbe6efda5 100644 --- a/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/nested.rs +++ b/src/common/arrow/src/arrow/io/parquet/read/deserialize/primitive/nested.rs @@ -187,7 +187,6 @@ pub struct NestedIter<T, I, P, F> where I: Pages, T: NativeType, - P: ParquetNativeType, F: Copy + Fn(P) -> T, { @@ -205,7 +204,6 @@ impl<T, I, P, F> NestedIter<T, I, P, F> where I: Pages, T: NativeType, - P: ParquetNativeType, F: Copy + Fn(P) -> T, { @@ -234,7 +232,6 @@ impl<T, I, P, F> Iterator for NestedIter<T, I, P, F> where I: Pages, T: NativeType, - P: ParquetNativeType, F: Copy + Fn(P) -> T, { diff --git a/src/common/arrow/src/arrow/types/simd/packed.rs b/src/common/arrow/src/arrow/types/simd/packed.rs index 681872b1e0c82..0b414678cbce0 100644 --- a/src/common/arrow/src/arrow/types/simd/packed.rs +++ b/src/common/arrow/src/arrow/types/simd/packed.rs @@ -26,6 +26,7 @@ pub use std::simd::i8x8; pub use std::simd::mask32x16 as m32x16; pub use std::simd::mask64x8 as m64x8; pub use std::simd::mask8x64 as m8x64; +pub use std::simd::prelude::SimdPartialEq; pub use std::simd::u16x32; pub use std::simd::u16x8; pub use std::simd::u32x16; @@ -33,7 +34,6 @@ pub use std::simd::u32x8; pub use std::simd::u64x8; pub use std::simd::u8x64; pub use std::simd::u8x8; -pub use std::simd::SimdPartialEq; /// Vector of 32 16-bit masks #[allow(non_camel_case_types)] diff --git a/src/common/arrow/src/lib.rs b/src/common/arrow/src/lib.rs index b2137dc430815..168ac297e9410 100644 --- a/src/common/arrow/src/lib.rs +++ b/src/common/arrow/src/lib.rs @@ -13,6 +13,7 @@ // limitations under the License. #![feature(iter_advance_by)] +#![allow(clippy::unconditional_recursion)] #![cfg_attr(feature = "simd", feature(portable_simd))] #![allow(clippy::redundant_closure_call)] #![allow(clippy::non_canonical_partial_ord_impl)] diff --git a/src/common/arrow/tests/it/arrow/array/binary/mutable.rs b/src/common/arrow/tests/it/arrow/array/binary/mutable.rs index 85b62d6f7b234..d20b73dccf289 100644 --- a/src/common/arrow/tests/it/arrow/array/binary/mutable.rs +++ b/src/common/arrow/tests/it/arrow/array/binary/mutable.rs @@ -53,7 +53,7 @@ fn from_iter() { #[test] fn from_trusted_len_iter() { - let data = vec![vec![0; 0], vec![1; 1], vec![2; 2]]; + let data = [vec![0; 0], vec![1; 1], vec![2; 2]]; let a: MutableBinaryArray<i32> = data.iter().cloned().map(Some).collect(); assert_eq!(a.values().deref(), &[1u8, 2, 2]); assert_eq!(a.offsets().as_slice(), &[0, 0, 1, 3]); diff --git a/src/common/base/src/runtime/runtime_tracker.rs b/src/common/base/src/runtime/runtime_tracker.rs index a66fdc18f4926..94463e086bfc7 100644 --- a/src/common/base/src/runtime/runtime_tracker.rs +++ b/src/common/base/src/runtime/runtime_tracker.rs @@ -49,6 +49,7 @@ use std::fmt::Formatter; use std::future::Future; use std::mem::take; use std::pin::Pin; +use std::ptr::addr_of_mut; use std::sync::atomic::AtomicI64; use std::sync::atomic::Ordering; use std::sync::Arc; @@ -67,7 +68,7 @@ pub static GLOBAL_MEM_STAT: MemStat = MemStat::global(); // For implemented and needs to call drop, we cannot use the attribute tag thread local. // https://play.rust-lang.org/?version=nightly&mode=debug&edition=2021&gist=ea33533387d401e86423df1a764b5609 thread_local!
{ - static TRACKER: RefCell<ThreadTracker> = RefCell::new(ThreadTracker::empty()); + static TRACKER: RefCell<ThreadTracker> = const { RefCell::new(ThreadTracker::empty()) }; } #[thread_local] @@ -231,7 +232,7 @@ impl ThreadTracker { /// `size` is the positive number of allocated bytes. #[inline] pub fn alloc(size: i64) -> Result<(), AllocError> { - let state_buffer = unsafe { &mut STAT_BUFFER }; + let state_buffer = unsafe { &mut *addr_of_mut!(STAT_BUFFER) }; // Rust will alloc or dealloc memory after the thread local is destroyed when we using thread_local macro. // This is the boundary of thread exit. It may be dangerous to throw mistakes here. @@ -265,7 +266,7 @@ impl ThreadTracker { /// `size` is positive number of bytes of the memory to deallocate. #[inline] pub fn dealloc(size: i64) { - let state_buffer = unsafe { &mut STAT_BUFFER }; + let state_buffer = unsafe { &mut *addr_of_mut!(STAT_BUFFER) }; // Rust will alloc or dealloc memory after the thread local is destroyed when we using thread_local macro. if state_buffer.destroyed_thread_local_macro { diff --git a/src/common/hashtable/src/lib.rs b/src/common/hashtable/src/lib.rs index 9800028411e13..a01468b346728 100644 --- a/src/common/hashtable/src/lib.rs +++ b/src/common/hashtable/src/lib.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +#![allow(internal_features)] +#![allow(clippy::ptr_arg)] #![feature(core_intrinsics)] #![feature(allocator_api)] #![feature(arbitrary_self_types)] diff --git a/src/common/io/src/bitmap.rs b/src/common/io/src/bitmap.rs index f3f0804de7eef..b026db2e2197a 100644 --- a/src/common/io/src/bitmap.rs +++ b/src/common/io/src/bitmap.rs @@ -17,7 +17,7 @@ use databend_common_exception::Result; use roaring::RoaringTreemap; pub fn parse_bitmap(buf: &[u8]) -> Result<RoaringTreemap> { - match std::str::from_utf8(buf) + std::str::from_utf8(buf) .map_err(|e| e.to_string()) .and_then(|s| { let s: String = s.chars().filter(|c| !c.is_whitespace()).collect(); @@ -26,14 +26,17 @@ pub fn parse_bitmap(buf: &[u8]) -> Result<RoaringTreemap> { .map(|v| v.parse::<u64>().map_err(|e| e.to_string())) .collect(); result - }) { - Ok(v) => { - let rb = RoaringTreemap::from_iter(v.iter()); - Ok(rb) - } - Err(_) => Err(ErrorCode::BadBytes(format!( - "Invalid Bitmap value: {:?}", - String::from_utf8_lossy(buf) - ))), - } + }) + .map_or_else( + |_| { + Err(ErrorCode::BadBytes(format!( + "Invalid Bitmap value: {:?}", + String::from_utf8_lossy(buf) + ))) + }, + |v| { + let rb = RoaringTreemap::from_iter(v.iter()); + Ok(rb) + }, + ) } diff --git a/src/common/io/src/cursor_ext/cursor_read_bytes_ext.rs b/src/common/io/src/cursor_ext/cursor_read_bytes_ext.rs index edcfafac4b921..f6c155c0d0e3e 100644 --- a/src/common/io/src/cursor_ext/cursor_read_bytes_ext.rs +++ b/src/common/io/src/cursor_ext/cursor_read_bytes_ext.rs @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
+use core::slice::memchr::memchr; use std::io::BufRead; use std::io::Cursor; use std::io::ErrorKind; @@ -175,8 +176,7 @@ where T: AsRef<[u8]> fn until(&mut self, delim: u8, buf: &mut Vec<u8>) -> usize { let remaining_slice = self.remaining_slice(); - let to_read = - core::slice::memchr::memchr(delim, remaining_slice).map_or(buf.len(), |n| n + 1); + let to_read = memchr(delim, remaining_slice).map_or(buf.len(), |n| n + 1); buf.extend_from_slice(&remaining_slice[..to_read]); self.consume(to_read); to_read diff --git a/src/common/io/src/lib.rs b/src/common/io/src/lib.rs index f4973235dcbe8..7bf0dca4cd271 100644 --- a/src/common/io/src/lib.rs +++ b/src/common/io/src/lib.rs @@ -15,6 +15,7 @@ // https://github.com/rust-lang/rust-clippy/issues/8334 #![allow(clippy::ptr_arg)] #![allow(clippy::uninlined_format_args)] +#![allow(internal_features)] #![feature(can_vector)] #![feature(read_buf)] #![feature(slice_internals)] diff --git a/src/meta/api/src/lib.rs b/src/meta/api/src/lib.rs index a032af7575b2f..f2f1cb301df71 100644 --- a/src/meta/api/src/lib.rs +++ b/src/meta/api/src/lib.rs @@ -15,7 +15,6 @@ #![allow(clippy::uninlined_format_args)] #![allow(clippy::diverging_sub_expression)] #![feature(const_fn_floating_point_arithmetic)] -#![feature(type_name_of_val)] extern crate databend_common_meta_types; diff --git a/src/meta/embedded/src/kv_api_impl.rs b/src/meta/embedded/src/kv_api_impl.rs index cb2e5bb19a539..6455c9db56c91 100644 --- a/src/meta/embedded/src/kv_api_impl.rs +++ b/src/meta/embedded/src/kv_api_impl.rs @@ -17,7 +17,6 @@ use databend_common_meta_kvapi::kvapi; use databend_common_meta_kvapi::kvapi::KVStream; use databend_common_meta_kvapi::kvapi::UpsertKVReply; use databend_common_meta_kvapi::kvapi::UpsertKVReq; -pub use databend_common_meta_sled_store::init_temp_sled_db; use databend_common_meta_types::MetaError; use databend_common_meta_types::TxnReply; use databend_common_meta_types::TxnRequest; diff --git a/src/meta/kvapi/src/kvapi/test_suite.rs b/src/meta/kvapi/src/kvapi/test_suite.rs index 7a9edce62adb2..04fbbc3be51e3 100644 --- a/src/meta/kvapi/src/kvapi/test_suite.rs +++ b/src/meta/kvapi/src/kvapi/test_suite.rs @@ -477,7 +477,7 @@ impl kvapi::TestSuite { fn check_transaction_responses( &self, reply: &TxnReply, - expected: &Vec<TxnOpResponse>, + expected: &[TxnOpResponse], success: bool, ) { assert_eq!(reply.success, success); diff --git a/src/meta/service/tests/it/grpc/metasrv_grpc_api.rs b/src/meta/service/tests/it/grpc/metasrv_grpc_api.rs index 17561ae0f3bba..6aba762159b5f 100644 --- a/src/meta/service/tests/it/grpc/metasrv_grpc_api.rs +++ b/src/meta/service/tests/it/grpc/metasrv_grpc_api.rs @@ -162,7 +162,7 @@ async fn test_join() -> anyhow::Result<()> { let client0 = tc0.grpc_client().await?; let client1 = tc1.grpc_client().await?; - let clients = vec![client0, client1]; + let clients = [client0, client1]; info!("--- upsert kv to every nodes"); { diff --git a/src/meta/service/tests/it/meta_node/meta_node_lifecycle.rs b/src/meta/service/tests/it/meta_node/meta_node_lifecycle.rs index f50406139a32f..35aebc53f1c25 100644 --- a/src/meta/service/tests/it/meta_node/meta_node_lifecycle.rs +++ b/src/meta/service/tests/it/meta_node/meta_node_lifecycle.rs @@ -179,7 +179,7 @@ async fn test_meta_node_join() -> anyhow::Result<()> { let mn2 = MetaNode::open_create(&tc2.config.raft_config, Some(()), None).await?; let mn3 = MetaNode::open_create(&tc3.config.raft_config, Some(()), None).await?; - let all = vec![mn0, mn1, mn2, mn3]; + let all = [mn0, mn1, mn2, mn3]; info!("--- check reopened memberships"); @@
-473,7 +473,7 @@ async fn test_meta_node_leave() -> anyhow::Result<()> { let mn0 = MetaNode::open_create(&tc0.config.raft_config, Some(()), None).await?; let mn2 = MetaNode::open_create(&tc2.config.raft_config, Some(()), None).await?; - let all = vec![mn0, mn2]; + let all = [mn0, mn2]; info!("--- check reopened memberships"); @@ -784,7 +784,7 @@ async fn assert_upsert_kv_synced(meta_nodes: Vec<Arc<MetaNode>>, key: &str) -> a } // Assert applied index on every node - for (_i, mn) in meta_nodes.iter().enumerate() { + for mn in meta_nodes.iter() { mn.raft .wait(timeout()) .log( diff --git a/src/meta/sled-store/tests/it/sled_iter.rs b/src/meta/sled-store/tests/it/sled_iter.rs index 201292fdbaa86..0025d68a9bd48 100644 --- a/src/meta/sled-store/tests/it/sled_iter.rs +++ b/src/meta/sled-store/tests/it/sled_iter.rs @@ -107,7 +107,7 @@ async fn test_sled_iter() -> anyhow::Result<()> { // Iterator outputs Vec<u8> - let trees = vec![t1, t2]; + let trees = [t1, t2]; let mut got = vec![]; for tree_iter in databend_common_meta_sled_store::iter::<Vec<u8>>() { diff --git a/src/meta/types/src/grpc_helper.rs b/src/meta/types/src/grpc_helper.rs index f507371ec5d17..2e50d22390857 100644 --- a/src/meta/types/src/grpc_helper.rs +++ b/src/meta/types/src/grpc_helper.rs @@ -89,9 +89,7 @@ impl GrpcHelper { pub fn get_response_meta_leader<T>(reply: &tonic::Response<T>) -> Option<Endpoint> { let metadata = reply.metadata(); - let Some(meta_leader) = metadata.get(HEADER_LEADER) else { - return None; - }; + let meta_leader = metadata.get(HEADER_LEADER)?; let s = match meta_leader.to_str() { Ok(x) => x, diff --git a/src/query/ast/src/ast/format/mod.rs b/src/query/ast/src/ast/format/mod.rs index 2e44fe521c7be..f5dce53dc8990 100644 --- a/src/query/ast/src/ast/format/mod.rs +++ b/src/query/ast/src/ast/format/mod.rs @@ -20,8 +20,6 @@ mod syntax; use std::fmt::Display; pub use ast_format::format_statement; -pub use indent_format::*; -pub use pretty_format::*; pub use syntax::pretty_statement; #[derive(Clone)] diff --git a/src/query/ast/src/ast/statements/merge_into.rs b/src/query/ast/src/ast/statements/merge_into.rs index 4a52efbcd134b..af452ffb4b286 100644 --- a/src/query/ast/src/ast/statements/merge_into.rs +++ b/src/query/ast/src/ast/statements/merge_into.rs @@ -172,7 +172,7 @@ impl MergeIntoStmt { (match_clauses, unmatch_clauses) } - pub fn check_multi_match_clauses_semantic(clauses: &Vec<MatchedClause>) -> Result<()> { + pub fn check_multi_match_clauses_semantic(clauses: &[MatchedClause]) -> Result<()> { // check match_clauses if clauses.len() > 1 { for (idx, clause) in clauses.iter().enumerate() { @@ -186,7 +186,7 @@ impl MergeIntoStmt { Ok(()) } - pub fn check_multi_unmatch_clauses_semantic(clauses: &Vec<UnmatchedClause>) -> Result<()> { + pub fn check_multi_unmatch_clauses_semantic(clauses: &[UnmatchedClause]) -> Result<()> { // check unmatch_clauses if clauses.len() > 1 { for (idx, clause) in clauses.iter().enumerate() { diff --git a/src/query/datavalues/src/lib.rs b/src/query/datavalues/src/lib.rs index d887c89235fd4..0ca4f2b0a2fb6 100644 --- a/src/query/datavalues/src/lib.rs +++ b/src/query/datavalues/src/lib.rs @@ -28,7 +28,6 @@ mod variant_value; pub use data_field::*; pub use data_schema::*; pub use data_value::*; -pub use prelude::*; pub use types::*; pub use variant_value::*; diff --git a/src/query/ee/tests/it/aggregating_index/index_refresh.rs b/src/query/ee/tests/it/aggregating_index/index_refresh.rs index 3e91fd822ac17..d9b5d65ff7ad3 100644 --- a/src/query/ee/tests/it/aggregating_index/index_refresh.rs +++ b/src/query/ee/tests/it/aggregating_index/index_refresh.rs @@ -423,7
+423,7 @@ async fn test_sync_agg_index_after_insert() -> Result<()> { } // Insert more data with insert into ... select ... - fixture + let _ = fixture .execute_query("INSERT INTO t0 SELECT * FROM t0") .await?; @@ -462,7 +462,7 @@ async fn test_sync_agg_index_after_copy_into() -> Result<()> { let index_id0 = create_index(ctx, index_name, original_query, query.as_str(), true).await?; // Copy into data - fixture.execute_query( + let _ = fixture.execute_query( "COPY INTO books FROM 'https://datafuse-1253727613.cos.ap-hongkong.myqcloud.com/data/books.csv' FILE_FORMAT = (TYPE = CSV);", ) .await?; @@ -580,7 +580,7 @@ async fn refresh_index( Some(l) => format!("REFRESH AGGREGATING INDEX {index_name} LIMIT {l}"), None => format!("REFRESH AGGREGATING INDEX {index_name}"), }; - execute_sql(ctx, &sql).await?; + let _ = execute_sql(ctx, &sql).await?; Ok(()) } diff --git a/src/query/ee/tests/it/aggregating_index/index_scan.rs b/src/query/ee/tests/it/aggregating_index/index_scan.rs index bcf3a0d7d754b..070bf98379899 100644 --- a/src/query/ee/tests/it/aggregating_index/index_scan.rs +++ b/src/query/ee/tests/it/aggregating_index/index_scan.rs @@ -93,7 +93,7 @@ async fn execute_plan(ctx: Arc<QueryContext>, plan: &Plan) -> Result<Vec<DataBlock async fn drop_index(ctx: Arc<QueryContext>, index_name: &str) -> Result<()> { let sql = format!("DROP AGGREGATING INDEX {index_name}"); - execute_sql(ctx, &sql).await?; + let _ = execute_sql(ctx, &sql).await?; Ok(()) } @@ -634,7 +634,7 @@ async fn fuzz(ctx: Arc<QueryContext>, params: FuzzParams) -> Result<()> { let num_index_blocks = (num_blocks as f64 * index_block_ratio) as usize; // Create agg index - execute_sql( + let _ = execute_sql( ctx.clone(), &format!("CREATE ASYNC AGGREGATING INDEX index AS {index_sql}"), ) @@ -657,7 +657,7 @@ async fn fuzz(ctx: Arc<QueryContext>, params: FuzzParams) -> Result<()> { // Refresh index if num_index_blocks > 0 { - execute_sql( + let _ = execute_sql( ctx.clone(), &format!("REFRESH AGGREGATING INDEX index LIMIT {num_index_blocks}"), ) diff --git a/src/query/expression/src/converts/meta/index_scalar.rs b/src/query/expression/src/converts/meta/index_scalar.rs index c4419955d97f1..22f971cae3ee1 100644 --- a/src/query/expression/src/converts/meta/index_scalar.rs +++ b/src/query/expression/src/converts/meta/index_scalar.rs @@ -214,6 +214,7 @@ impl<'de> Deserialize<'de> for IndexColumn { } impl PartialEq for IndexColumn { + #[allow(clippy::unconditional_recursion)] fn eq(&self, other: &Self) -> bool { let a: Column = self.clone().into(); let b: Column = other.clone().into(); diff --git a/src/query/expression/src/filter/selector.rs b/src/query/expression/src/filter/selector.rs index b2f76d0729170..782333d165837 100644 --- a/src/query/expression/src/filter/selector.rs +++ b/src/query/expression/src/filter/selector.rs @@ -158,7 +158,7 @@ impl<'a> Selector<'a> { #[allow(clippy::too_many_arguments)] fn process_and( &self, - exprs: &mut Vec<SelectExpr>, + exprs: &mut [SelectExpr], true_selection: &mut [u32], false_selection: (&mut [u32], bool), mutable_true_idx: &mut usize, @@ -211,7 +211,7 @@ impl<'a> Selector<'a> { #[allow(clippy::too_many_arguments)] fn process_or( &self, - exprs: &mut Vec<SelectExpr>, + exprs: &mut [SelectExpr], true_selection: &mut [u32], false_selection: (&mut [u32], bool), mutable_true_idx: &mut usize, diff --git a/src/query/expression/src/kernels/mod.rs b/src/query/expression/src/kernels/mod.rs index 00244f55fcad1..c6485fb21310d 100644 --- a/src/query/expression/src/kernels/mod.rs +++ b/src/query/expression/src/kernels/mod.rs @@ -25,7 +25,6 @@ mod take_ranges; mod topk; mod utils; -pub use group_by::*; pub use group_by_hash::*; pub use
sort::*; pub use take_chunks::*; diff --git a/src/query/expression/src/lib.rs b/src/query/expression/src/lib.rs index 55212a88048c2..e0098561a0133 100755 --- a/src/query/expression/src/lib.rs +++ b/src/query/expression/src/lib.rs @@ -14,6 +14,7 @@ #![allow(clippy::uninlined_format_args)] #![allow(clippy::len_without_is_empty)] +#![allow(internal_features)] // FIXME: we should avoid this by implementing Ord correctly. #![allow(clippy::non_canonical_partial_ord_impl)] #![allow(incomplete_features)] @@ -35,7 +36,6 @@ #![feature(trusted_len)] #![feature(iter_order_by)] #![feature(int_roundings)] -#![feature(trait_upcasting)] #![feature(lazy_cell)] #![feature(try_blocks)] diff --git a/src/query/expression/src/types/boolean.rs b/src/query/expression/src/types/boolean.rs index c3c8ed751e46e..d02723b56bc83 100644 --- a/src/query/expression/src/types/boolean.rs +++ b/src/query/expression/src/types/boolean.rs @@ -85,7 +85,7 @@ impl ValueType for BooleanType { } fn try_downcast_domain(domain: &Domain) -> Option<BooleanDomain> { - domain.as_boolean().map(BooleanDomain::clone) + domain.as_boolean().cloned() } fn upcast_scalar(scalar: Self::Scalar) -> Scalar { diff --git a/src/query/expression/src/types/date.rs b/src/query/expression/src/types/date.rs index 12951630afa3e..92ff02ab0af1a 100644 --- a/src/query/expression/src/types/date.rs +++ b/src/query/expression/src/types/date.rs @@ -91,7 +91,7 @@ impl ValueType for DateType { } fn try_downcast_domain(domain: &Domain) -> Option<SimpleDomain<i32>> { - domain.as_date().map(SimpleDomain::clone) + domain.as_date().cloned() } fn try_downcast_builder(builder: &mut ColumnBuilder) -> Option<&mut Self::ColumnBuilder> { diff --git a/src/query/expression/src/types/string.rs b/src/query/expression/src/types/string.rs index 947ca363062cb..68ef772f930b2 100644 --- a/src/query/expression/src/types/string.rs +++ b/src/query/expression/src/types/string.rs @@ -69,7 +69,7 @@ impl ValueType for StringType { } fn try_downcast_domain(domain: &Domain) -> Option<StringDomain> { - domain.as_string().map(StringDomain::clone) + domain.as_string().cloned() } fn try_downcast_builder(builder: &mut ColumnBuilder) -> Option<&mut Self::ColumnBuilder> { diff --git a/src/query/expression/src/types/timestamp.rs b/src/query/expression/src/types/timestamp.rs index 4fbf8e0a8ef54..808cb9f7c2de0 100644 --- a/src/query/expression/src/types/timestamp.rs +++ b/src/query/expression/src/types/timestamp.rs @@ -98,7 +98,7 @@ impl ValueType for TimestampType { } fn try_downcast_domain(domain: &Domain) -> Option<SimpleDomain<i64>> { - domain.as_timestamp().map(SimpleDomain::clone) + domain.as_timestamp().cloned() } fn try_downcast_builder(builder: &mut ColumnBuilder) -> Option<&mut Self::ColumnBuilder> { diff --git a/src/query/expression/src/utils/block_debug.rs b/src/query/expression/src/utils/block_debug.rs index d6be3d8afd48b..bdbc13d05d907 100644 --- a/src/query/expression/src/utils/block_debug.rs +++ b/src/query/expression/src/utils/block_debug.rs @@ -414,7 +414,7 @@ fn compute_render_widths( fn render_head( schema: &DataSchemaRef, widths: &mut [usize], - column_map: &mut Vec<i32>, + column_map: &mut [i32], header: &mut Vec, aligns: &mut Vec, ) { diff --git a/src/query/formats/src/field_decoder/json_ast.rs b/src/query/formats/src/field_decoder/json_ast.rs index 091ce9d315b33..a02c3dca78557 100644 --- a/src/query/formats/src/field_decoder/json_ast.rs +++ b/src/query/formats/src/field_decoder/json_ast.rs @@ -356,7 +356,7 @@ impl FieldJsonAstDecoder { } } - fn read_tuple(&self, fields: &mut Vec<ColumnBuilder>, value: &Value) -> Result<()> { + fn read_tuple(&self, fields: &mut
[ColumnBuilder], value: &Value) -> Result<()> { match value { Value::Object(obj) => { if fields.len() != obj.len() { diff --git a/src/query/functions/src/lib.rs b/src/query/functions/src/lib.rs index 0a6661974fc1e..fe1c84cb4cfc7 100644 --- a/src/query/functions/src/lib.rs +++ b/src/query/functions/src/lib.rs @@ -14,6 +14,8 @@ #![allow(clippy::arc_with_non_send_sync)] #![allow(clippy::uninlined_format_args)] +#![allow(clippy::ptr_arg)] +#![allow(internal_features)] #![feature(core_intrinsics)] #![feature(box_patterns)] #![feature(type_ascription)] diff --git a/src/query/pipeline/core/src/lib.rs b/src/query/pipeline/core/src/lib.rs index 731667028d398..8930268e379ac 100644 --- a/src/query/pipeline/core/src/lib.rs +++ b/src/query/pipeline/core/src/lib.rs @@ -13,6 +13,7 @@ // limitations under the License. #![allow(clippy::arc_with_non_send_sync)] +#![allow(clippy::useless_asref)] pub mod processors; diff --git a/src/query/pipeline/sources/src/input_formats/impls/input_format_parquet.rs b/src/query/pipeline/sources/src/input_formats/impls/input_format_parquet.rs index 5377b6b8f5955..acffe6b176bf7 100644 --- a/src/query/pipeline/sources/src/input_formats/impls/input_format_parquet.rs +++ b/src/query/pipeline/sources/src/input_formats/impls/input_format_parquet.rs @@ -399,7 +399,7 @@ impl AligningStateTrait for ParquetAligningState { } } -fn get_used_fields(fields: &Vec<Field>, schema: &TableSchemaRef) -> Result<Vec<Field>> { +fn get_used_fields(fields: &[Field], schema: &TableSchemaRef) -> Result<Vec<Field>> { let mut read_fields = Vec::with_capacity(fields.len()); for f in schema.fields().iter() { if let Some(m) = fields diff --git a/src/query/pipeline/sources/src/input_formats/impls/input_format_tsv.rs b/src/query/pipeline/sources/src/input_formats/impls/input_format_tsv.rs index 78857563c5987..67d87957f5372 100644 --- a/src/query/pipeline/sources/src/input_formats/impls/input_format_tsv.rs +++ b/src/query/pipeline/sources/src/input_formats/impls/input_format_tsv.rs @@ -85,7 +85,7 @@ impl InputFormatTSV { field_delimiter: u8, field_decoder: &SeparatedTextDecoder, buf: &[u8], - columns: &mut Vec<ColumnBuilder>, + columns: &mut [ColumnBuilder], schema: &TableSchemaRef, columns_to_read: &Option<Vec<usize>>, default_values: &Option<Vec<Scalar>>, diff --git a/src/query/pipeline/transforms/src/lib.rs b/src/query/pipeline/transforms/src/lib.rs index 339e62d63f846..22b644411c64e 100644 --- a/src/query/pipeline/transforms/src/lib.rs +++ b/src/query/pipeline/transforms/src/lib.rs @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License.
+#![allow(internal_features)] +#![allow(clippy::unconditional_recursion)] #![feature(core_intrinsics)] #![feature(int_roundings)] #![feature(binary_heap_as_slice)] diff --git a/src/query/pipeline/transforms/src/processors/transforms/sort/utils.rs b/src/query/pipeline/transforms/src/processors/transforms/sort/utils.rs index 0600ff22a2081..04ffff807ad2a 100644 --- a/src/query/pipeline/transforms/src/processors/transforms/sort/utils.rs +++ b/src/query/pipeline/transforms/src/processors/transforms/sort/utils.rs @@ -56,7 +56,9 @@ fn order_field_type(schema: &DataSchema, desc: &[SortColumnDescription]) -> Data #[inline(always)] pub fn add_order_field(schema: DataSchemaRef, desc: &[SortColumnDescription]) -> DataSchemaRef { - if let Some(f) = schema.fields.last() && f.name() == ORDER_COL_NAME { + if let Some(f) = schema.fields.last() + && f.name() == ORDER_COL_NAME + { schema } else { let mut fields = schema.fields().clone(); diff --git a/src/query/service/src/api/rpc/exchange/exchange_manager.rs b/src/query/service/src/api/rpc/exchange/exchange_manager.rs index 2a9512bd26ce3..1f063ca4ee72a 100644 --- a/src/query/service/src/api/rpc/exchange/exchange_manager.rs +++ b/src/query/service/src/api/rpc/exchange/exchange_manager.rs @@ -656,7 +656,9 @@ impl QueryCoordinator { .pipeline_build_res .as_ref() .map(|x| x.exchange_injector.clone()) - .unwrap(), + .ok_or_else(|| { + ErrorCode::Internal("Pipeline build result is none, It's a bug") + })?, )?; let mut build_res = fragment_coordinator.pipeline_build_res.unwrap(); @@ -706,7 +708,9 @@ impl QueryCoordinator { .pipeline_build_res .as_ref() .map(|x| x.exchange_injector.clone()) - .unwrap(), + .ok_or_else(|| { + ErrorCode::Internal("Pipeline build result is none, It's a bug") + })?, )?, ); } diff --git a/src/query/service/src/interpreters/interpreter_index_refresh.rs b/src/query/service/src/interpreters/interpreter_index_refresh.rs index 4058364f67e45..928c3c4fc436b 100644 --- a/src/query/service/src/interpreters/interpreter_index_refresh.rs +++ b/src/query/service/src/interpreters/interpreter_index_refresh.rs @@ -174,15 +174,16 @@ impl RefreshIndexInterpreter { }); // then, find the last refresh position. - let last = match source.parts.partitions.binary_search_by(|p| { - let fp = FusePartInfo::from_part(p).unwrap(); - fp.create_on - .partial_cmp(&self.plan.index_meta.updated_on) - .unwrap() - }) { - Ok(i) => i + 1, - Err(i) => i, - }; + let last = source + .parts + .partitions + .binary_search_by(|p| { + let fp = FusePartInfo::from_part(p).unwrap(); + fp.create_on + .partial_cmp(&self.plan.index_meta.updated_on) + .unwrap() + }) + .map_or_else(|i| i, |i| i + 1); // finally, skip the refreshed partitions. source.parts.partitions = match self.plan.limit { diff --git a/src/query/service/src/lib.rs b/src/query/service/src/lib.rs index b9152a509b7b0..bbd2a0203e78d 100644 --- a/src/query/service/src/lib.rs +++ b/src/query/service/src/lib.rs @@ -13,6 +13,8 @@ // limitations under the License. 
#![feature(int_roundings)] +#![allow(internal_features)] +#![allow(clippy::useless_asref)] #![allow(clippy::uninlined_format_args)] #![feature(hash_raw_entry)] #![feature(core_intrinsics)] @@ -23,7 +25,6 @@ #![feature(box_patterns)] #![feature(sync_unsafe_cell)] #![feature(option_get_or_insert_default)] -#![feature(result_option_inspect)] #![feature(result_flattening)] #![feature(iterator_try_reduce)] #![feature(cursor_remaining)] diff --git a/src/query/service/src/pipelines/processors/transforms/aggregator/mod.rs b/src/query/service/src/pipelines/processors/transforms/aggregator/mod.rs index 8197fd8d552a8..dd64e6e6723a6 100644 --- a/src/query/service/src/pipelines/processors/transforms/aggregator/mod.rs +++ b/src/query/service/src/pipelines/processors/transforms/aggregator/mod.rs @@ -37,7 +37,6 @@ pub use transform_aggregate_partial::TransformPartialAggregate; pub use transform_group_by_final::TransformFinalGroupBy; pub use transform_group_by_partial::TransformPartialGroupBy; pub use transform_partition_bucket::build_partition_bucket; -pub use transform_partition_bucket::TransformPartitionBucket; pub use transform_single_key::FinalSingleStateAggregator; pub use transform_single_key::PartialSingleStateAggregator; pub use utils::*; diff --git a/src/query/service/src/pipelines/processors/transforms/group_by/aggregator_groups_builder.rs b/src/query/service/src/pipelines/processors/transforms/group_by/aggregator_groups_builder.rs index 2da73903fdefb..194fe626d46f2 100644 --- a/src/query/service/src/pipelines/processors/transforms/group_by/aggregator_groups_builder.rs +++ b/src/query/service/src/pipelines/processors/transforms/group_by/aggregator_groups_builder.rs @@ -136,7 +136,7 @@ impl<'a> GroupColumnsBuilder for SerializedKeysGroupColumnsBuilder<'a> { for data_type in self.group_data_types.iter() { let mut column = ColumnBuilder::with_capacity(data_type, rows); - for (_, key) in keys.iter_mut().enumerate() { + for key in keys.iter_mut() { column.push_binary(key)?; } res.push(column.build()); @@ -200,7 +200,7 @@ impl<'a> GroupColumnsBuilder for DictionarySerializedKeysGroupColumnsBuilder<'a> } else { let mut column = ColumnBuilder::with_capacity(data_type, rows); - for (_, key) in other_type_keys.iter_mut().enumerate() { + for key in other_type_keys.iter_mut() { column.push_binary(key)?; } diff --git a/src/query/service/src/pipelines/processors/transforms/group_by/mod.rs b/src/query/service/src/pipelines/processors/transforms/group_by/mod.rs index 9ac6cf1659e6b..6604c9ef790e4 100644 --- a/src/query/service/src/pipelines/processors/transforms/group_by/mod.rs +++ b/src/query/service/src/pipelines/processors/transforms/group_by/mod.rs @@ -25,6 +25,5 @@ pub use aggregator_keys_builder::*; pub use aggregator_keys_iter::*; pub use aggregator_polymorphic_keys::*; pub use aggregator_state::*; -pub use aggregator_state_entity::*; pub const BUCKETS_LG2: u32 = 8; diff --git a/src/query/service/src/pipelines/processors/transforms/hash_join/mod.rs b/src/query/service/src/pipelines/processors/transforms/hash_join/mod.rs index 2c7660191737f..fc4b731b1de77 100644 --- a/src/query/service/src/pipelines/processors/transforms/hash_join/mod.rs +++ b/src/query/service/src/pipelines/processors/transforms/hash_join/mod.rs @@ -38,6 +38,5 @@ pub use hash_join_probe_state::HashJoinProbeState; pub use hash_join_state::*; pub use probe_spill::ProbeSpillState; pub use probe_state::ProbeState; -pub use result_blocks::*; pub use transform_hash_join_build::TransformHashJoinBuild; pub use 
transform_hash_join_probe::TransformHashJoinProbe; diff --git a/src/query/service/src/pipelines/processors/transforms/hash_join/transform_hash_join_build.rs b/src/query/service/src/pipelines/processors/transforms/hash_join/transform_hash_join_build.rs index a0c4949bd290e..4be9fd92565ef 100644 --- a/src/query/service/src/pipelines/processors/transforms/hash_join/transform_hash_join_build.rs +++ b/src/query/service/src/pipelines/processors/transforms/hash_join/transform_hash_join_build.rs @@ -156,7 +156,9 @@ impl Processor for TransformHashJoinBuild { fn event(&mut self) -> Result<Event> { match self.step { HashJoinBuildStep::Running => { - if let Some(spill_state) = self.spill_state.as_ref() && !self.from_spill { + if let Some(spill_state) = self.spill_state.as_ref() + && !self.from_spill + { if spill_state.check_need_spill()? { spill_state.spill_coordinator.need_spill()?; self.wait_spill()?; @@ -175,25 +177,45 @@ impl Processor for TransformHashJoinBuild { } if self.input_port.is_finished() { - if let Some(spill_state) = self.spill_state.as_mut() && !self.from_spill { + if let Some(spill_state) = self.spill_state.as_mut() + && !self.from_spill + { // The processor won't be triggered spill, because there won't be data from input port // Add the processor to `non_spill_processors` let spill_coordinator = &spill_state.spill_coordinator; - let mut non_spill_processors = spill_coordinator.non_spill_processors.write(); + let mut non_spill_processors = + spill_coordinator.non_spill_processors.write(); *non_spill_processors += 1; - let waiting_spill_count = spill_coordinator.waiting_spill_count.load(Ordering::Acquire); - info!("waiting_spill_count: {:?}, non_spill_processors: {:?}, total_builder_count: {:?}", waiting_spill_count, *non_spill_processors, spill_state.spill_coordinator.total_builder_count); - if (waiting_spill_count != 0 && *non_spill_processors + waiting_spill_count == spill_state.spill_coordinator.total_builder_count) && spill_coordinator.get_need_spill() { + let waiting_spill_count = spill_coordinator + .waiting_spill_count + .load(Ordering::Acquire); + info!( + "waiting_spill_count: {:?}, non_spill_processors: {:?}, total_builder_count: {:?}", + waiting_spill_count, + *non_spill_processors, + spill_state.spill_coordinator.total_builder_count + ); + if (waiting_spill_count != 0 + && *non_spill_processors + waiting_spill_count + == spill_state.spill_coordinator.total_builder_count) + && spill_coordinator.get_need_spill() + { spill_coordinator.no_need_spill(); drop(non_spill_processors); let mut spill_task = spill_coordinator.spill_tasks.lock(); - spill_state.split_spill_tasks(spill_coordinator.active_processor_num(), &mut spill_task)?; - spill_coordinator.waiting_spill_count.store(0, Ordering::Relaxed); - spill_coordinator.ready_spill_watcher.send(true).map_err(|_| { - ErrorCode::TokioError( - "ready_spill_watcher channel is closed", - ) - })?; + spill_state.split_spill_tasks( + spill_coordinator.active_processor_num(), + &mut spill_task, + )?; + spill_coordinator + .waiting_spill_count + .store(0, Ordering::Relaxed); + spill_coordinator + .ready_spill_watcher + .send(true) + .map_err(|_| { + ErrorCode::TokioError("ready_spill_watcher channel is closed") + })?; } } self.build_state.row_space_build_done()?; diff --git a/src/query/service/src/pipelines/processors/transforms/mod.rs b/src/query/service/src/pipelines/processors/transforms/mod.rs index 6c56fcd3541c5..8c22f7060a7fd 100644 --- a/src/query/service/src/pipelines/processors/transforms/mod.rs +++
b/src/query/service/src/pipelines/processors/transforms/mod.rs @@ -41,13 +41,11 @@ pub use hash_join::*; pub use processor_accumulate_row_number::AccumulateRowNumber; pub use processor_deduplicate_row_number::DeduplicateRowNumber; pub use processor_extract_hash_table_by_row_number::ExtractHashTableByRowNumber; -pub use range_join::RangeJoinState; pub use transform_add_computed_columns::TransformAddComputedColumns; pub use transform_add_const_columns::TransformAddConstColumns; pub use transform_add_internal_columns::TransformAddInternalColumns; pub use transform_add_stream_columns::TransformAddStreamColumns; pub use transform_cast_schema::TransformCastSchema; -pub use transform_create_sets::SubqueryReceiver; pub use transform_create_sets::TransformCreateSets; pub use transform_filter::TransformFilter; pub use transform_limit::TransformLimit; @@ -59,7 +57,6 @@ pub use transform_resort_addon::TransformResortAddOn; pub use transform_resort_addon_without_source_schema::TransformResortAddOnWithoutSourceSchema; pub use transform_runtime_cast_schema::TransformRuntimeCastSchema; pub use transform_sort_spill::create_transform_sort_spill; -pub use transform_sort_spill::TransformSortSpill; pub use transform_srf::TransformSRF; pub use transform_udf::TransformUdf; pub use window::FrameBound; diff --git a/src/query/service/src/servers/federated_helper.rs b/src/query/service/src/servers/federated_helper.rs index 6e6a102deb443..a0efb2032320d 100644 --- a/src/query/service/src/servers/federated_helper.rs +++ b/src/query/service/src/servers/federated_helper.rs @@ -28,7 +28,7 @@ impl FederatedHelper { query: &str, rules: &[(Regex, Option<(TableSchemaRef, DataBlock)>)], ) -> Option<(TableSchemaRef, DataBlock)> { - for (_index, (regex, data)) in rules.iter().enumerate() { + for (regex, data) in rules.iter() { if regex.is_match(query) { return match data { None => Some((TableSchemaRefExt::create(vec![]), DataBlock::empty())), @@ -44,7 +44,7 @@ impl FederatedHelper { query: &str, rules: &[(Regex, LazyBlockFunc)], ) -> Option<(TableSchemaRef, DataBlock)> { - for (_index, (regex, func)) in rules.iter().enumerate() { + for (regex, func) in rules.iter() { if regex.is_match(query) { return match func(query) { None => Some((TableSchemaRefExt::create(vec![]), DataBlock::empty())), diff --git a/src/query/service/src/servers/http/v1/query/mod.rs b/src/query/service/src/servers/http/v1/query/mod.rs index ca2328a02f7f2..4efa40d7cd55f 100644 --- a/src/query/service/src/servers/http/v1/query/mod.rs +++ b/src/query/service/src/servers/http/v1/query/mod.rs @@ -27,16 +27,12 @@ pub(crate) use execute_state::Executor; pub use execute_state::Progresses; pub use expirable::ExpiringState; pub use expiring_map::ExpiringMap; -pub use http_query::HttpQuery; pub use http_query::HttpQueryRequest; pub use http_query::HttpQueryResponseInternal; pub use http_query::HttpSessionConf; -pub use http_query::PaginationConf; -pub use http_query::ResponseState; pub use http_query_context::HttpQueryContext; pub use http_query_manager::HttpQueryManager; pub(crate) use http_query_manager::RemoveReason; -pub use page_manager::Page; pub use page_manager::PageManager; pub use page_manager::ResponseData; pub use page_manager::Wait; diff --git a/src/query/service/src/servers/mysql/writers/query_result_writer.rs b/src/query/service/src/servers/mysql/writers/query_result_writer.rs index d002df967685b..cb13a6b49ffd7 100644 --- a/src/query/service/src/servers/mysql/writers/query_result_writer.rs +++ 
b/src/query/service/src/servers/mysql/writers/query_result_writer.rs @@ -231,7 +231,7 @@ impl<'a, W: AsyncWrite + Send + Unpin> DFQueryResultWriter<'a, W> { .collect::<Vec<_>>(); for row_index in 0..num_rows { - for (_col_index, column) in columns.iter().enumerate() { + for column in columns.iter() { let value = unsafe { column.index_unchecked(row_index) }; match value { ScalarRef::Null => { diff --git a/src/query/service/src/test_kits/fixture.rs b/src/query/service/src/test_kits/fixture.rs index 0326c300c0916..5dd5ad8f5e84c 100644 --- a/src/query/service/src/test_kits/fixture.rs +++ b/src/query/service/src/test_kits/fixture.rs @@ -432,7 +432,7 @@ impl TestFixture { let create_table_plan = self.default_create_table_plan(); let interpreter = CreateTableInterpreter::try_create(self.default_ctx.clone(), create_table_plan)?; - interpreter.execute(self.default_ctx.clone()).await?; + let _ = interpreter.execute(self.default_ctx.clone()).await?; Ok(()) } @@ -440,7 +440,7 @@ impl TestFixture { let create_table_plan = self.normal_create_table_plan(); let interpreter = CreateTableInterpreter::try_create(self.default_ctx.clone(), create_table_plan)?; - interpreter.execute(self.default_ctx.clone()).await?; + let _ = interpreter.execute(self.default_ctx.clone()).await?; Ok(()) } @@ -448,7 +448,7 @@ impl TestFixture { let create_table_plan = self.variant_create_table_plan(); let interpreter = CreateTableInterpreter::try_create(self.default_ctx.clone(), create_table_plan)?; - interpreter.execute(self.default_ctx.clone()).await?; + let _ = interpreter.execute(self.default_ctx.clone()).await?; Ok(()) } @@ -481,7 +481,7 @@ impl TestFixture { let create_table_plan = self.computed_create_table_plan(); let interpreter = CreateTableInterpreter::try_create(self.default_ctx.clone(), create_table_plan)?; - interpreter.execute(self.default_ctx.clone()).await?; + let _ = interpreter.execute(self.default_ctx.clone()).await?; Ok(()) } diff --git a/src/query/service/src/test_kits/fuse.rs b/src/query/service/src/test_kits/fuse.rs index 41983ebd10b5b..573647b3151df 100644 --- a/src/query/service/src/test_kits/fuse.rs +++ b/src/query/service/src/test_kits/fuse.rs @@ -276,13 +276,13 @@ pub async fn analyze_table(fixture: &TestFixture) -> Result<()> { pub async fn do_deletion(ctx: Arc<QueryContext>, plan: DeletePlan) -> Result<()> { let delete_interpreter = DeleteInterpreter::try_create(ctx.clone(), plan.clone())?; - delete_interpreter.execute(ctx).await?; + let _ = delete_interpreter.execute(ctx).await?; Ok(()) } pub async fn do_update(ctx: Arc<QueryContext>, plan: UpdatePlan) -> Result<()> { let update_interpreter = UpdateInterpreter::try_create(ctx.clone(), plan)?; - update_interpreter.execute(ctx).await?; + let _ = update_interpreter.execute(ctx).await?; Ok(()) } diff --git a/src/query/service/tests/it/interpreters/union.rs b/src/query/service/tests/it/interpreters/union.rs index f5f42b168c2a4..1daf2b5ef5d5f 100644 --- a/src/query/service/tests/it/interpreters/union.rs +++ b/src/query/service/tests/it/interpreters/union.rs @@ -85,8 +85,8 @@ async fn test_simple_union_output_type() -> Result<()> { { let fixture = TestFixture::setup().await?; - execute_sql(fixture.new_query_ctx().await?, "create table a (a int)").await?; - execute_sql(fixture.new_query_ctx().await?, "create table b (b double)").await?; + let _ = execute_sql(fixture.new_query_ctx().await?, "create table a (a int)").await?; + let _ = execute_sql(fixture.new_query_ctx().await?, "create table b (b double)").await?; let (_, schema) = get_interpreter( fixture.new_query_ctx().await?, "select *
from a union all select * from b", @@ -167,10 +167,10 @@ async fn test_union_output_type() -> Result<()> { // Prepare tables let sql1 = create_all_types_table_sql("t1"); let plan1 = plan_sql(fixture.new_query_ctx().await?, &sql1).await?; - execute_plan(fixture.new_query_ctx().await?, &plan1).await?; + let _ = execute_plan(fixture.new_query_ctx().await?, &plan1).await?; let sql2 = create_all_types_table_sql("t2"); let plan2 = plan_sql(fixture.new_query_ctx().await?, &sql2).await?; - execute_plan(fixture.new_query_ctx().await?, &plan2).await?; + let _ = execute_plan(fixture.new_query_ctx().await?, &plan2).await?; let table_schema = table_schema(&plan1); let table_fields = table_schema.fields(); diff --git a/src/query/service/tests/it/sql/exec/mod.rs b/src/query/service/tests/it/sql/exec/mod.rs index c9d5510309b4b..24ca24d34bff1 100644 --- a/src/query/service/tests/it/sql/exec/mod.rs +++ b/src/query/service/tests/it/sql/exec/mod.rs @@ -152,7 +152,7 @@ pub async fn test_snapshot_consistency() -> Result<()> { if let Plan::OptimizeTable(plan) = compact_plan { let optimize_interpreter = OptimizeTableInterpreter::try_create(ctx.clone(), *plan.clone())?; - optimize_interpreter.execute(ctx).await?; + let _ = optimize_interpreter.execute(ctx).await?; } Ok::<(), ErrorCode>(()) }; diff --git a/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs b/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs index db734348e5a33..95cf3c848c93b 100644 --- a/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs +++ b/src/query/service/tests/it/sql/planner/optimizer/agg_index_query_rewrite.rs @@ -488,7 +488,7 @@ async fn test_query_rewrite_impl(format: &str) -> Result<()> { let ctx = fixture.new_query_ctx().await?; let create_table_plan = create_table_plan(&fixture, format); let interpreter = CreateTableInterpreter::try_create(ctx.clone(), create_table_plan)?; - interpreter.execute(ctx.clone()).await?; + let _ = interpreter.execute(ctx.clone()).await?; let test_suites = get_test_suites(); for suite in test_suites { diff --git a/src/query/service/tests/it/storages/fuse/operations/alter_table.rs b/src/query/service/tests/it/storages/fuse/operations/alter_table.rs index 1a06d45fc8c8e..9018113eb9fea 100644 --- a/src/query/service/tests/it/storages/fuse/operations/alter_table.rs +++ b/src/query/service/tests/it/storages/fuse/operations/alter_table.rs @@ -164,7 +164,7 @@ async fn test_fuse_table_optimize_alter_table() -> Result<()> { }; let ctx = fixture.new_query_ctx().await?; let interpreter = DropTableColumnInterpreter::try_create(ctx.clone(), drop_table_column_plan)?; - interpreter.execute(ctx.clone()).await?; + let _ = interpreter.execute(ctx.clone()).await?; // add a column of uint64 with default value `(1,15.0)` let field = TableField::new("b", TableDataType::Tuple { @@ -186,7 +186,7 @@ async fn test_fuse_table_optimize_alter_table() -> Result<()> { option: AddColumnOption::End, }; let interpreter = AddTableColumnInterpreter::try_create(ctx.clone(), add_table_column_plan)?; - interpreter.execute(ctx.clone()).await?; + let _ = interpreter.execute(ctx.clone()).await?; // insert values for new schema let block = { diff --git a/src/query/service/tests/it/storages/fuse/operations/clustering.rs b/src/query/service/tests/it/storages/fuse/operations/clustering.rs index 5cbbf3f1a4555..6afcf1c980582 100644 --- a/src/query/service/tests/it/storages/fuse/operations/clustering.rs +++ b/src/query/service/tests/it/storages/fuse/operations/clustering.rs @@ -61,7 
+61,7 @@ async fn test_fuse_alter_table_cluster_key() -> databend_common_exception::Resul // create test table let interpreter = CreateTableInterpreter::try_create(ctx.clone(), create_table_plan)?; - interpreter.execute(ctx.clone()).await?; + let _ = interpreter.execute(ctx.clone()).await?; // add cluster key let alter_table_cluster_key_plan = AlterTableClusterKeyPlan { @@ -73,7 +73,7 @@ async fn test_fuse_alter_table_cluster_key() -> databend_common_exception::Resul }; let interpreter = AlterTableClusterKeyInterpreter::try_create(ctx.clone(), alter_table_cluster_key_plan)?; - interpreter.execute(ctx.clone()).await?; + let _ = interpreter.execute(ctx.clone()).await?; let table = fixture.latest_default_table().await?; let fuse_table = FuseTable::try_from_table(table.as_ref())?; @@ -109,7 +109,7 @@ async fn test_fuse_alter_table_cluster_key() -> databend_common_exception::Resul }; let interpreter = DropTableClusterKeyInterpreter::try_create(ctx.clone(), drop_table_cluster_key_plan)?; - interpreter.execute(ctx.clone()).await?; + let _ = interpreter.execute(ctx.clone()).await?; let table = fixture.latest_default_table().await?; let fuse_table = FuseTable::try_from_table(table.as_ref())?; diff --git a/src/query/service/tests/it/storages/fuse/table_functions/clustering_information_table.rs b/src/query/service/tests/it/storages/fuse/table_functions/clustering_information_table.rs index f5691e5ca3033..df19a477717d2 100644 --- a/src/query/service/tests/it/storages/fuse/table_functions/clustering_information_table.rs +++ b/src/query/service/tests/it/storages/fuse/table_functions/clustering_information_table.rs @@ -68,7 +68,7 @@ async fn test_clustering_information_table_read() -> Result<()> { { let qry = format!("insert into {}.{} values(1, (2, 3)),(2, (4, 6))", db, tbl); - execute_query(ctx.clone(), qry.as_str()).await?; + let _ = execute_query(ctx.clone(), qry.as_str()).await?; let expected = vec![ "+----------+----------+----------+----------+----------+----------+-------------+", "| Column 0 | Column 1 | Column 2 | Column 3 | Column 4 | Column 5 | Column 6 |", @@ -90,7 +90,7 @@ async fn test_clustering_information_table_read() -> Result<()> { { // incompatible table engine let qry = format!("create table {}.in_mem (a int) engine =Memory", db); - execute_query(ctx.clone(), qry.as_str()).await?; + let _ = execute_query(ctx.clone(), qry.as_str()).await?; let qry = format!( "select * from clustering_information('{}', '{}')", diff --git a/src/query/service/tests/it/storages/fuse/table_functions/fuse_block_table.rs b/src/query/service/tests/it/storages/fuse/table_functions/fuse_block_table.rs index 9c278275be259..2ac1d8a834e80 100644 --- a/src/query/service/tests/it/storages/fuse/table_functions/fuse_block_table.rs +++ b/src/query/service/tests/it/storages/fuse/table_functions/fuse_block_table.rs @@ -53,9 +53,9 @@ async fn test_fuse_block_table() -> Result<()> { { let qry = format!("insert into {}.{} values(1, (2, 3)),(2, (4, 6))", db, tbl); - execute_query(ctx.clone(), qry.as_str()).await?; + let _ = execute_query(ctx.clone(), qry.as_str()).await?; let qry = format!("insert into {}.{} values(7, (8, 9))", db, tbl); - execute_query(ctx.clone(), qry.as_str()).await?; + let _ = execute_query(ctx.clone(), qry.as_str()).await?; let expected = vec![ "+----------+", "| Column 0 |", @@ -80,7 +80,7 @@ async fn test_fuse_block_table() -> Result<()> { { // incompatible table engine let qry = format!("create table {}.in_mem (a int) engine =Memory", db); - execute_query(ctx.clone(), qry.as_str()).await?; + let _ = 
execute_query(ctx.clone(), qry.as_str()).await?; let qry = format!("select * from fuse_block('{}', '{}')", db, "in_mem"); let output_stream = execute_query(ctx.clone(), qry.as_str()).await?; diff --git a/src/query/sharing_endpoint/src/accessor/share_table_meta_accessor.rs b/src/query/sharing_endpoint/src/accessor/share_table_meta_accessor.rs index af9109da54f5f..341abb7be7a8d 100644 --- a/src/query/sharing_endpoint/src/accessor/share_table_meta_accessor.rs +++ b/src/query/sharing_endpoint/src/accessor/share_table_meta_accessor.rs @@ -34,12 +34,9 @@ impl SharingAccessor { if input.request_tables.is_empty() { Ok(share_table_map) } else { - Ok(BTreeMap::from_iter( - share_table_map - .into_iter() - .filter(|(table_name, _table_info)| input.request_tables.contains(table_name)) - .map(|(table_name, table_info)| (table_name, table_info)), - )) + Ok(BTreeMap::from_iter(share_table_map.into_iter().filter( + |(table_name, _table_info)| input.request_tables.contains(table_name), + ))) } } } diff --git a/src/query/sharing_endpoint/src/configs/inner.rs b/src/query/sharing_endpoint/src/configs/inner.rs index 09aabf53b591e..b4f684dac7f92 100644 --- a/src/query/sharing_endpoint/src/configs/inner.rs +++ b/src/query/sharing_endpoint/src/configs/inner.rs @@ -29,7 +29,7 @@ impl Config { /// /// In the future, we could have `ConfigV1` and `ConfigV2`. pub async fn load() -> Result<Self> { - let mut cfg: Self = OuterV0Config::load(true)?.try_into()?; + let mut cfg: Self = OuterV0Config::load(true)?.into(); cfg.storage.params = cfg.storage.params.auto_detect().await?; Ok(cfg) @@ -39,7 +39,7 @@ impl Config { /// /// This function is served for tests only. pub fn load_for_test() -> Result<Self> { - let cfg: Self = OuterV0Config::load(false)?.try_into()?; + let cfg: Self = OuterV0Config::load(false)?.into(); Ok(cfg) } diff --git a/src/query/sql/src/executor/physical_plans/physical_hash_join.rs b/src/query/sql/src/executor/physical_plans/physical_hash_join.rs index 31be0ba434c74..1e92ce5a542a1 100644 --- a/src/query/sql/src/executor/physical_plans/physical_hash_join.rs +++ b/src/query/sql/src/executor/physical_plans/physical_hash_join.rs @@ -365,17 +365,22 @@ impl PhysicalPlanBuilder { (build_fields, probe_fields) }; for field in dropped_fields.iter() { - if result_fields.iter().all(|x| x.name() != field.name()) && - let Ok(index) = field.name().parse::<usize>() && - column_projections.contains(&index) + if result_fields.iter().all(|x| x.name() != field.name()) + && let Ok(index) = field.name().parse::<usize>() + && column_projections.contains(&index) { let metadata = self.metadata.read(); let unexpected_column = metadata.column(index); - let unexpected_column_info = if let Some(table_index) = unexpected_column.table_index() { - format!("{:?}.{:?}", metadata.table(table_index).name(), unexpected_column.name()) - } else { - unexpected_column.name().to_string() - }; + let unexpected_column_info = + if let Some(table_index) = unexpected_column.table_index() { + format!( + "{:?}.{:?}", + metadata.table(table_index).name(), + unexpected_column.name() + ) + } else { + unexpected_column.name().to_string() + }; return Err(ErrorCode::SemanticError(format!( "cannot access the {} in ANTI or SEMI join", unexpected_column_info diff --git a/src/query/sql/src/planner/binder/copy_into_table.rs b/src/query/sql/src/planner/binder/copy_into_table.rs index 01dddab8cad1b..bcb129a6c797c 100644 --- a/src/query/sql/src/planner/binder/copy_into_table.rs +++ b/src/query/sql/src/planner/binder/copy_into_table.rs @@ -178,7 +178,9 @@ impl<'a> Binder { bind_ctx: 
&BindContext, plan: CopyIntoTablePlan, ) -> Result<Plan> { - if let FileFormatParams::Parquet(fmt) = &plan.stage_table_info.stage_info.file_format_params && fmt.missing_field_as == NullAs::Error { + if let FileFormatParams::Parquet(fmt) = &plan.stage_table_info.stage_info.file_format_params + && fmt.missing_field_as == NullAs::Error + { let table_ctx = self.ctx.clone(); let use_parquet2 = table_ctx.get_settings().get_use_parquet2()?; let stage_info = plan.stage_table_info.stage_info.clone(); @@ -199,9 +201,7 @@ impl<'a> Binder { span: None, database: None, table: None, - column: AstColumnID::Name(Identifier::from_name( - dest_field.name().to_string(), - )), + column: AstColumnID::Name(Identifier::from_name(dest_field.name().to_string())), }; let expr = match table_schema.field_with_name(dest_field.name()) { Ok(src_field) => { @@ -229,9 +229,7 @@ impl<'a> Binder { column } } - Err(_) => { - column - } + Err(_) => column, }; select_list.push(SelectTarget::AliasedExpr { expr: Box::new(expr), diff --git a/src/query/sql/src/planner/binder/ddl/task.rs b/src/query/sql/src/planner/binder/ddl/task.rs index f84dc9a8f114a..5c84d9bf88ce0 100644 --- a/src/query/sql/src/planner/binder/ddl/task.rs +++ b/src/query/sql/src/planner/binder/ddl/task.rs @@ -67,7 +67,10 @@ fn verify_scheduler_option(schedule_opts: &Option<ScheduleOptions>) -> Result<() cron_expr ))); } - if let Some(time_zone) = time_zone && !time_zone.is_empty() && chrono_tz::Tz::from_str(&time_zone).is_err() { + if let Some(time_zone) = time_zone + && !time_zone.is_empty() + && chrono_tz::Tz::from_str(&time_zone).is_err() + { return Err(ErrorCode::SemanticError(format!( "invalid time zone {}", time_zone diff --git a/src/query/sql/src/planner/binder/replace.rs b/src/query/sql/src/planner/binder/replace.rs index 53a6c882261b2..bdbbb1f240111 100644 --- a/src/query/sql/src/planner/binder/replace.rs +++ b/src/query/sql/src/planner/binder/replace.rs @@ -77,7 +77,7 @@ impl Binder { .map(|ident| { schema .field_with_name(&normalize_identifier(ident, &self.name_resolution_ctx).name) - .map(|v| v.clone()) + .cloned() }) .collect::<Result<Vec<_>>>()?; diff --git a/src/query/sql/src/planner/binder/table.rs b/src/query/sql/src/planner/binder/table.rs index 6d04daa47870d..0a0d23da0f605 100644 --- a/src/query/sql/src/planner/binder/table.rs +++ b/src/query/sql/src/planner/binder/table.rs @@ -681,8 +681,8 @@ impl Binder { bind_context: &mut BindContext, span: &Span, name: &Identifier, - params: &Vec<Expr>, - named_params: &Vec<(String, Expr)>, + params: &[Expr], + named_params: &[(String, Expr)], alias: &Option<TableAlias>, ) -> Result<(SExpr, BindContext)> { let func_name = normalize_identifier(name, &self.name_resolution_ctx); @@ -1540,8 +1540,8 @@ pub fn parse_result_scan_args(table_args: &TableArgs) -> Result<String> { fn parse_table_function_args( span: &Span, func_name: &Identifier, - params: &Vec<Expr>, - named_params: &Vec<(String, Expr)>, + params: &[Expr], + named_params: &[(String, Expr)], ) -> Result<Vec<Expr>> { if func_name.name.eq_ignore_ascii_case("flatten") { // build flatten function arguments. 
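
The signature changes above (for example `params: &Vec<Expr>` becoming `params: &[Expr]`) follow clippy's `ptr_arg` lint, which the bumped toolchain applies more aggressively. Below is a minimal standalone sketch of the rationale with hypothetical names, not code from this patch: a slice parameter accepts strictly more callers than `&Vec<T>` at no runtime cost.

fn sum_name_lengths(params: &[String]) -> usize {
    // A slice borrows any contiguous storage; callers are not forced to
    // materialize a Vec just to satisfy the signature.
    params.iter().map(|s| s.len()).sum()
}

fn main() {
    let owned: Vec<String> = vec!["flatten".into(), "a".into()];
    let fixed: [String; 1] = ["b".into()];

    // `&Vec<String>` would only accept the first call; `&[String]` takes
    // all three via deref/unsize coercion, with no extra allocation.
    assert_eq!(sum_name_lengths(&owned), 8);
    assert_eq!(sum_name_lengths(&fixed), 1);
    assert_eq!(sum_name_lengths(&owned[..1]), 7);
}
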
@@ -1574,7 +1574,7 @@ fn parse_table_function_args( } if !params.is_empty() { - args.extend(params.clone()); + args.extend(params.iter().cloned()); } Ok(args) } else { @@ -1591,6 +1591,6 @@ fn parse_table_function_args( .set_span(*span)); } - Ok(params.clone()) + Ok(params.to_vec()) } } diff --git a/src/query/sql/src/planner/binder/table_args.rs b/src/query/sql/src/planner/binder/table_args.rs index bd2de3d344b0e..8693f1a5a65c5 100644 --- a/src/query/sql/src/planner/binder/table_args.rs +++ b/src/query/sql/src/planner/binder/table_args.rs @@ -29,8 +29,8 @@ use crate::ScalarExpr; #[async_backtrace::framed] pub async fn bind_table_args( scalar_binder: &mut ScalarBinder<'_>, - params: &Vec<Expr>, - named_params: &Vec<(String, Expr)>, + params: &[Expr], + named_params: &[(String, Expr)], ) -> Result<TableArgs> { let mut args = Vec::with_capacity(params.len()); for arg in params.iter() { diff --git a/src/query/sql/src/planner/binder/values.rs b/src/query/sql/src/planner/binder/values.rs index 0c4785336a4bb..aba1965d8ae84 100644 --- a/src/query/sql/src/planner/binder/values.rs +++ b/src/query/sql/src/planner/binder/values.rs @@ -49,7 +49,7 @@ impl Binder { &mut self, bind_context: &mut BindContext, span: Span, - values: &Vec<Vec<Expr>>, + values: &[Vec<Expr>], ) -> Result<(SExpr, BindContext)> { bind_values( self.ctx.clone(), @@ -69,7 +69,7 @@ pub async fn bind_values( metadata: MetadataRef, bind_context: &mut BindContext, span: Span, - values: &Vec<Vec<Expr>>, + values: &[Vec<Expr>], ) -> Result<(SExpr, BindContext)> { if values.is_empty() { return Err(ErrorCode::SemanticError( diff --git a/src/query/sql/src/planner/optimizer/property/histogram.rs b/src/query/sql/src/planner/optimizer/property/histogram.rs index 6167607a88e43..f6bf4a387d191 100644 --- a/src/query/sql/src/planner/optimizer/property/histogram.rs +++ b/src/query/sql/src/planner/optimizer/property/histogram.rs @@ -68,9 +68,7 @@ impl Histogram { } /// Get iterator of buckets - pub fn buckets_iter( - &self, - ) -> impl Iterator<Item = &HistogramBucket> + DoubleEndedIterator { + pub fn buckets_iter(&self) -> impl DoubleEndedIterator<Item = &HistogramBucket> { self.buckets.iter() } } diff --git a/src/query/sql/src/planner/optimizer/rule/transform/rule_eager_aggregation.rs b/src/query/sql/src/planner/optimizer/rule/transform/rule_eager_aggregation.rs index 67b123bb20a7a..7bdefdb308c6d 100644 --- a/src/query/sql/src/planner/optimizer/rule/transform/rule_eager_aggregation.rs +++ b/src/query/sql/src/planner/optimizer/rule/transform/rule_eager_aggregation.rs @@ -809,7 +809,7 @@ impl Rule for RuleEagerAggregation { )), Arc::new(join_expr.child(1)?.clone()), ]))]) - .replace_plan(Arc::new(eager_groupby_count_count_sum.try_into()?)) + .replace_plan(Arc::new(eager_groupby_count_count_sum.into())) } else { eval_scalar_expr .replace_children(vec![Arc::new(join_expr.replace_children(vec![ @@ -833,7 +833,7 @@ impl Rule for RuleEagerAggregation { )), )), ]))]) - .replace_plan(Arc::new(eager_groupby_count_count_sum.try_into()?)) + .replace_plan(Arc::new(eager_groupby_count_count_sum.into())) }); // Apply eager split on d and d^1. @@ -895,7 +895,7 @@ impl Rule for RuleEagerAggregation { )), )), ]))]) - .replace_plan(Arc::new(eager_split_count_sum.try_into()?)), + .replace_plan(Arc::new(eager_split_count_sum.into())), ); } else if can_push_down[d] && eager_aggregations[d ^ 1].is_empty() { // (1) Try to apply eager group-by on d. 
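
The `rule_eager_aggregation.rs` hunks above and below swap `.try_into()?` for `.into()`, which only compiles because the conversion into the plan operator is infallible. A hedged sketch of that pattern follows, using stand-in types rather than the planner's real definitions:

// `Aggregate` and `RelOperator` are stand-ins, not the planner's real types.
struct Aggregate {
    group_items: usize,
}

enum RelOperator {
    Aggregate(Aggregate),
}

// Once a conversion cannot fail, `From` is the right trait: the standard
// library's blanket `impl<T, U: From<T>> Into<U> for T` then lets call
// sites write `.into()` where they previously needed `.try_into()?`.
impl From<Aggregate> for RelOperator {
    fn from(agg: Aggregate) -> Self {
        RelOperator::Aggregate(agg)
    }
}

fn main() {
    let agg = Aggregate { group_items: 2 };
    let op: RelOperator = agg.into(); // no Result, no `?`
    match op {
        RelOperator::Aggregate(a) => assert_eq!(a.group_items, 2),
    }
}
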
@@ -1127,7 +1127,7 @@ impl Rule for RuleEagerAggregation { )), )), ]))]) - .replace_plan(Arc::new(eager_count_sum.try_into()?)) + .replace_plan(Arc::new(eager_count_sum.into())) } else { eval_scalar_expr .replace_children(vec![Arc::new(join_expr.replace_children(vec![ @@ -1140,7 +1140,7 @@ impl Rule for RuleEagerAggregation { )), Arc::new(join_expr.child(1)?.clone()), ]))]) - .replace_plan(Arc::new(eager_count_sum.try_into()?)) + .replace_plan(Arc::new(eager_count_sum.into())) }); // Apply double eager on d and d^1. @@ -1182,7 +1182,7 @@ impl Rule for RuleEagerAggregation { )), )), ]))]) - .replace_plan(Arc::new(double_eager_count_sum.try_into()?)) + .replace_plan(Arc::new(double_eager_count_sum.into())) } else { eval_scalar_expr .replace_children(vec![Arc::new(join_expr.replace_children(vec![ @@ -1210,7 +1210,7 @@ impl Rule for RuleEagerAggregation { )), )), ]))]) - .replace_plan(Arc::new(double_eager_count_sum.try_into()?)) + .replace_plan(Arc::new(double_eager_count_sum.into())) }); } } @@ -1233,19 +1233,19 @@ impl Rule for RuleEagerAggregation { .replace_children(vec![Arc::new( final_agg_partial_expr .replace_children(vec![Arc::new(join_exprs[idx].clone())]) - .replace_plan(Arc::new(final_agg_partials[idx].clone().try_into()?)), + .replace_plan(Arc::new(final_agg_partials[idx].clone().into())), )]) - .replace_plan(Arc::new(final_agg_finals[idx].clone().try_into()?)); + .replace_plan(Arc::new(final_agg_finals[idx].clone().into())); let mut result = if has_sort { eval_scalar_expr .replace_children(vec![Arc::new( sort_expr.replace_children(vec![Arc::new(temp_final_agg_expr)]), )]) - .replace_plan(Arc::new(final_eval_scalars[idx].clone().try_into()?)) + .replace_plan(Arc::new(final_eval_scalars[idx].clone().into())) } else { eval_scalar_expr .replace_children(vec![Arc::new(temp_final_agg_expr)]) - .replace_plan(Arc::new(final_eval_scalars[idx].clone().try_into()?)) + .replace_plan(Arc::new(final_eval_scalars[idx].clone().into())) }; result.set_applied_rule(&self.id); state.add_result(result); @@ -1529,7 +1529,9 @@ fn update_aggregate_and_eval( let mut success = false; // Modify the eval scalars of all aggregate functions that are not AVG components. - if let Some(indexes) = eval_scalar_items.get(&old_index) && !avg_components.contains_key(&old_index) { + if let Some(indexes) = eval_scalar_items.get(&old_index) + && !avg_components.contains_key(&old_index) + { for eval_scalar in eval_scalars { for item_idx in indexes { let eval_scalar_item = &mut (eval_scalar).items[*item_idx]; @@ -1540,7 +1542,10 @@ fn update_aggregate_and_eval( column_binding.data_type = Box::new(DataType::Nullable(Box::new( DataType::Number(NumberDataType::UInt64), ))); - eval_scalar_item.scalar = wrap_cast(&eval_scalar_item.scalar, &DataType::Number(NumberDataType::UInt64)); + eval_scalar_item.scalar = wrap_cast( + &eval_scalar_item.scalar, + &DataType::Number(NumberDataType::UInt64), + ); } success = true; } diff --git a/src/query/sql/src/planner/semantic/type_check.rs b/src/query/sql/src/planner/semantic/type_check.rs index f66a08baa654b..3f63125e33d0a 100644 --- a/src/query/sql/src/planner/semantic/type_check.rs +++ b/src/query/sql/src/planner/semantic/type_check.rs @@ -504,40 +504,37 @@ impl<'a> TypeChecker<'a> { .. } => { if let Expr::Subquery { - subquery, modifier, .. + subquery, + modifier: Some(subquery_modifier), + .. 
} = &**right { - if let Some(subquery_modifier) = modifier { - match subquery_modifier { - SubqueryModifier::Any | SubqueryModifier::Some => { - let comparison_op = ComparisonOp::try_from(op)?; - self.resolve_subquery( - SubqueryType::Any, - subquery, - Some(*left.clone()), - Some(comparison_op), - ) - .await? - } - SubqueryModifier::All => { - let contrary_op = op.to_contrary()?; - let rewritten_subquery = Expr::Subquery { - span: right.span(), - modifier: Some(SubqueryModifier::Any), - subquery: (*subquery).clone(), - }; - self.resolve_unary_op(*span, &UnaryOperator::Not, &Expr::BinaryOp { - span: *span, - op: contrary_op, - left: (*left).clone(), - right: Box::new(rewritten_subquery), - }) - .await? - } + match subquery_modifier { + SubqueryModifier::Any | SubqueryModifier::Some => { + let comparison_op = ComparisonOp::try_from(op)?; + self.resolve_subquery( + SubqueryType::Any, + subquery, + Some(*left.clone()), + Some(comparison_op), + ) + .await? } - } else { - self.resolve_binary_op(*span, op, left.as_ref(), right.as_ref()) + SubqueryModifier::All => { + let contrary_op = op.to_contrary()?; + let rewritten_subquery = Expr::Subquery { + span: right.span(), + modifier: Some(SubqueryModifier::Any), + subquery: (*subquery).clone(), + }; + self.resolve_unary_op(*span, &UnaryOperator::Not, &Expr::BinaryOp { + span: *span, + op: contrary_op, + left: (*left).clone(), + right: Box::new(rewritten_subquery), + }) .await? + } } } else { self.resolve_binary_op(*span, op, left.as_ref(), right.as_ref()) diff --git a/src/query/storages/common/cache/src/providers/mod.rs b/src/query/storages/common/cache/src/providers/mod.rs index e4279a52484c9..69426e9e02470 100644 --- a/src/query/storages/common/cache/src/providers/mod.rs +++ b/src/query/storages/common/cache/src/providers/mod.rs @@ -17,12 +17,10 @@ mod memory_cache; mod table_data_cache; pub use disk_cache::result::Error as DiskCacheError; pub use disk_cache::result::Result as DiskCacheResult; -pub use disk_cache::DiskCache; pub use disk_cache::DiskCacheKey; pub use disk_cache::LruDiskCache; pub use disk_cache::LruDiskCacheBuilder; pub use disk_cache::LruDiskCacheHolder; -pub use memory_cache::BytesCache; pub use memory_cache::InMemoryBytesCacheHolder; pub use memory_cache::InMemoryCache; pub use memory_cache::InMemoryCacheBuilder; diff --git a/src/query/storages/common/index/tests/it/filters/bloom_filter.rs b/src/query/storages/common/index/tests/it/filters/bloom_filter.rs index 3eed74e3762d2..31f11c810e86a 100644 --- a/src/query/storages/common/index/tests/it/filters/bloom_filter.rs +++ b/src/query/storages/common/index/tests/it/filters/bloom_filter.rs @@ -73,7 +73,7 @@ fn test_bloom_filter() -> Result<()> { ]); let map_ty = DataType::Map(Box::new(kv_ty)); - let blocks = vec![ + let blocks = [ DataBlock::new( vec![ BlockEntry::new( @@ -238,7 +238,7 @@ fn test_specify_bloom_filter() -> Result<()> { TableField::new("1", TableDataType::String), ])); - let blocks = vec![DataBlock::new_from_columns(vec![ + let blocks = [DataBlock::new_from_columns(vec![ UInt8Type::from_data(vec![1, 2]), StringType::from_data(vec!["a", "b"]), ])]; @@ -277,7 +277,7 @@ fn test_string_bloom_filter() -> Result<()> { ])); let val: String = (0..512).map(|_| 'a').collect(); - let blocks = vec![DataBlock::new_from_columns(vec![ + let blocks = [DataBlock::new_from_columns(vec![ UInt8Type::from_data(vec![1, 2]), StringType::from_data(vec![&val, "bc"]), ])]; diff --git a/src/query/storages/common/table_meta/src/meta/v1/mod.rs 
b/src/query/storages/common/table_meta/src/meta/v1/mod.rs index fcfdfb823b366..894d22487115b 100644 --- a/src/query/storages/common/table_meta/src/meta/v1/mod.rs +++ b/src/query/storages/common/table_meta/src/meta/v1/mod.rs @@ -19,5 +19,4 @@ mod table_snapshot_statistics; pub use segment::BlockMeta; pub use segment::SegmentInfo; pub use snapshot::TableSnapshot; -pub use snapshot::TableSnapshotLite; pub use table_snapshot_statistics::TableSnapshotStatistics; diff --git a/src/query/storages/delta/src/dal.rs b/src/query/storages/delta/src/dal.rs index ee8787caa9df0..919eb65e8de27 100644 --- a/src/query/storages/delta/src/dal.rs +++ b/src/query/storages/delta/src/dal.rs @@ -336,11 +336,11 @@ mod tests { let op = Operator::new(services::Memory::default()).unwrap().finish(); let object_store = Arc::new(OpendalStore::new(op)); - let path: Path = "data/test.txt".try_into().unwrap(); + let path: Path = "data/test.txt".into(); let bytes = Bytes::from_static(b"hello, world!"); object_store.put(&path, bytes).await.unwrap(); - let path: Path = "data/nested/test.txt".try_into().unwrap(); + let path: Path = "data/nested/test.txt".into(); let bytes = Bytes::from_static(b"hello, world! I am nested."); object_store.put(&path, bytes).await.unwrap(); @@ -353,7 +353,7 @@ mod tests { let object_store: Arc<dyn ObjectStore> = Arc::new(OpendalStore::new(op)); // Retrieve a specific file - let path: Path = "data/test.txt".try_into().unwrap(); + let path: Path = "data/test.txt".into(); let bytes = Bytes::from_static(b"hello, world!"); object_store.put(&path, bytes.clone()).await.unwrap(); @@ -377,7 +377,7 @@ #[tokio::test] async fn test_list() { let object_store = create_test_object_store().await; - let path: Path = "data/".try_into().unwrap(); + let path: Path = "data/".into(); let results = object_store .list(Some(&path)) .await @@ -398,7 +398,7 @@ #[tokio::test] async fn test_list_with_delimiter() { let object_store = create_test_object_store().await; - let path: Path = "data/".try_into().unwrap(); + let path: Path = "data/".into(); let result = object_store.list_with_delimiter(Some(&path)).await.unwrap(); assert_eq!(result.objects.len(), 1); assert_eq!(result.common_prefixes.len(), 1); @@ -409,8 +409,8 @@ #[tokio::test] async fn test_list_with_offset() { let object_store = create_test_object_store().await; - let path: Path = "data/".try_into().unwrap(); - let offset: Path = "data/nested/test.txt".try_into().unwrap(); + let path: Path = "data/".into(); + let offset: Path = "data/nested/test.txt".into(); let result = object_store .list_with_offset(Some(&path), &offset) .await diff --git a/src/query/storages/fuse/src/fuse_table.rs b/src/query/storages/fuse/src/fuse_table.rs index 4ef5adcd56ea5..d7e63288704d6 100644 --- a/src/query/storages/fuse/src/fuse_table.rs +++ b/src/query/storages/fuse/src/fuse_table.rs @@ -536,8 +536,10 @@ impl Table for FuseTable { ) -> Result<()> { // if new cluster_key_str is the same with old one, // no need to change - if let Some(old_cluster_key_str) = self.cluster_key_str() && *old_cluster_key_str == cluster_key_str{ - return Ok(()) + if let Some(old_cluster_key_str) = self.cluster_key_str() + && *old_cluster_key_str == cluster_key_str + { + return Ok(()); } let mut new_table_meta = self.get_table_info().meta.clone(); new_table_meta = new_table_meta.push_cluster_key(cluster_key_str); diff --git a/src/query/storages/fuse/src/io/write/block_writer.rs b/src/query/storages/fuse/src/io/write/block_writer.rs index 562d05b347116..cc91e09beae65 100644 --- 
a/src/query/storages/fuse/src/io/write/block_writer.rs +++ b/src/query/storages/fuse/src/io/write/block_writer.rs @@ -211,7 +211,7 @@ impl BlockBuilder { .as_ref() .map(|v| v.size) .unwrap_or_default(), - compression: self.write_settings.table_compression.try_into()?, + compression: self.write_settings.table_compression.into(), create_on: Some(Utc::now()), }; diff --git a/src/query/storages/fuse/src/io/write/mod.rs b/src/query/storages/fuse/src/io/write/mod.rs index 6a150b29f3943..6cf7efc778e42 100644 --- a/src/query/storages/fuse/src/io/write/mod.rs +++ b/src/query/storages/fuse/src/io/write/mod.rs @@ -21,7 +21,6 @@ pub use block_writer::serialize_block; pub use block_writer::write_data; pub use block_writer::BlockBuilder; pub use block_writer::BlockSerialization; -pub use block_writer::BloomIndexState; pub use meta_writer::CachedMetaWriter; pub use meta_writer::MetaWriter; pub use segment_writer::SegmentWriter; diff --git a/src/query/storages/fuse/src/lib.rs b/src/query/storages/fuse/src/lib.rs index 33876e81f6c75..e790bd76e5fb4 100644 --- a/src/query/storages/fuse/src/lib.rs +++ b/src/query/storages/fuse/src/lib.rs @@ -13,12 +13,12 @@ // limitations under the License. #![allow(clippy::uninlined_format_args)] +#![allow(clippy::useless_asref)] #![feature(type_alias_impl_trait)] #![feature(iter_order_by)] #![feature(let_chains)] #![feature(impl_trait_in_assoc_type)] #![feature(int_roundings)] -#![feature(result_option_inspect)] #![feature(iterator_try_reduce)] #![recursion_limit = "256"] diff --git a/src/query/storages/fuse/src/operations/mutation/mutator/block_compact_mutator.rs b/src/query/storages/fuse/src/operations/mutation/mutator/block_compact_mutator.rs index 79d75f4c9764f..4b4e58ac7cc53 100644 --- a/src/query/storages/fuse/src/operations/mutation/mutator/block_compact_mutator.rs +++ b/src/query/storages/fuse/src/operations/mutation/mutator/block_compact_mutator.rs @@ -326,10 +326,7 @@ impl SegmentCompactChecker { } } - fn check_for_compact( - &mut self, - segments: &Vec<(SegmentIndex, Arc<CompactSegmentInfo>)>, - ) -> bool { + fn check_for_compact(&mut self, segments: &[(SegmentIndex, Arc<CompactSegmentInfo>)]) -> bool { if segments.is_empty() { return false; } diff --git a/src/query/storages/fuse/src/operations/read/runtime_filter_prunner.rs b/src/query/storages/fuse/src/operations/read/runtime_filter_prunner.rs index 9090ef2da7257..40252a3060afb 100644 --- a/src/query/storages/fuse/src/operations/read/runtime_filter_prunner.rs +++ b/src/query/storages/fuse/src/operations/read/runtime_filter_prunner.rs @@ -43,7 +43,7 @@ use crate::FusePartInfo; pub fn runtime_filter_pruner( table_schema: Arc<TableSchema>, part: &PartInfoPtr, - filters: &Vec<Expr<String>>, + filters: &[Expr<String>], func_ctx: &FunctionContext, ) -> Result<bool> { if filters.is_empty() { diff --git a/src/query/storages/fuse/src/operations/replace_into/mutator/deletion_accumulator.rs b/src/query/storages/fuse/src/operations/replace_into/mutator/deletion_accumulator.rs index 4ae3bae9aa3af..256562fdf9a46 100644 --- a/src/query/storages/fuse/src/operations/replace_into/mutator/deletion_accumulator.rs +++ b/src/query/storages/fuse/src/operations/replace_into/mutator/deletion_accumulator.rs @@ -33,7 +33,7 @@ impl DeletionAccumulator { segment_index: SegmentIndex, block_index: BlockIndex, source_on_conflict_key_set: &HashSet<UniqueKeyDigest>, - source_bloom_hashes: &Vec<Vec<u64>>, + source_bloom_hashes: &[Vec<u64>], ) { match self.deletions.entry(segment_index) { Entry::Occupied(ref mut v) => { @@ -50,7 +50,7 @@ }) .or_insert(( source_on_conflict_key_set.clone(), - source_bloom_hashes.clone(), + 
source_bloom_hashes.to_owned(), )); } Entry::Vacant(e) => { @@ -58,7 +58,7 @@ impl DeletionAccumulator { block_index, ( source_on_conflict_key_set.clone(), - source_bloom_hashes.clone(), + source_bloom_hashes.to_owned(), ), )])); } diff --git a/src/query/storages/parquet/src/lib.rs b/src/query/storages/parquet/src/lib.rs index fd1796c68f3b5..faf1e64ecc1e8 100644 --- a/src/query/storages/parquet/src/lib.rs +++ b/src/query/storages/parquet/src/lib.rs @@ -12,14 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. +#![allow(internal_features)] #![allow(clippy::uninlined_format_args)] +#![allow(clippy::useless_asref)] +#![allow(clippy::diverging_sub_expression)] #![feature(try_blocks)] #![feature(impl_trait_in_assoc_type)] #![feature(let_chains)] #![feature(core_intrinsics)] #![feature(int_roundings)] #![feature(box_patterns)] -#![allow(clippy::diverging_sub_expression)] mod parquet2; mod parquet_part; diff --git a/src/query/storages/parquet/src/parquet_rs/mod.rs b/src/query/storages/parquet/src/parquet_rs/mod.rs index 907caa79c8e3b..e82bece95108e 100644 --- a/src/query/storages/parquet/src/parquet_rs/mod.rs +++ b/src/query/storages/parquet/src/parquet_rs/mod.rs @@ -24,7 +24,6 @@ mod meta; mod schema; pub use copy_into_table::ParquetTableForCopy; -pub use meta::read_metas_in_parallel; pub use meta::read_metas_in_parallel_for_copy; pub use meta::read_parquet_metas_batch; pub use parquet_reader::InMemoryRowGroup; diff --git a/src/query/storages/parquet/src/parquet_rs/parquet_reader/read_policy/mod.rs b/src/query/storages/parquet/src/parquet_rs/parquet_reader/read_policy/mod.rs index ea7d94e80823b..16ef42d0ed8b1 100644 --- a/src/query/storages/parquet/src/parquet_rs/parquet_reader/read_policy/mod.rs +++ b/src/query/storages/parquet/src/parquet_rs/parquet_reader/read_policy/mod.rs @@ -18,9 +18,6 @@ mod topk_only; mod utils; pub mod policy; -pub use no_prefetch::NoPrefetchPolicy; pub use no_prefetch::NoPretchPolicyBuilder; -pub use predicate_and_topk::PredicateAndTopkPolicy; pub use predicate_and_topk::PredicateAndTopkPolicyBuilder; -pub use topk_only::TopkOnlyPolicy; pub use topk_only::TopkOnlyPolicyBuilder; diff --git a/src/query/storages/parquet/src/parquet_rs/pruning.rs b/src/query/storages/parquet/src/parquet_rs/pruning.rs index 27d7b59d0980d..ec00657e303ba 100644 --- a/src/query/storages/parquet/src/parquet_rs/pruning.rs +++ b/src/query/storages/parquet/src/parquet_rs/pruning.rs @@ -235,7 +235,9 @@ impl ParquetRSPruner { selectors.extend(sel_of_cur_rg); } // Trim selectors. 
- while let Some(s) = selectors.last() && s.row_count == 0 { + while let Some(s) = selectors.last() + && s.row_count == 0 + { selectors.pop(); } Ok(Some(RowSelection::from(selectors))) diff --git a/src/query/storages/parquet/tests/it/merge_io.rs b/src/query/storages/parquet/tests/it/merge_io.rs index b4bda7900b31f..e07d790ef1d6d 100644 --- a/src/query/storages/parquet/tests/it/merge_io.rs +++ b/src/query/storages/parquet/tests/it/merge_io.rs @@ -89,9 +89,9 @@ async fn test_merge() { // for gap=10 let gap10 = InMemoryRowGroup::new(path, op, &meta, None, 10, 200); - let ranges = vec![(1..10), (15..30), (40..50)]; - let (gap0_chunks, gap0_merged) = gap0.get_ranges(&ranges.to_vec()).await.unwrap(); - let (gap10_chunks, gap10_merged) = gap10.get_ranges(&ranges.to_vec()).await.unwrap(); + let ranges = [(1..10), (15..30), (40..50)]; + let (gap0_chunks, gap0_merged) = gap0.get_ranges(ranges.as_ref()).await.unwrap(); + let (gap10_chunks, gap10_merged) = gap10.get_ranges(ranges.as_ref()).await.unwrap(); // gap=0 no merged assert!(!gap0_merged); // gap=10 merge happend diff --git a/src/query/storages/system/src/lib.rs b/src/query/storages/system/src/lib.rs index f7edbdbbdf07e..7a0643c671757 100644 --- a/src/query/storages/system/src/lib.rs +++ b/src/query/storages/system/src/lib.rs @@ -13,6 +13,7 @@ // limitations under the License. #![allow(clippy::uninlined_format_args)] +#![allow(clippy::useless_asref)] #![feature(type_alias_impl_trait)] #![feature(impl_trait_in_assoc_type)] diff --git a/src/query/users/src/lib.rs b/src/query/users/src/lib.rs index effe3ac97b395..a7eea7b7f5a72 100644 --- a/src/query/users/src/lib.rs +++ b/src/query/users/src/lib.rs @@ -13,7 +13,6 @@ // limitations under the License. #![feature(let_chains)] -#![feature(ip_in_core)] #![allow(clippy::uninlined_format_args)] extern crate core; diff --git a/tests/sqllogictests/suites/mode/cluster/memo/aggregate_property.test b/tests/sqllogictests/suites/mode/cluster/memo/aggregate_property.test index 36ded546c7939..716592cd68475 100644 --- a/tests/sqllogictests/suites/mode/cluster/memo/aggregate_property.test +++ b/tests/sqllogictests/suites/mode/cluster/memo/aggregate_property.test @@ -7,6 +7,9 @@ create database aggregate_property statement ok use aggregate_property +statement ok +set max_threads = 1 + statement ok create table t_10(a int) as select * from numbers(10) @@ -23,7 +26,7 @@ where t_10.a = t_1000.a and t_100.a = t_1000.a ---- Memo ├── root group: #8 -├── estimated memory: 9912 bytes +├── estimated memory: 10080 bytes ├── Group #0 │ ├── Best properties │ │ ├── { dist: Any }: expr: #0, cost: 100.000, children: [] @@ -92,7 +95,7 @@ group by t_10.a, t_100.a ---- Memo ├── root group: #8 -├── estimated memory: 23128 bytes +├── estimated memory: 23520 bytes ├── Group #0 │ ├── Best properties │ │ ├── { dist: Any }: expr: #0, cost: 100.000, children: [] diff --git a/tests/sqllogictests/suites/mode/cluster/memo/join_property.test b/tests/sqllogictests/suites/mode/cluster/memo/join_property.test index 8b2d4577d15e1..6af96a70224a8 100644 --- a/tests/sqllogictests/suites/mode/cluster/memo/join_property.test +++ b/tests/sqllogictests/suites/mode/cluster/memo/join_property.test @@ -7,6 +7,9 @@ create database join_property statement ok use join_property +statement ok +set max_threads = 1 + statement ok create table t_10(a int) as select * from numbers(10) @@ -22,7 +25,7 @@ select * from t_10, t_100, t_1000 where t_10.a = t_1000.a and t_100.a = t_1000.a ---- Memo ├── root group: #5 -├── estimated memory: 8024 bytes +├── estimated memory: 
8160 bytes ├── Group #0 │ ├── Best properties │ │ ├── { dist: Any }: expr: #0, cost: 100.000, children: [] @@ -76,7 +79,7 @@ select * from t_1000 left join t_10 on t_1000.a = t_10.a left join t_100 on t_10 ---- Memo ├── root group: #5 -├── estimated memory: 8024 bytes +├── estimated memory: 8160 bytes ├── Group #0 │ ├── Best properties │ │ ├── { dist: Any }: expr: #0, cost: 1000.000, children: [] @@ -130,7 +133,7 @@ select * from t_1000 right join t_10 on t_1000.a = t_10.a right join t_100 on t_ ---- Memo ├── root group: #5 -├── estimated memory: 7080 bytes +├── estimated memory: 7200 bytes ├── Group #0 │ ├── Best properties │ │ ├── { dist: Any }: expr: #0, cost: 1000.000, children: [] @@ -180,7 +183,7 @@ select * from t_1000 full join t_10 on t_1000.a = t_10.a full join t_100 on t_10 ---- Memo ├── root group: #5 -├── estimated memory: 7080 bytes +├── estimated memory: 7200 bytes ├── Group #0 │ ├── Best properties │ │ ├── { dist: Any }: expr: #0, cost: 1000.000, children: [] @@ -230,7 +233,7 @@ select * from t_10, t_100, t_1000 ---- Memo ├── root group: #5 -├── estimated memory: 6136 bytes +├── estimated memory: 6240 bytes ├── Group #0 │ ├── Best properties │ │ ├── { dist: Any }: expr: #0, cost: 10.000, children: [] diff --git a/tests/sqllogictests/suites/mode/cluster/memo/mix_property.test b/tests/sqllogictests/suites/mode/cluster/memo/mix_property.test index 4bdc095fa6848..49d7519e787ef 100644 --- a/tests/sqllogictests/suites/mode/cluster/memo/mix_property.test +++ b/tests/sqllogictests/suites/mode/cluster/memo/mix_property.test @@ -7,6 +7,9 @@ create database mix_property statement ok use mix_property +statement ok +set max_threads = 1 + statement ok create table t_10(a int) as select * from numbers(10) @@ -26,7 +29,7 @@ limit 10 ---- Memo ├── root group: #10 -├── estimated memory: 10856 bytes +├── estimated memory: 11040 bytes ├── Group #0 │ ├── Best properties │ │ ├── { dist: Any }: expr: #0, cost: 100.000, children: []
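
A pattern worth calling out across the test changes in this patch: `interpreter.execute(ctx).await?;` becomes `let _ = interpreter.execute(ctx).await?;`, presumably to satisfy the newer toolchain's unused-value lints once `?` has unwrapped the `Result` and left the returned stream unused. A small self-contained sketch of the mechanics, with made-up stand-in types rather than the service's real interpreter API:

// `DataBlockStream` and `execute` are hypothetical stand-ins used only to
// show why the explicit `let _ =` binding is needed.
#[must_use = "streams do nothing unless polled"]
struct DataBlockStream;

fn execute() -> Result<DataBlockStream, String> {
    Ok(DataBlockStream)
}

fn main() -> Result<(), String> {
    // `execute()?;` alone would leave a #[must_use] value in expression
    // position and trigger a warning; binding it to `_` documents that the
    // result is discarded on purpose.
    let _ = execute()?;
    Ok(())
}
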