diff --git a/ydb/core/tx/schemeshard/olap/store/store.h b/ydb/core/tx/schemeshard/olap/store/store.h index b5719f3aed91..6d60b9319122 100644 --- a/ydb/core/tx/schemeshard/olap/store/store.h +++ b/ydb/core/tx/schemeshard/olap/store/store.h @@ -147,11 +147,11 @@ struct TOlapStoreInfo { ILayoutPolicy::TPtr GetTablesLayoutPolicy() const; - void UpdateShardStats(TShardIdx shardIdx, const TPartitionStats& newStats) { + void UpdateShardStats(TDiskSpaceUsageDelta* diskSpaceUsageDelta, TShardIdx shardIdx, const TPartitionStats& newStats, TInstant now) { Stats.Aggregated.PartCount = ColumnShards.size(); Stats.PartitionStats[shardIdx]; // insert if none - Stats.UpdateShardStats(shardIdx, newStats); + Stats.UpdateShardStats(diskSpaceUsageDelta, shardIdx, newStats, now); } }; -} \ No newline at end of file +} diff --git a/ydb/core/tx/schemeshard/olap/table/table.h b/ydb/core/tx/schemeshard/olap/table/table.h index 8bb0549be91d..e9fca9431c4e 100644 --- a/ydb/core/tx/schemeshard/olap/table/table.h +++ b/ydb/core/tx/schemeshard/olap/table/table.h @@ -103,15 +103,15 @@ struct TColumnTableInfo { return Stats; } - void UpdateShardStats(const TShardIdx shardIdx, const TPartitionStats& newStats) { + void UpdateShardStats(TDiskSpaceUsageDelta* diskSpaceUsageDelta, const TShardIdx shardIdx, const TPartitionStats& newStats, TInstant now) { Stats.Aggregated.PartCount = GetColumnShards().size(); Stats.PartitionStats[shardIdx]; // insert if none - Stats.UpdateShardStats(shardIdx, newStats); + Stats.UpdateShardStats(diskSpaceUsageDelta, shardIdx, newStats, now); } - void UpdateTableStats(const TShardIdx shardIdx, const TPathId& pathId, const TPartitionStats& newStats) { + void UpdateTableStats(const TShardIdx shardIdx, const TPathId& pathId, const TPartitionStats& newStats, TInstant now) { Stats.TableStats[pathId].Aggregated.PartCount = GetColumnShards().size(); - Stats.UpdateTableStats(shardIdx, pathId, newStats); + Stats.UpdateTableStats(shardIdx, pathId, newStats, now); } TConclusion> BuildEntity(const TPathId& pathId, const NOlap::NAlter::TEntityInitializationContext& iContext) const; diff --git a/ydb/core/tx/schemeshard/schemeshard__init.cpp b/ydb/core/tx/schemeshard/schemeshard__init.cpp index 1e520769fe54..4cb2e5ff16a9 100644 --- a/ydb/core/tx/schemeshard/schemeshard__init.cpp +++ b/ydb/core/tx/schemeshard/schemeshard__init.cpp @@ -1845,6 +1845,8 @@ struct TSchemeShard::TTxInit : public TTransactionBase { } } + tableInfo->IsExternalBlobsEnabled = PartitionConfigHasExternalBlobsEnabled(tableInfo->PartitionConfig()); + TString alterTabletFull = std::get<4>(rec); TString alterTabletDiff = std::get<5>(rec); if (alterTabletFull) { @@ -2384,6 +2386,7 @@ struct TSchemeShard::TTxInit : public TTransactionBase { TPathId prevTableId; + TInstant now = AppData()->TimeProvider->Now(); while (!rowSet.EndOfSet()) { const TPathId tableId = TPathId( rowSet.GetValue(), @@ -2460,7 +2463,6 @@ struct TSchemeShard::TTxInit : public TTransactionBase { stats.RangeReads = rowSet.GetValue(); stats.RangeReadRows = rowSet.GetValue(); - TInstant now = AppData(ctx)->TimeProvider->Now(); stats.SetCurrentRawCpuUsage(rowSet.GetValue(), now); stats.Memory = rowSet.GetValue(); stats.Network = rowSet.GetValue(); @@ -2478,7 +2480,8 @@ struct TSchemeShard::TTxInit : public TTransactionBase { stats.LocksWholeShard = rowSet.GetValueOrDefault(); stats.LocksBroken = rowSet.GetValueOrDefault(); - tableInfo->UpdateShardStats(shardIdx, stats); + TDiskSpaceUsageDelta unusedDelta; + tableInfo->UpdateShardStats(&unusedDelta, shardIdx, stats, now); // note 
that we don't update shard metrics here, because we will always update // the shard metrics in TSchemeShard::SetPartitioning diff --git a/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp b/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp index 921fb7b552ea..f4c3dadd58ea 100644 --- a/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp +++ b/ydb/core/tx/schemeshard/schemeshard__pq_stats.cpp @@ -22,12 +22,12 @@ class TTxStoreTopicStats: public TTxStoreStats::TItem& item, TTransactionContext& txc, const TActorContext& ctx) override; + bool PersistSingleStats(const TPathId& pathId, const TStatsQueue::TItem& item, TInstant now, TTransactionContext& txc, const TActorContext& ctx) override; void ScheduleNextBatch(const TActorContext& ctx) override; }; -bool TTxStoreTopicStats::PersistSingleStats(const TPathId& pathId, const TStatsQueueItem& item, TTransactionContext& txc, const TActorContext& ctx) { +bool TTxStoreTopicStats::PersistSingleStats(const TPathId& pathId, const TStatsQueueItem& item, TInstant /*now*/, TTransactionContext& txc, const TActorContext& ctx) { const auto& rec = item.Ev->Get()->Record; TTopicStats newStats; diff --git a/ydb/core/tx/schemeshard/schemeshard__stats.h b/ydb/core/tx/schemeshard/schemeshard__stats.h index 58375da29339..e2cbfaef87c3 100644 --- a/ydb/core/tx/schemeshard/schemeshard__stats.h +++ b/ydb/core/tx/schemeshard/schemeshard__stats.h @@ -124,7 +124,7 @@ class TTxStoreStats: public NTabletFlatExecutor::TTransactionBase bool Execute(TTransactionContext& txc, const TActorContext& ctx) override; // returns true to continue batching - virtual bool PersistSingleStats(const TPathId& pathId, const TItem& item, TTransactionContext& txc, const TActorContext& ctx) = 0; + virtual bool PersistSingleStats(const TPathId& pathId, const TItem& item, TInstant now, TTransactionContext& txc, const TActorContext& ctx) = 0; virtual void ScheduleNextBatch(const TActorContext& ctx) = 0; }; diff --git a/ydb/core/tx/schemeshard/schemeshard__stats_impl.h b/ydb/core/tx/schemeshard/schemeshard__stats_impl.h index 25048a35af22..a9d35a001861 100644 --- a/ydb/core/tx/schemeshard/schemeshard__stats_impl.h +++ b/ydb/core/tx/schemeshard/schemeshard__stats_impl.h @@ -118,13 +118,14 @@ bool TTxStoreStats::Execute(NTabletFlatExecutor::TTransactionContext& tx return true; } + TInstant now = AppData(ctx)->TimeProvider->Now(); TMonotonic start = TMonotonic::Now(); const ui32 maxBatchSize = Queue.MaxBatchSize(); ui32 batchSize = 0; for (; batchSize < maxBatchSize && !Queue.Empty(); ++batchSize) { auto item = Queue.Next(); Queue.WriteLatencyMetric(item); - if (!PersistSingleStats(item.PathId(), item, txc, ctx)) { + if (!PersistSingleStats(item.PathId(), item, now, txc, ctx)) { break; } diff --git a/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp b/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp index 68f27060d48c..8f8df2c391cf 100644 --- a/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp +++ b/ydb/core/tx/schemeshard/schemeshard__table_stats.cpp @@ -6,31 +6,6 @@ #include #include -namespace { - -THashMap MapChannelsToStoragePoolKinds(const NActors::TActorContext& ctx, - const NKikimr::TStoragePools& pools, - const NKikimr::TChannelsBindings& bindings -) { - THashMap nameToKindMap(pools.size()); - for (const auto& pool : pools) { - nameToKindMap.emplace(pool.GetName(), pool.GetKind()); - } - THashMap channelsMapping(bindings.size()); - for (ui32 channel = 0u; channel < bindings.size(); ++channel) { - if (const auto* poolKind = nameToKindMap.FindPtr(bindings[channel].GetStoragePoolName())) { 
- channelsMapping.emplace(channel, *poolKind); - } else { - LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, - "MapChannelsToStoragePoolKinds: the subdomain has no info about the storage pool named " - << bindings[channel].GetStoragePoolName() - ); - } - } - return channelsMapping; -} - -} namespace NKikimr { namespace NSchemeShard { @@ -124,11 +99,11 @@ class TTxStoreTableStats: public TTxStoreStats::TItem& item, TTransactionContext& txc, const TActorContext& ctx) override; + bool PersistSingleStats(const TPathId& pathId, const TStatsQueue::TItem& item, TInstant now, TTransactionContext& txc, const TActorContext& ctx) override; void ScheduleNextBatch(const TActorContext& ctx) override; template - TPartitionStats PrepareStats(const TActorContext& ctx, const T& rec, const THashMap& channelsMapping = {}) const; + TPartitionStats PrepareStats(const T& rec, TInstant now, const NKikimr::TStoragePools& pools, const NKikimr::TChannelsBindings& bindings) const; }; @@ -159,10 +134,27 @@ THolder MergeRequest( return std::move(request); } +const TString* GetPoolKind(const NKikimr::TChannelBind& channelBind, const TStoragePools& pools) { + auto findPoolByName = [](const auto& pools, const auto& name) { + return std::find_if(pools.begin(), pools.end(), [&name](const auto& pool) { + return pool.GetName() == name; + }); + }; + // fast: use pool kind specified by the channel bind + // slower: find pool kind by name + if (const auto& poolKind = channelBind.GetStoragePoolKind(); !poolKind.empty()) { + return &poolKind; + } else if (const auto& found = findPoolByName(pools, channelBind.GetStoragePoolName()); found != pools.end()) { + return &found->GetKind(); + } + return nullptr; +}; + template -TPartitionStats TTxStoreTableStats::PrepareStats(const TActorContext& ctx, - const T& rec, - const THashMap& channelsMapping +TPartitionStats TTxStoreTableStats::PrepareStats(const T& rec, + TInstant now, + const NKikimr::TStoragePools& pools, + const NKikimr::TChannelsBindings& bindings ) const { const auto& tableStats = rec.GetTableStats(); const auto& tabletMetrics = rec.GetTabletMetrics(); @@ -176,17 +168,20 @@ TPartitionStats TTxStoreTableStats::PrepareStats(const TActorContext& ctx, newStats.ByKeyFilterSize = tableStats.GetByKeyFilterSize(); newStats.LastAccessTime = TInstant::MilliSeconds(tableStats.GetLastAccessTime()); newStats.LastUpdateTime = TInstant::MilliSeconds(tableStats.GetLastUpdateTime()); + for (const auto& channelStats : tableStats.GetChannels()) { - if (const auto* poolKind = channelsMapping.FindPtr(channelStats.GetChannel())) { - auto& [dataSize, indexSize] = newStats.StoragePoolsStats[*poolKind]; - dataSize += channelStats.GetDataSize(); - indexSize += channelStats.GetIndexSize(); - } else { - LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, - "PrepareStats: SchemeShard has no info on DataShard " - << rec.GetDatashardId() << " channel " << channelStats.GetChannel() << " binding" - ); + const ui32 channel = channelStats.GetChannel(); + if (channel < bindings.size()) { + const auto& channelBind = bindings[channel]; + if (auto* poolKindPtr = GetPoolKind(channelBind, pools); poolKindPtr != nullptr) { + auto& [dataSize, indexSize] = newStats.StoragePoolsStats[*poolKindPtr]; + dataSize += channelStats.GetDataSize(); + indexSize += channelStats.GetIndexSize(); + } + // skip update for unknown pool kind } + // skip update for unknown channel + //NOTE: intentionally not logging to avoid flooding the log } newStats.ImmediateTxCompleted = tableStats.GetImmediateTxCompleted(); @@ -206,7 
+201,6 @@ TPartitionStats TTxStoreTableStats::PrepareStats(const TActorContext& ctx, newStats.LocksWholeShard = tableStats.GetLocksWholeShard(); newStats.LocksBroken = tableStats.GetLocksBroken(); - TInstant now = AppData(ctx)->TimeProvider->Now(); newStats.SetCurrentRawCpuUsage(tabletMetrics.GetCPU(), now); newStats.Memory = tabletMetrics.GetMemory(); newStats.Network = tabletMetrics.GetNetwork(); @@ -238,6 +232,7 @@ TPartitionStats TTxStoreTableStats::PrepareStats(const TActorContext& ctx, bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, const TStatsQueueItem& item, + TInstant now, NTabletFlatExecutor::TTransactionContext& txc, const TActorContext& ctx) { const auto& rec = item.Ev->Get()->Record; const auto datashardId = TTabletId(rec.GetDatashardId()); @@ -247,21 +242,43 @@ bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, ui64 dataSize = tableStats.GetDataSize(); ui64 rowCount = tableStats.GetRowCount(); - const bool isDataShard = Self->Tables.contains(pathId); - const bool isOlapStore = Self->OlapStores.contains(pathId); - const bool isColumnTable = Self->ColumnTables.contains(pathId); + const auto pathElementIt = Self->PathsById.find(pathId); + if (pathElementIt == Self->PathsById.end()) { + LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "PersistSingleStats for pathId " << pathId + << ", tabletId " << datashardId + << ", followerId " << followerId + << ": unknown pathId" + ); + return true; + } + const auto& pathElement = pathElementIt->second; + if (pathElement->Dropped()) { + LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "PersistSingleStats for pathId " << pathId + << ", tabletId " << datashardId + << ", followerId " << followerId + << ": pathId is dropped" + ); + return true; + } + + const bool isDataShard = pathElement->IsTable(); + const bool isOlapStore = pathElement->IsOlapStore(); + const bool isColumnTable = pathElement->IsColumnTable(); if (!isDataShard && !isOlapStore && !isColumnTable) { LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "Unexpected stats from shard " << datashardId); return true; } - if (!Self->TabletIdToShardIdx.contains(datashardId)) { + TShardIdx shardIdx = [this, &datashardId]() { + auto found = Self->TabletIdToShardIdx.find(datashardId); + return (found != Self->TabletIdToShardIdx.end()) ? 
found->second : InvalidShardIdx; + }(); + if (!shardIdx) { LOG_ERROR_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "No shardIdx for shard " << datashardId); return true; } - TShardIdx shardIdx = Self->TabletIdToShardIdx[datashardId]; LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "PersistSingleStats for pathId " << pathId.LocalPathId << " shard idx " << shardIdx << " data size " << dataSize << " row count " << rowCount ); @@ -273,13 +290,9 @@ bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, return true; } - auto subDomainInfo = Self->ResolveDomainInfo(pathId); - const auto channelsMapping = MapChannelsToStoragePoolKinds(ctx, - subDomainInfo->EffectiveStoragePools(), - shardInfo->BindedChannels); + auto subDomainInfo = Self->ResolveDomainInfo(pathElement); - const auto pathElement = Self->PathsById[pathId]; - const TPartitionStats newStats = PrepareStats(ctx, rec, channelsMapping); + const TPartitionStats newStats = PrepareStats(rec, now, subDomainInfo->EffectiveStoragePools(), shardInfo->BindedChannels); LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "TTxStoreTableStats.PersistSingleStats: main stats from" @@ -294,8 +307,6 @@ bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, NIceDb::TNiceDb db(txc.DB); TTableInfo::TPtr table; - TPartitionStats oldAggrStats; - TPartitionStats newAggrStats; bool updateSubdomainInfo = false; TMaybe nodeId; @@ -316,14 +327,15 @@ bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, return true; } + TDiskSpaceUsageDelta diskSpaceUsageDelta; + if (isDataShard) { table = Self->Tables[pathId]; - oldAggrStats = table->GetStats().Aggregated; - table->UpdateShardStats(shardIdx, newStats); + table->UpdateShardStats(&diskSpaceUsageDelta, shardIdx, newStats, now); if (!table->IsBackup) { Self->UpdateBackgroundCompaction(shardIdx, newStats); - Self->UpdateShardMetrics(shardIdx, newStats); + Self->UpdateShardMetrics(shardIdx, newStats, now); } if (!newStats.HasBorrowedData) { @@ -335,23 +347,20 @@ bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, } if (!table->IsBackup && !table->IsShardsStatsDetached()) { - newAggrStats = table->GetStats().Aggregated; updateSubdomainInfo = true; } Self->PersistTablePartitionStats(db, pathId, shardIdx, table); } else if (isOlapStore) { TOlapStoreInfo::TPtr olapStore = Self->OlapStores[pathId]; - oldAggrStats = olapStore->GetStats().Aggregated; - olapStore->UpdateShardStats(shardIdx, newStats); - newAggrStats = olapStore->GetStats().Aggregated; + olapStore->UpdateShardStats(&diskSpaceUsageDelta, shardIdx, newStats, now); updateSubdomainInfo = true; const auto tables = rec.GetTables(); LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "OLAP store contains " << tables.size() << " tables."); for (const auto& table : tables) { - const TPartitionStats newTableStats = PrepareStats(ctx, table); + const TPartitionStats newTableStats = PrepareStats(table, now, {}, {}); const TPathId tablePathId = TPathId(TOwnerId(pathId.OwnerId), TLocalPathId(table.GetTableLocalId())); @@ -359,28 +368,38 @@ bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, LOG_TRACE_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "add stats for exists table with pathId=" << tablePathId); - Self->ColumnTables.GetVerifiedPtr(tablePathId)->UpdateTableStats(shardIdx, tablePathId, newTableStats); + Self->ColumnTables.GetVerifiedPtr(tablePathId)->UpdateTableStats(shardIdx, tablePathId, newTableStats, now); } else { LOG_WARN_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "failed add stats for 
table with pathId=" << tablePathId); } } + LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, + "Aggregated stats for pathId " << pathId.LocalPathId + << ": RowCount " << olapStore->Stats.Aggregated.RowCount + << ", DataSize " << olapStore->Stats.Aggregated.DataSize + ); + } else if (isColumnTable) { LOG_INFO_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, "PersistSingleStats: ColumnTable rec.GetColumnTables() size=" << rec.GetTables().size()); auto columnTable = Self->ColumnTables.GetVerifiedPtr(pathId); - oldAggrStats = columnTable->GetStats().Aggregated; - columnTable->UpdateShardStats(shardIdx, newStats); - newAggrStats = columnTable->GetStats().Aggregated; + columnTable->UpdateShardStats(&diskSpaceUsageDelta, shardIdx, newStats, now); updateSubdomainInfo = true; + + LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, + "Aggregated stats for pathId " << pathId.LocalPathId + << ": RowCount " << columnTable->Stats.Aggregated.RowCount + << ", DataSize " << columnTable->Stats.Aggregated.DataSize + ); } if (updateSubdomainInfo) { - auto subDomainId = Self->ResolvePathIdForDomain(pathId); - subDomainInfo->AggrDiskSpaceUsage(Self, newAggrStats, oldAggrStats); + subDomainInfo->AggrDiskSpaceUsage(Self, diskSpaceUsageDelta); if (subDomainInfo->CheckDiskSpaceQuotas(Self)) { + auto subDomainId = Self->ResolvePathIdForDomain(pathElement); Self->PersistSubDomainState(db, subDomainId, *subDomainInfo); // Publish is done in a separate transaction, so we may call this directly TDeque toPublish; @@ -390,9 +409,6 @@ bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, } if (isOlapStore || isColumnTable) { - LOG_DEBUG_S(ctx, NKikimrServices::FLAT_TX_SCHEMESHARD, - "Aggregated stats for pathId " << pathId.LocalPathId - << ": RowCount " << newAggrStats.RowCount << ", DataSize " << newAggrStats.DataSize); return true; } @@ -409,7 +425,6 @@ bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, Self->TabletCounters->Percentile()[COUNTER_NUM_SHARDS_BY_TTL_LAG].DecrementFor(lag->Seconds()); } - const auto now = ctx.Now(); if (now >= shardInfo.LastCondErase) { lag = now - shardInfo.LastCondErase; } else { @@ -420,15 +435,15 @@ bool TTxStoreTableStats::PersistSingleStats(const TPathId& pathId, } const TTableIndexInfo* index = Self->Indexes.Value(pathElement->ParentPathId, nullptr).Get(); - const TTableInfo* mainTableForIndex = Self->GetMainTableForIndex(pathId); + const TTableInfo* mainTableForIndex = (index ? 
Self->GetMainTableForIndex(pathId) : nullptr); TString errStr; const auto forceShardSplitSettings = Self->SplitSettings.GetForceShardSplitSettings(); TVector shardsToMerge; TString mergeReason; if ((!index || index->State == NKikimrSchemeOp::EIndexStateReady) - && Self->CheckInFlightLimit(NKikimrSchemeOp::ESchemeOpSplitMergeTablePartitions, errStr) - && table->CheckCanMergePartitions(Self->SplitSettings, forceShardSplitSettings, shardIdx, Self->ShardInfos[shardIdx].TabletID, shardsToMerge, mainTableForIndex, mergeReason) + && Self->CheckInFlightLimit(TTxState::ETxType::TxSplitTablePartition, errStr) + && table->CheckCanMergePartitions(Self->SplitSettings, forceShardSplitSettings, shardIdx, Self->ShardInfos[shardIdx].TabletID, shardsToMerge, mainTableForIndex, now, mergeReason) ) { TTxId txId = Self->GetCachedTxId(ctx); @@ -630,8 +645,9 @@ void TSchemeShard::ScheduleTableStatsBatch(const TActorContext& ctx) { void TSchemeShard::UpdateShardMetrics( const TShardIdx& shardIdx, - const TPartitionStats& newStats) -{ + const TPartitionStats& newStats, + TInstant now +) { if (newStats.HasBorrowedData) ShardsWithBorrowed.insert(shardIdx); else @@ -663,7 +679,6 @@ void TSchemeShard::UpdateShardMetrics( metrics.RowDeletes = newStats.RowDeletes; TabletCounters->Percentile()[COUNTER_SHARDS_WITH_ROW_DELETES].IncrementFor(metrics.RowDeletes); - auto now = AppData()->TimeProvider->Now(); auto compactionTime = TInstant::Seconds(newStats.FullCompactionTs); if (now >= compactionTime) metrics.HoursSinceFullCompaction = (now - compactionTime).Hours(); diff --git a/ydb/core/tx/schemeshard/schemeshard_impl.cpp b/ydb/core/tx/schemeshard/schemeshard_impl.cpp index 23ff66d73002..3095d0f2cafc 100644 --- a/ydb/core/tx/schemeshard/schemeshard_impl.cpp +++ b/ydb/core/tx/schemeshard/schemeshard_impl.cpp @@ -7619,6 +7619,7 @@ void TSchemeShard::SetPartitioning(TPathId pathId, TTableInfo::TPtr tableInfo, T newPartitioningSet.reserve(newPartitioning.size()); const auto& oldPartitioning = tableInfo->GetPartitions(); + TInstant now = AppData()->TimeProvider->Now(); for (const auto& p: newPartitioning) { if (!oldPartitioning.empty()) newPartitioningSet.insert(p.ShardIdx); @@ -7627,7 +7628,7 @@ void TSchemeShard::SetPartitioning(TPathId pathId, TTableInfo::TPtr tableInfo, T auto it = partitionStats.find(p.ShardIdx); if (it != partitionStats.end()) { EnqueueBackgroundCompaction(p.ShardIdx, it->second); - UpdateShardMetrics(p.ShardIdx, it->second); + UpdateShardMetrics(p.ShardIdx, it->second, now); } } diff --git a/ydb/core/tx/schemeshard/schemeshard_impl.h b/ydb/core/tx/schemeshard/schemeshard_impl.h index f4529c0b3009..2e4a0f33d378 100644 --- a/ydb/core/tx/schemeshard/schemeshard_impl.h +++ b/ydb/core/tx/schemeshard/schemeshard_impl.h @@ -68,6 +68,8 @@ #include +#include + namespace NKikimr::NSchemeShard::NBackground { struct TEvListRequest; } @@ -382,7 +384,7 @@ class TSchemeShard TDuration StatsMaxExecuteTime; TDuration StatsBatchTimeout; ui32 StatsMaxBatchSize = 0; - THashMap InFlightLimits; + absl::flat_hash_map InFlightLimits; // time when we opened the batch bool TableStatsBatchScheduled = false; @@ -993,7 +995,7 @@ class TSchemeShard void RemoveBackgroundCleaning(const TPathId& pathId); std::optional ResolveTempDirInfo(const TPathId& pathId); - void UpdateShardMetrics(const TShardIdx& shardIdx, const TPartitionStats& newStats); + void UpdateShardMetrics(const TShardIdx& shardIdx, const TPartitionStats& newStats, TInstant now); void RemoveShardMetrics(const TShardIdx& shardIdx); NOperationQueue::EStartStatus 
StartBackgroundCompaction(const TShardCompactionInfo& info); diff --git a/ydb/core/tx/schemeshard/schemeshard_info_types.cpp b/ydb/core/tx/schemeshard/schemeshard_info_types.cpp index 4846faddd8c8..7f143bd782ab 100644 --- a/ydb/core/tx/schemeshard/schemeshard_info_types.cpp +++ b/ydb/core/tx/schemeshard/schemeshard_info_types.cpp @@ -276,6 +276,37 @@ void TSubDomainInfo::AggrDiskSpaceUsage(IQuotaCounters* counters, const TPartiti } } +void TSubDomainInfo::AggrDiskSpaceUsage(IQuotaCounters* counters, const TDiskSpaceUsageDelta& diskSpaceUsageDelta) { + // just (in)sanity check, diskSpaceUsageDelta should have at least 1 element + if (diskSpaceUsageDelta.empty()) { + return; + } + // see filling of diskSpaceUsageDelta in UpdateShardStats() + // total space usage, index 0 + { + const auto& [poolKind, delta] = diskSpaceUsageDelta[0]; + + DiskSpaceUsage.Tables.DataSize += delta.DataSize; + counters->ChangeDiskSpaceTablesDataBytes(delta.DataSize); + + DiskSpaceUsage.Tables.IndexSize += delta.IndexSize; + counters->ChangeDiskSpaceTablesIndexBytes(delta.IndexSize); + + i64 oldTotalBytes = DiskSpaceUsage.Tables.TotalSize; + DiskSpaceUsage.Tables.TotalSize = DiskSpaceUsage.Tables.DataSize + DiskSpaceUsage.Tables.IndexSize; + i64 newTotalBytes = DiskSpaceUsage.Tables.TotalSize; + counters->ChangeDiskSpaceTablesTotalBytes(newTotalBytes - oldTotalBytes); + } + // space usage by storage pool kinds, from index 1 onwards + for (size_t i = 1; i < diskSpaceUsageDelta.size(); ++i) { + const auto& [poolKind, delta] = diskSpaceUsageDelta[i]; + auto& r = DiskSpaceUsage.StoragePoolsUsage[poolKind]; + r.DataSize += delta.DataSize; + r.IndexSize += delta.IndexSize; + counters->AddDiskSpaceTables(GetUserFacingStorageType(poolKind), delta.DataSize, delta.IndexSize); + } +} + void TSubDomainInfo::AggrDiskSpaceUsage(const TTopicStats& newAggr, const TTopicStats& oldAggr) { auto& topics = DiskSpaceUsage.Topics; topics.DataSize += (newAggr.DataSize - oldAggr.DataSize); @@ -1640,6 +1671,8 @@ void TTableInfo::FinishAlter() { partitionConfig.ClearShadowData(); } + IsExternalBlobsEnabled = PartitionConfigHasExternalBlobsEnabled(PartitionConfig()); + // Apply TTL params if (AlterData->TableDescriptionFull.Defined() && AlterData->TableDescriptionFull->HasTTLSettings()) { MutableTTLSettings().Swap(AlterData->TableDescriptionFull->MutableTTLSettings()); @@ -1758,16 +1791,18 @@ void TTableInfo::SetPartitioning(TVector&& newPartitioning) { } } -void TTableInfo::UpdateShardStats(TShardIdx datashardIdx, const TPartitionStats& newStats) { - Stats.UpdateShardStats(datashardIdx, newStats); +void TTableInfo::UpdateShardStats(TDiskSpaceUsageDelta* diskSpaceUsageDelta, TShardIdx datashardIdx, const TPartitionStats& newStats, TInstant now) { + Stats.UpdateShardStats(diskSpaceUsageDelta, datashardIdx, newStats, now); } -void TTableAggregatedStats::UpdateShardStats(TShardIdx datashardIdx, const TPartitionStats& newStats) { +void TTableAggregatedStats::UpdateShardStats(TDiskSpaceUsageDelta* diskSpaceUsageDelta, TShardIdx datashardIdx, const TPartitionStats& newStats, TInstant now) { + auto found = PartitionStats.find(datashardIdx); // Ignore stats from unknown datashard (it could have been split) - if (!PartitionStats.contains(datashardIdx)) + if (found == PartitionStats.end()) { return; + } - TPartitionStats& oldStats = PartitionStats[datashardIdx]; + TPartitionStats& oldStats = found->second; if (newStats.SeqNo <= oldStats.SeqNo) { // Ignore outdated message @@ -1787,29 +1822,51 @@ void TTableAggregatedStats::UpdateShardStats(TShardIdx 
datashardIdx, const TPart oldStats.RangeReadRows = 0; } - Aggregated.RowCount += (newStats.RowCount - oldStats.RowCount); - Aggregated.DataSize += (newStats.DataSize - oldStats.DataSize); - Aggregated.IndexSize += (newStats.IndexSize - oldStats.IndexSize); - Aggregated.ByKeyFilterSize += (newStats.ByKeyFilterSize - oldStats.ByKeyFilterSize); + // disk space related stuff + // first, total space aggregation + { + TStoragePoolStatsDelta delta{ + .DataSize = (static_cast(newStats.DataSize) - static_cast(oldStats.DataSize)), + .IndexSize = (static_cast(newStats.IndexSize) - static_cast(oldStats.IndexSize)), + }; + diskSpaceUsageDelta->emplace_back(TString(), delta); + Aggregated.DataSize += delta.DataSize; + Aggregated.IndexSize += delta.IndexSize; + } + // second, aggregation of space separated by storage pool kinds for (const auto& [poolKind, newStoragePoolStats] : newStats.StoragePoolsStats) { - auto& [dataSize, indexSize] = Aggregated.StoragePoolsStats[poolKind]; const auto* oldStoragePoolStats = oldStats.StoragePoolsStats.FindPtr(poolKind); // Missing old stats for a particular storage pool are interpreted as if this data // has just been written to the datashard and we need to increment the aggregate by the entire new stats' sizes. - dataSize += newStoragePoolStats.DataSize - (oldStoragePoolStats ? oldStoragePoolStats->DataSize : 0u); - indexSize += newStoragePoolStats.IndexSize - (oldStoragePoolStats ? oldStoragePoolStats->IndexSize : 0u); + TStoragePoolStatsDelta delta{ + .DataSize = (static_cast(newStoragePoolStats.DataSize) - (oldStoragePoolStats ? static_cast(oldStoragePoolStats->DataSize) : 0)), + .IndexSize = (static_cast(newStoragePoolStats.IndexSize) - (oldStoragePoolStats ? static_cast(oldStoragePoolStats->IndexSize) : 0)), + }; + diskSpaceUsageDelta->emplace_back(poolKind, delta); } for (const auto& [poolKind, oldStoragePoolStats] : oldStats.StoragePoolsStats) { if (const auto* newStoragePoolStats = newStats.StoragePoolsStats.FindPtr(poolKind); !newStoragePoolStats ) { - auto& [dataSize, indexSize] = Aggregated.StoragePoolsStats[poolKind]; // Missing new stats for a particular storage pool are interpreted as if this data // has been removed from the datashard and we need to subtract the old stats' sizes from the aggregate. 
- dataSize -= oldStoragePoolStats.DataSize; - indexSize -= oldStoragePoolStats.IndexSize; + TStoragePoolStatsDelta delta{ + .DataSize = -static_cast(oldStoragePoolStats.DataSize), + .IndexSize = -static_cast(oldStoragePoolStats.IndexSize), + }; + diskSpaceUsageDelta->emplace_back(poolKind, delta); } } + // from index 1 onwards (as index 0 holds total space delta) + for (size_t i = 1; i < diskSpaceUsageDelta->size(); ++i) { + const auto& [poolKind, delta] = (*diskSpaceUsageDelta)[i]; + auto& r = Aggregated.StoragePoolsStats[poolKind]; + r.DataSize += delta.DataSize; + r.IndexSize += delta.IndexSize; + } + + Aggregated.RowCount += (newStats.RowCount - oldStats.RowCount); + Aggregated.ByKeyFilterSize += (newStats.ByKeyFilterSize - oldStats.ByKeyFilterSize); Aggregated.LastAccessTime = Max(Aggregated.LastAccessTime, newStats.LastAccessTime); Aggregated.LastUpdateTime = Max(Aggregated.LastUpdateTime, newStats.LastUpdateTime); Aggregated.ImmediateTxCompleted += (newStats.ImmediateTxCompleted - oldStats.ImmediateTxCompleted); @@ -1827,7 +1884,6 @@ void TTableAggregatedStats::UpdateShardStats(TShardIdx datashardIdx, const TPart i64 cpuUsageDelta = newStats.GetCurrentRawCpuUsage() - oldStats.GetCurrentRawCpuUsage(); i64 prevCpuUsage = Aggregated.GetCurrentRawCpuUsage(); ui64 newAggregatedCpuUsage = std::max(0, prevCpuUsage + cpuUsageDelta); - TInstant now = AppData()->TimeProvider->Now(); Aggregated.SetCurrentRawCpuUsage(newAggregatedCpuUsage, now); Aggregated.Memory += (newStats.Memory - oldStats.Memory); Aggregated.Network += (newStats.Network - oldStats.Network); @@ -1858,10 +1914,11 @@ void TTableAggregatedStats::UpdateShardStats(TShardIdx datashardIdx, const TPart UpdatedStats.insert(datashardIdx); } -void TAggregatedStats::UpdateTableStats(TShardIdx shardIdx, const TPathId& pathId, const TPartitionStats& newStats) { +void TAggregatedStats::UpdateTableStats(TShardIdx shardIdx, const TPathId& pathId, const TPartitionStats& newStats, TInstant now) { auto& tableStats = TableStats[pathId]; tableStats.PartitionStats[shardIdx]; // insert if none - tableStats.UpdateShardStats(shardIdx, newStats); + TDiskSpaceUsageDelta unused; + tableStats.UpdateShardStats(&unused, shardIdx, newStats, now); } void TTableInfo::RegisterSplitMergeOp(TOperationId opId, const TTxState& txState) { @@ -1930,6 +1987,7 @@ bool TTableInfo::TryAddShardToMerge(const TSplitSettings& splitSettings, TShardIdx shardIdx, TVector& shardsToMerge, THashSet& partOwners, ui64& totalSize, float& totalLoad, float cpuUsageThreshold, const TTableInfo* mainTableForIndex, + TInstant now, TString& reason) const { if (ExpectedPartitionCount + 1 - shardsToMerge.size() <= GetMinPartitionsCount()) { @@ -1969,7 +2027,6 @@ bool TTableInfo::TryAddShardToMerge(const TSplitSettings& splitSettings, } // Check if we can try merging by load - TInstant now = AppData()->TimeProvider->Now(); TDuration minUptime = TDuration::Seconds(splitSettings.MergeByLoadMinUptimeSec); if (!canMerge && IsMergeByLoadEnabled(mainTableForIndex) && stats->StartTime && stats->StartTime + minUptime < now) { reason = "merge by load"; @@ -2017,6 +2074,7 @@ bool TTableInfo::CheckCanMergePartitions(const TSplitSettings& splitSettings, const TForceShardSplitSettings& forceShardSplitSettings, TShardIdx shardIdx, const TTabletId& tabletId, TVector& shardsToMerge, const TTableInfo* mainTableForIndex, + TInstant now, TString& reason) const { // Don't split/merge backup tables @@ -2056,7 +2114,7 @@ bool TTableInfo::CheckCanMergePartitions(const TSplitSettings& splitSettings, TString 
shardMergeReason; // Make sure we can actually merge current shard first - if (!TryAddShardToMerge(splitSettings, forceShardSplitSettings, shardIdx, shardsToMerge, partOwners, totalSize, totalLoad, cpuMergeThreshold, mainTableForIndex, shardMergeReason)) { + if (!TryAddShardToMerge(splitSettings, forceShardSplitSettings, shardIdx, shardsToMerge, partOwners, totalSize, totalLoad, cpuMergeThreshold, mainTableForIndex, now, shardMergeReason)) { return false; } @@ -2064,7 +2122,7 @@ bool TTableInfo::CheckCanMergePartitions(const TSplitSettings& splitSettings, << " " << shardMergeReason; for (i64 pi = partitionIdx - 1; pi >= 0; --pi) { - if (!TryAddShardToMerge(splitSettings, forceShardSplitSettings, GetPartitions()[pi].ShardIdx, shardsToMerge, partOwners, totalSize, totalLoad, cpuMergeThreshold, mainTableForIndex, shardMergeReason)) { + if (!TryAddShardToMerge(splitSettings, forceShardSplitSettings, GetPartitions()[pi].ShardIdx, shardsToMerge, partOwners, totalSize, totalLoad, cpuMergeThreshold, mainTableForIndex, now, shardMergeReason)) { break; } } @@ -2072,7 +2130,7 @@ bool TTableInfo::CheckCanMergePartitions(const TSplitSettings& splitSettings, Reverse(shardsToMerge.begin(), shardsToMerge.end()); for (ui64 pi = partitionIdx + 1; pi < GetPartitions().size(); ++pi) { - if (!TryAddShardToMerge(splitSettings, forceShardSplitSettings, GetPartitions()[pi].ShardIdx, shardsToMerge, partOwners, totalSize, totalLoad, cpuMergeThreshold, mainTableForIndex, shardMergeReason)) { + if (!TryAddShardToMerge(splitSettings, forceShardSplitSettings, GetPartitions()[pi].ShardIdx, shardsToMerge, partOwners, totalSize, totalLoad, cpuMergeThreshold, mainTableForIndex, now, shardMergeReason)) { break; } } @@ -2110,14 +2168,11 @@ bool TTableInfo::CheckSplitByLoad( } // Ignore stats from unknown datashard (it could have been split) - if (!Stats.PartitionStats.contains(shardIdx)) { + const auto* stats = Stats.PartitionStats.FindPtr(shardIdx); + if (!stats) { reason = "UnknownDataShard"; return false; } - if (!Shard2PartitionIdx.contains(shardIdx)) { - reason = "ShardNotInIndex"; - return false; - } if (!IsSplitByLoadEnabled(mainTableForIndex)) { reason = "SplitByLoadNotEnabledForTable"; @@ -2146,17 +2201,16 @@ bool TTableInfo::CheckSplitByLoad( } } - const auto& stats = *Stats.PartitionStats.FindPtr(shardIdx); if (rowCount < MIN_ROWS_FOR_SPLIT_BY_LOAD || dataSize < MIN_SIZE_FOR_SPLIT_BY_LOAD || Stats.PartitionStats.size() >= maxShards || - stats.GetCurrentRawCpuUsage() < cpuUsageThreshold * 1000000) + stats->GetCurrentRawCpuUsage() < cpuUsageThreshold * 1000000) { reason = TStringBuilder() << "ConditionsNotMet" << " rowCount: " << rowCount << " minRows: " << MIN_ROWS_FOR_SPLIT_BY_LOAD << " dataSize: " << dataSize << " minSize: " << MIN_SIZE_FOR_SPLIT_BY_LOAD << " shardCount: " << Stats.PartitionStats.size() << " maxShards: " << maxShards - << " cpuUsage: " << stats.GetCurrentRawCpuUsage() << " threshold: " << cpuUsageThreshold * 1000000; + << " cpuUsage: " << stats->GetCurrentRawCpuUsage() << " threshold: " << cpuUsageThreshold * 1000000; return false; } @@ -2167,7 +2221,7 @@ bool TTableInfo::CheckSplitByLoad( << "minShardSize: " << MIN_SIZE_FOR_SPLIT_BY_LOAD << ", " << "shardCount: " << Stats.PartitionStats.size() << ", " << "maxShardCount: " << maxShards << ", " - << "cpuUsage: " << stats.GetCurrentRawCpuUsage() << ", " + << "cpuUsage: " << stats->GetCurrentRawCpuUsage() << ", " << "cpuUsageThreshold: " << cpuUsageThreshold * 1000000 << ")"; return true; diff --git a/ydb/core/tx/schemeshard/schemeshard_info_types.h 
b/ydb/core/tx/schemeshard/schemeshard_info_types.h index b558b1a0031f..f9ea895ccf88 100644 --- a/ydb/core/tx/schemeshard/schemeshard_info_types.h +++ b/ydb/core/tx/schemeshard/schemeshard_info_types.h @@ -331,6 +331,12 @@ struct TPartitionStats { ui64 CPU = 0; }; +struct TStoragePoolStatsDelta { + i64 DataSize = 0; + i64 IndexSize = 0; +}; +using TDiskSpaceUsageDelta = TVector>; + struct TTableAggregatedStats { TPartitionStats Aggregated; THashMap PartitionStats; @@ -342,13 +348,13 @@ struct TTableAggregatedStats { return Aggregated.PartCount && UpdatedStats.size() == Aggregated.PartCount; } - void UpdateShardStats(TShardIdx datashardIdx, const TPartitionStats& newStats); + void UpdateShardStats(TDiskSpaceUsageDelta* diskSpaceUsageDelta, TShardIdx datashardIdx, const TPartitionStats& newStats, TInstant now); }; struct TAggregatedStats : public TTableAggregatedStats { THashMap TableStats; - void UpdateTableStats(TShardIdx datashardIdx, const TPathId& pathId, const TPartitionStats& newStats); + void UpdateTableStats(TShardIdx datashardIdx, const TPathId& pathId, const TPartitionStats& newStats, TInstant now); }; struct TSubDomainInfo; @@ -467,6 +473,8 @@ struct TTableInfo : public TSimpleRefCount { THashMap PerShardPartitionConfig; + bool IsExternalBlobsEnabled = false; + const NKikimrSchemeOp::TPartitionConfig& PartitionConfig() const { return TableDescription.GetPartitionConfig(); } NKikimrSchemeOp::TPartitionConfig& MutablePartitionConfig() { return *TableDescription.MutablePartitionConfig(); } @@ -575,6 +583,7 @@ struct TTableInfo : public TSimpleRefCount { , IsRestore(alterData.IsRestore) { TableDescription.Swap(alterData.TableDescriptionFull.Get()); + IsExternalBlobsEnabled = PartitionConfigHasExternalBlobsEnabled(TableDescription.GetPartitionConfig()); } static TTableInfo::TPtr DeepCopy(const TTableInfo& other) { @@ -668,7 +677,7 @@ struct TTableInfo : public TSimpleRefCount { ShardsStatsDetached = true; } - void UpdateShardStats(TShardIdx datashardIdx, const TPartitionStats& newStats); + void UpdateShardStats(TDiskSpaceUsageDelta* diskSpaceUsageDelta, TShardIdx datashardIdx, const TPartitionStats& newStats, TInstant now); void RegisterSplitMergeOp(TOperationId txId, const TTxState& txState); @@ -692,12 +701,12 @@ struct TTableInfo : public TSimpleRefCount { const TForceShardSplitSettings& forceShardSplitSettings, TShardIdx shardIdx, TVector& shardsToMerge, THashSet& partOwners, ui64& totalSize, float& totalLoad, - float cpuUsageThreshold, const TTableInfo* mainTableForIndex, TString& reason) const; + float cpuUsageThreshold, const TTableInfo* mainTableForIndex, TInstant now, TString& reason) const; bool CheckCanMergePartitions(const TSplitSettings& splitSettings, const TForceShardSplitSettings& forceShardSplitSettings, TShardIdx shardIdx, const TTabletId& tabletId, TVector& shardsToMerge, - const TTableInfo* mainTableForIndex, TString& reason) const; + const TTableInfo* mainTableForIndex, TInstant now, TString& reason) const; bool CheckSplitByLoad( const TSplitSettings& splitSettings, TShardIdx shardIdx, @@ -710,7 +719,7 @@ struct TTableInfo : public TSimpleRefCount { return false; } // Auto split is always enabled, unless table is using external blobs - return !PartitionConfigHasExternalBlobsEnabled(PartitionConfig()); + return (IsExternalBlobsEnabled == false); } bool IsMergeBySizeEnabled(const TForceShardSplitSettings& params) const { @@ -756,7 +765,7 @@ struct TTableInfo : public TSimpleRefCount { bool IsSplitByLoadEnabled(const TTableInfo* mainTableForIndex) const { // We cannot 
split when external blobs are enabled - if (PartitionConfigHasExternalBlobsEnabled(PartitionConfig())) { + if (IsExternalBlobsEnabled) { return false; } @@ -1964,6 +1973,7 @@ struct TSubDomainInfo: TSimpleRefCount { } void AggrDiskSpaceUsage(IQuotaCounters* counters, const TPartitionStats& newAggr, const TPartitionStats& oldAggr = {}); + void AggrDiskSpaceUsage(IQuotaCounters* counters, const TDiskSpaceUsageDelta& delta); void AggrDiskSpaceUsage(const TTopicStats& newAggr, const TTopicStats& oldAggr = {}); diff --git a/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp b/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp index d69eaec1542c..a283586c7eef 100644 --- a/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp +++ b/ydb/core/tx/schemeshard/ut_subdomain/ut_subdomain.cpp @@ -3,6 +3,10 @@ #include #include #include +#include // for TSchemeShard +#include // for MakeTestBlob +#include // for NTypeIds and TTypeInfo + using namespace NKikimr; using namespace NSchemeShard; @@ -3129,17 +3133,42 @@ Y_UNIT_TEST_SUITE(TSchemeShardSubDomainTest) { )", {NKikimrScheme::StatusAccepted}); } - Y_UNIT_TEST(DiskSpaceUsage) { + Y_UNIT_TEST_FLAGS(DiskSpaceUsage, DisableStatsBatching, EnablePersistentPartitionStats) { TTestBasicRuntime runtime; + TTestEnvOptions opts; - opts.DisableStatsBatching(true); - opts.EnablePersistentPartitionStats(true); - TTestEnv env(runtime, opts); + opts.DisableStatsBatching(DisableStatsBatching); + opts.EnablePersistentPartitionStats(EnablePersistentPartitionStats); + opts.EnableBackgroundCompaction(false); // make sure background compaction will not interfere + opts.DataShardStatsReportIntervalSeconds(0); // make sure stats will be reported swiftly + + TSchemeShard* schemeshard = nullptr; + TTestEnv env(runtime, opts, + /*TSchemeShardFactory ssFactory*/ + [&schemeshard](const TActorId& tablet, TTabletStorageInfo* info) { + schemeshard = new TSchemeShard(tablet, info); + Cerr << "TEST create schemeshard, " << (void*)schemeshard << Endl; + return schemeshard; + } + ); + + NDataShard::gDbStatsDataSizeResolution = 1; + NDataShard::gDbStatsRowCountResolution = 1; + + if (DisableStatsBatching == false) { + runtime.GetAppData().SchemeShardConfig.SetStatsMaxBatchSize(2); + } + const auto sender = runtime.AllocateEdgeActor(); - auto waitForTableStats = [&](ui32 shards) { + auto waitForFullStatsUpdate = [&](const ui32 count) { + ui64 statsCountBaseline = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get(); TDispatchOptions options; - options.FinalEvents.push_back(TDispatchOptions::TFinalEventCondition(TEvDataShard::EvPeriodicTableStats, shards)); + options.CustomFinalCondition = [&]() { + auto statsCount = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get() - statsCountBaseline; + Cerr << "TEST waitForFullStatsUpdate, schemeshard " << (void*)schemeshard << ", stats written " << statsCount << Endl; + return statsCount >= count; + }; runtime.DispatchEvents(options); }; @@ -3158,6 +3187,14 @@ Y_UNIT_TEST_SUITE(TSchemeShardSubDomainTest) { return result; }; + auto compareDiskSpaceUsage = [&](TString* diff, const NKikimrSubDomains::TDiskSpaceUsage& a, const NKikimrSubDomains::TDiskSpaceUsage& b) -> bool { + using google::protobuf::util::MessageDifferencer; + MessageDifferencer d; + d.ReportDifferencesToString(diff); + d.set_repeated_field_comparison(MessageDifferencer::RepeatedFieldComparison::AS_SET); + return d.Compare(a, b); + }; + ui64 tabletId = TTestTxConfig::FakeHiveTablets; ui64 txId = 100; @@ -3172,13 +3209,15 @@ 
Y_UNIT_TEST_SUITE(TSchemeShardSubDomainTest) { env.TestWaitNotification(runtime, txId); UpdateRow(runtime, "Table1", 1, "value1", tabletId); - waitForTableStats(1); + waitForFullStatsUpdate(1); auto du = getDiskSpaceUsage(); UNIT_ASSERT_C(du.GetTables().GetTotalSize() > 0, du.ShortDebugString()); RebootTablet(runtime, TTestTxConfig::SchemeShard, sender); - UNIT_ASSERT_VALUES_EQUAL(du.ShortDebugString(), getDiskSpaceUsage().ShortDebugString()); + waitForFullStatsUpdate(1); + TString diff; + UNIT_ASSERT_C(compareDiskSpaceUsage(&diff, du, getDiskSpaceUsage()), diff); } // multi-shard table @@ -3196,16 +3235,486 @@ Y_UNIT_TEST_SUITE(TSchemeShardSubDomainTest) { UpdateRow(runtime, "Table2", 1, "value1", tabletId + 0); UpdateRow(runtime, "Table2", 2, "value2", tabletId + 1); - waitForTableStats(1 /* Table1 */ + 2 /* Table2 */); + const ui32 shardCount = 1 /* Table1 */ + 2 /* Table2 */; + waitForFullStatsUpdate(shardCount); auto du = getDiskSpaceUsage(); UNIT_ASSERT_C(du.GetTables().GetTotalSize() > 0, du.ShortDebugString()); RebootTablet(runtime, TTestTxConfig::SchemeShard, sender); - UNIT_ASSERT_VALUES_EQUAL(du.ShortDebugString(), getDiskSpaceUsage().ShortDebugString()); + waitForFullStatsUpdate(shardCount); + TString diff; + UNIT_ASSERT_C(compareDiskSpaceUsage(&diff, du, getDiskSpaceUsage()), diff); + } + } + + Y_UNIT_TEST_FLAG(DiskSpaceUsageWithPersistedLeftovers, DisableStatsBatching) { + TTestBasicRuntime runtime; + + TTestEnvOptions opts; + opts.DisableStatsBatching(DisableStatsBatching); + opts.EnableBackgroundCompaction(false); // make sure background compaction will not interfere + opts.DataShardStatsReportIntervalSeconds(0); // make sure stats will be reported swiftly + + TSchemeShard* schemeshard = nullptr; + TTestEnv env(runtime, opts, + /*TSchemeShardFactory ssFactory*/ + [&schemeshard](const TActorId& tablet, TTabletStorageInfo* info) { + schemeshard = new TSchemeShard(tablet, info); + Cerr << "TEST create schemeshard, " << (void*)schemeshard << Endl; + return schemeshard; + } + ); + + NDataShard::gDbStatsDataSizeResolution = 1; + NDataShard::gDbStatsRowCountResolution = 1; + + if (DisableStatsBatching == false) { + runtime.GetAppData().SchemeShardConfig.SetStatsMaxBatchSize(2); + } + + const auto sender = runtime.AllocateEdgeActor(); + + auto waitForFullStatsUpdate = [&](const ui32 count) { + ui64 statsCountBaseline = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get(); + TDispatchOptions options; + options.CustomFinalCondition = [&]() { + auto statsCount = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get() - statsCountBaseline; + Cerr << "TEST waitForFullStatsUpdate, schemeshard " << (void*)schemeshard << ", stats written " << statsCount << Endl; + return statsCount >= count; + }; + runtime.DispatchEvents(options); + }; + + auto getDiskSpaceUsage = [&]() { + NKikimrSubDomains::TDiskSpaceUsage result; + + TestDescribeResult( + DescribePath(runtime, "/MyRoot"), { + NLs::PathExist, + NLs::Finished, [&result] (const NKikimrScheme::TEvDescribeSchemeResult& record) { + result = record.GetPathDescription().GetDomainDescription().GetDiskSpaceUsage(); + } + } + ); + + return result; + }; + + auto compareDiskSpaceUsage = [&](TString* diff, const NKikimrSubDomains::TDiskSpaceUsage& a, const NKikimrSubDomains::TDiskSpaceUsage& b) -> bool { + using google::protobuf::util::MessageDifferencer; + MessageDifferencer d; + d.ReportDifferencesToString(diff); + d.set_repeated_field_comparison(MessageDifferencer::RepeatedFieldComparison::AS_SET); + return 
d.Compare(a, b); + }; + + ui64 tabletId = TTestTxConfig::FakeHiveTablets; + ui64 txId = 100; + + // multi-shard table + { + runtime.GetAppData().FeatureFlags.SetEnablePersistentPartitionStats(true); + + TestCreateTable(runtime, ++txId, "/MyRoot", R"( + Name: "Table2" + Columns { Name: "key" Type: "Uint32"} + Columns { Name: "value" Type: "Utf8"} + KeyColumnNames: ["key"] + UniformPartitionsCount: 2 + )"); + env.TestWaitNotification(runtime, txId); + + UpdateRow(runtime, "Table2", 1, "value1", tabletId + 0); + UpdateRow(runtime, "Table2", 2, "value2", tabletId + 1); + + const ui32 shardCount = 2; + waitForFullStatsUpdate(shardCount); + + auto du = getDiskSpaceUsage(); + UNIT_ASSERT_C(du.GetTables().GetTotalSize() > 0, du.ShortDebugString()); + + runtime.GetAppData().FeatureFlags.SetEnablePersistentPartitionStats(false); + + RebootTablet(runtime, TTestTxConfig::SchemeShard, sender); + + waitForFullStatsUpdate(shardCount); + TString diff; + UNIT_ASSERT_C(compareDiskSpaceUsage(&diff, du, getDiskSpaceUsage()), diff); + } + } + + Y_UNIT_TEST_FLAGS(DiskSpaceUsageWithTable, DisableStatsBatching, EnablePersistentPartitionStats) { + TTestBasicRuntime runtime; + + TTestEnvOptions opts; + opts.DisableStatsBatching(DisableStatsBatching); + opts.EnablePersistentPartitionStats(EnablePersistentPartitionStats); + opts.EnableBackgroundCompaction(false); // make sure background compaction will not interfere + opts.DataShardStatsReportIntervalSeconds(0); // make sure stats will be reported swiftly + + TSchemeShard* schemeshard = nullptr; + TTestEnv env(runtime, opts, + /*TSchemeShardFactory ssFactory*/ + [&schemeshard](const TActorId& tablet, TTabletStorageInfo* info) { + schemeshard = new TSchemeShard(tablet, info); + Cerr << "TEST create schemeshard, " << (void*)schemeshard << Endl; + return schemeshard; + } + ); + + NDataShard::gDbStatsDataSizeResolution = 1; + NDataShard::gDbStatsRowCountResolution = 1; + + if (DisableStatsBatching == false) { + runtime.GetAppData().SchemeShardConfig.SetStatsMaxBatchSize(2); + } + + const auto sender = runtime.AllocateEdgeActor(); + + auto waitForFullStatsUpdate = [&](const ui32 count) { + ui64 statsCountBaseline = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get(); + TDispatchOptions options; + options.CustomFinalCondition = [&]() { + auto statsCount = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get() - statsCountBaseline; + Cerr << "TEST waitForFullStatsUpdate, schemeshard " << (void*)schemeshard << ", stats written " << statsCount << Endl; + return statsCount >= count; + }; + runtime.DispatchEvents(options); + }; + + auto compareProto = [&](TString* diff, const auto& a, const auto& b) -> bool { + using google::protobuf::util::MessageDifferencer; + MessageDifferencer d; + d.ReportDifferencesToString(diff); + d.set_repeated_field_comparison(MessageDifferencer::RepeatedFieldComparison::AS_SET); + return d.Compare(a, b); + }; + + ui64 txId = 100; + + // test body + + // 1. create object and fill it with data + const ui32 shardCount = 2; + { + TestCreateTable(runtime, ++txId, "/MyRoot", Sprintf(R"( + Name: "Table" + Columns { Name: "key" Type: "Uint32"} + Columns { Name: "value" Type: "Utf8"} + KeyColumnNames: ["key"] + UniformPartitionsCount: %d + )", shardCount + )); + env.TestWaitNotification(runtime, txId); + + const ui64 tabletId = TTestTxConfig::FakeHiveTablets; + UpdateRow(runtime, "Table", 1, "value1", tabletId + 0); + UpdateRow(runtime, "Table", 2, "value2", tabletId + 1); } + + // 2. 
wait for all shard stats to be processed + waitForFullStatsUpdate(shardCount); + + // 3. check that disk space usage at subdomain level and table level is the same + auto getUsage = [](const auto& describe) { + return std::make_pair( + describe.GetPathDescription().GetDomainDescription().GetDiskSpaceUsage(), + describe.GetPathDescription().GetTableStats() + ); + }; + auto describeBefore = DescribePath(runtime, "/MyRoot/Table"); + const auto& [subdomainDiskUsageBefore, storeUsageBefore] = getUsage(describeBefore); + UNIT_ASSERT_GT_C(subdomainDiskUsageBefore.GetTables().GetDataSize(), 0, subdomainDiskUsageBefore.DebugString()); + UNIT_ASSERT_GT_C(storeUsageBefore.GetDataSize(), 0, storeUsageBefore.DebugString()); + UNIT_ASSERT_VALUES_EQUAL(subdomainDiskUsageBefore.GetTables().GetDataSize(), storeUsageBefore.GetDataSize()); + + // 4. reboot schemeshard + RebootTablet(runtime, TTestTxConfig::SchemeShard, sender); + + // 5. wait for all shard stats to be processed + waitForFullStatsUpdate(shardCount); + + // 6. check that disk space usage levels is the same as before reboot + auto describeAfter = DescribePath(runtime, "/MyRoot/Table"); + const auto& [subdomainDiskUsageAfter, storeUsageAfter] = getUsage(describeAfter); + TString diff; + UNIT_ASSERT_C(compareProto(&diff, subdomainDiskUsageBefore, subdomainDiskUsageAfter), diff); + UNIT_ASSERT_C(compareProto(&diff, storeUsageBefore, storeUsageAfter), diff); + UNIT_ASSERT_VALUES_EQUAL(subdomainDiskUsageAfter.GetTables().GetDataSize(), storeUsageAfter.GetDataSize()); } + Y_UNIT_TEST_FLAGS(DiskSpaceUsageWithColumnTableInStore, DisableStatsBatching, EnablePersistentPartitionStats) { + TTestBasicRuntime runtime; + + TTestEnvOptions opts; + opts.DisableStatsBatching(DisableStatsBatching); + opts.EnablePersistentPartitionStats(EnablePersistentPartitionStats); + opts.EnableBackgroundCompaction(false); // make sure background compaction will not interfere + opts.DataShardStatsReportIntervalSeconds(0); // make sure stats will be reported swiftly + + TSchemeShard* schemeshard = nullptr; + TTestEnv env(runtime, opts, + /*TSchemeShardFactory ssFactory*/ + [&schemeshard](const TActorId& tablet, TTabletStorageInfo* info) { + schemeshard = new TSchemeShard(tablet, info); + Cerr << "TEST create schemeshard, " << (void*)schemeshard << Endl; + return schemeshard; + } + ); + + NDataShard::gDbStatsDataSizeResolution = 1; + NDataShard::gDbStatsRowCountResolution = 1; + + if (DisableStatsBatching == false) { + runtime.GetAppData().SchemeShardConfig.SetStatsMaxBatchSize(2); + } + + const auto sender = runtime.AllocateEdgeActor(); + + auto waitForFullStatsUpdate = [&](const ui32 count) { + ui64 statsCountBaseline = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get(); + TDispatchOptions options; + options.CustomFinalCondition = [&]() { + auto statsCount = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get() - statsCountBaseline; + Cerr << "TEST waitForFullStatsUpdate, schemeshard " << (void*)schemeshard << ", stats written " << statsCount << Endl; + return statsCount >= count; + }; + runtime.DispatchEvents(options); + }; + + auto compareProto = [&](TString* diff, const auto& a, const auto& b) -> bool { + using google::protobuf::util::MessageDifferencer; + MessageDifferencer d; + d.ReportDifferencesToString(diff); + d.set_repeated_field_comparison(MessageDifferencer::RepeatedFieldComparison::AS_SET); + return d.Compare(a, b); + }; + + ui64 txId = 100; + + // test body + + // 1. 
create object and fill it with data + const ui32 shardCount = 1; + { + TestCreateOlapStore(runtime, ++txId, "/MyRoot", Sprintf(R"( + Name: "Store" + ColumnShardCount: 1 + SchemaPresets { + Name: "default" + Schema { + Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } + Columns { Name: "data" Type: "Utf8" } + KeyColumnNames: "timestamp" + } + } + )", shardCount + )); + env.TestWaitNotification(runtime, txId); + + TestCreateColumnTable(runtime, ++txId, "/MyRoot/Store", Sprintf(R"( + Name: "ColumnTable" + ColumnShardCount: %d + Schema { + Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } + Columns { Name: "data" Type: "Utf8" } + KeyColumnNames: "timestamp" + } + )", shardCount + )); + env.TestWaitNotification(runtime, txId); + + ui64 pathId = 0; + ui64 shardId = 0; + { + auto describe = DescribePath(runtime, "/MyRoot/Store/ColumnTable"); + TestDescribeResult(describe, {NLs::PathExist}); + pathId = describe.GetPathId(); + const auto& sharding = describe.GetPathDescription().GetColumnTableDescription().GetSharding(); + shardId = sharding.GetColumnShards()[0]; + } + UNIT_ASSERT(shardId); + + { // Write data directly into shard + TActorId sender = runtime.AllocateEdgeActor(); + const ui32 rowsInBatch = 100000; + + const TVector ydbSchema = { + NArrow::NTest::TTestColumn("timestamp", NScheme::TTypeInfo(NScheme::NTypeIds::Timestamp)).SetNullable(false), + NArrow::NTest::TTestColumn("data", NScheme::TTypeInfo(NScheme::NTypeIds::Utf8) ) + }; + const auto& data = NTxUT::MakeTestBlob({ 0, rowsInBatch }, ydbSchema, {}, { "timestamp" }); + ui64 writeId = 0; + std::vector writeIds; + ++txId; + NTxUT::WriteData(runtime, sender, shardId, ++writeId, pathId, data, ydbSchema, &writeIds, NEvWrite::EModificationType::Upsert, txId); + NTxUT::TPlanStep planStep = NTxUT::ProposeCommit(runtime, sender, shardId, txId, writeIds, txId); + NTxUT::PlanCommit(runtime, sender, shardId, planStep, { txId }); + + } + } + + // 2. wait for all shard stats to be processed + waitForFullStatsUpdate(shardCount); + + // 3. check that disk space usage at subdomain level and column store level is the same + auto getUsage = [](const auto& describe) { + return std::make_pair( + describe.GetPathDescription().GetDomainDescription().GetDiskSpaceUsage(), + describe.GetPathDescription().GetTableStats() + ); + }; + auto describeBefore = DescribePath(runtime, "/MyRoot/Store"); + const auto& [subdomainDiskUsageBefore, storeUsageBefore] = getUsage(describeBefore); + UNIT_ASSERT_GT_C(subdomainDiskUsageBefore.GetTables().GetDataSize(), 0, subdomainDiskUsageBefore.DebugString()); + UNIT_ASSERT_GT_C(storeUsageBefore.GetDataSize(), 0, storeUsageBefore.DebugString()); + UNIT_ASSERT_VALUES_EQUAL(subdomainDiskUsageBefore.GetTables().GetDataSize(), storeUsageBefore.GetDataSize()); + + // 4. reboot schemeshard + RebootTablet(runtime, TTestTxConfig::SchemeShard, sender); + + // 5. wait for all shard stats to be processed + waitForFullStatsUpdate(shardCount); + + // 6. 
check that disk space usage levels is the same as before reboot + auto describeAfter = DescribePath(runtime, "/MyRoot/Store"); + const auto& [subdomainDiskUsageAfter, storeUsageAfter] = getUsage(describeAfter); + TString diff; + UNIT_ASSERT_C(compareProto(&diff, subdomainDiskUsageBefore, subdomainDiskUsageAfter), diff); + UNIT_ASSERT_C(compareProto(&diff, storeUsageBefore, storeUsageAfter), diff); + UNIT_ASSERT_VALUES_EQUAL(subdomainDiskUsageAfter.GetTables().GetDataSize(), storeUsageAfter.GetDataSize()); + } + + Y_UNIT_TEST_FLAGS(DiskSpaceUsageWithStandaloneColumnTable, DisableStatsBatching, EnablePersistentPartitionStats) { + TTestBasicRuntime runtime; + + TTestEnvOptions opts; + opts.DisableStatsBatching(DisableStatsBatching); + opts.EnablePersistentPartitionStats(EnablePersistentPartitionStats); + opts.EnableBackgroundCompaction(false); // make sure background compaction will not interfere + opts.DataShardStatsReportIntervalSeconds(0); // make sure stats will be reported swiftly + + TSchemeShard* schemeshard = nullptr; + TTestEnv env(runtime, opts, + /*TSchemeShardFactory ssFactory*/ + [&schemeshard](const TActorId& tablet, TTabletStorageInfo* info) { + schemeshard = new TSchemeShard(tablet, info); + Cerr << "TEST create schemeshard, " << (void*)schemeshard << Endl; + return schemeshard; + } + ); + + NDataShard::gDbStatsDataSizeResolution = 1; + NDataShard::gDbStatsRowCountResolution = 1; + + if (DisableStatsBatching == false) { + runtime.GetAppData().SchemeShardConfig.SetStatsMaxBatchSize(2); + } + + const auto sender = runtime.AllocateEdgeActor(); + + auto waitForFullStatsUpdate = [&](const ui32 count) { + ui64 statsCountBaseline = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get(); + TDispatchOptions options; + options.CustomFinalCondition = [&]() { + auto statsCount = schemeshard->TabletCounters->Cumulative()[COUNTER_STATS_WRITTEN].Get() - statsCountBaseline; + Cerr << "TEST waitForFullStatsUpdate, schemeshard " << (void*)schemeshard << ", stats written " << statsCount << Endl; + return statsCount >= count; + }; + runtime.DispatchEvents(options); + }; + + auto compareProto = [&](TString* diff, const auto& a, const auto& b) -> bool { + using google::protobuf::util::MessageDifferencer; + MessageDifferencer d; + d.ReportDifferencesToString(diff); + d.set_repeated_field_comparison(MessageDifferencer::RepeatedFieldComparison::AS_SET); + return d.Compare(a, b); + }; + + ui64 txId = 100; + + // test body + + // 1. 
create object and fill it with data + const ui32 shardCount = 1; + { + TestCreateColumnTable(runtime, ++txId, "/MyRoot", Sprintf(R"( + Name: "ColumnTable" + ColumnShardCount: %d + Schema { + Columns { Name: "timestamp" Type: "Timestamp" NotNull: true } + Columns { Name: "data" Type: "Utf8" } + KeyColumnNames: "timestamp" + } + )", shardCount + )); + env.TestWaitNotification(runtime, txId); + + ui64 pathId = 0; + ui64 shardId = 0; + { + auto describe = DescribePath(runtime, "/MyRoot/ColumnTable"); + TestDescribeResult(describe, {NLs::PathExist}); + pathId = describe.GetPathId(); + const auto& sharding = describe.GetPathDescription().GetColumnTableDescription().GetSharding(); + shardId = sharding.GetColumnShards()[0]; + } + UNIT_ASSERT(shardId); + + { // Write data directly into shard + TActorId sender = runtime.AllocateEdgeActor(); + const ui32 rowsInBatch = 100000; + + const TVector ydbSchema = { + NArrow::NTest::TTestColumn("timestamp", NScheme::TTypeInfo(NScheme::NTypeIds::Timestamp)).SetNullable(false), + NArrow::NTest::TTestColumn("data", NScheme::TTypeInfo(NScheme::NTypeIds::Utf8) ) + }; + const auto& data = NTxUT::MakeTestBlob({ 0, rowsInBatch }, ydbSchema, {}, { "timestamp" }); + ui64 writeId = 0; + std::vector writeIds; + ++txId; + NTxUT::WriteData(runtime, sender, shardId, ++writeId, pathId, data, ydbSchema, &writeIds, NEvWrite::EModificationType::Upsert, txId); + NTxUT::TPlanStep planStep = NTxUT::ProposeCommit(runtime, sender, shardId, txId, writeIds, txId); + NTxUT::PlanCommit(runtime, sender, shardId, planStep, { txId }); + + } + } + + // 2. wait for all shard stats to be processed + waitForFullStatsUpdate(shardCount); + + // 3. check that disk space usage at subdomain level and column store level is the same + auto getUsage = [](const auto& describe) { + return std::make_pair( + describe.GetPathDescription().GetDomainDescription().GetDiskSpaceUsage(), + describe.GetPathDescription().GetTableStats() + ); + }; + auto describeBefore = DescribePath(runtime, "/MyRoot/ColumnTable"); + const auto& [subdomainDiskUsageBefore, storeUsageBefore] = getUsage(describeBefore); + UNIT_ASSERT_GT_C(subdomainDiskUsageBefore.GetTables().GetDataSize(), 0, subdomainDiskUsageBefore.DebugString()); + UNIT_ASSERT_GT_C(storeUsageBefore.GetDataSize(), 0, storeUsageBefore.DebugString()); + UNIT_ASSERT_VALUES_EQUAL(subdomainDiskUsageBefore.GetTables().GetDataSize(), storeUsageBefore.GetDataSize()); + + // 4. reboot schemeshard + RebootTablet(runtime, TTestTxConfig::SchemeShard, sender); + + // 5. wait for all shard stats to be processed + waitForFullStatsUpdate(shardCount); + + // 6. 
check that disk space usage at both levels is the same as before reboot + auto describeAfter = DescribePath(runtime, "/MyRoot/ColumnTable"); + const auto& [subdomainDiskUsageAfter, storeUsageAfter] = getUsage(describeAfter); + TString diff; + UNIT_ASSERT_C(compareProto(&diff, subdomainDiskUsageBefore, subdomainDiskUsageAfter), diff); + UNIT_ASSERT_C(compareProto(&diff, storeUsageBefore, storeUsageAfter), diff); + UNIT_ASSERT_VALUES_EQUAL(subdomainDiskUsageAfter.GetTables().GetDataSize(), storeUsageAfter.GetDataSize()); + } + + // TODO: add DiskSpaceUsage test for topics + Y_UNIT_TEST(TableDiskSpaceQuotas) { TTestBasicRuntime runtime; TTestEnvOptions opts; diff --git a/ydb/core/tx/schemeshard/ut_subdomain/ya.make b/ydb/core/tx/schemeshard/ut_subdomain/ya.make index 7e491032fdb7..a258826d8a03 100644 --- a/ydb/core/tx/schemeshard/ut_subdomain/ya.make +++ b/ydb/core/tx/schemeshard/ut_subdomain/ya.make @@ -18,6 +18,8 @@ PEERDIR( ydb/core/testlib/default ydb/core/tx ydb/core/tx/schemeshard/ut_helpers + ydb/core/tx/columnshard/hooks/testing + ydb/core/tx/columnshard/test_helper yql/essentials/public/udf/service/exception_policy )
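
For reference, a minimal standalone sketch of the delta-based disk-space accounting this patch switches to: TTableAggregatedStats::UpdateShardStats now fills a TDiskSpaceUsageDelta from the old and new per-shard stats (element 0 carries the whole-shard size delta, later elements carry per-storage-pool-kind deltas), and TSubDomainInfo::AggrDiskSpaceUsage applies that delta to the subdomain totals instead of diffing old/new aggregate snapshots. The sketch uses simplified stand-in types and std:: containers rather than the actual YDB headers; names such as TPoolSizes, MakeDelta and Apply are illustrative only and are not the patch's code.

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Simplified stand-ins for the schemeshard types in this patch (illustrative, not the real headers).
struct TStoragePoolStatsDelta {
    int64_t DataSize = 0;
    int64_t IndexSize = 0;
};
// Index 0 carries the whole-shard delta (empty pool kind); later entries are per-pool-kind deltas.
using TDiskSpaceUsageDelta = std::vector<std::pair<std::string, TStoragePoolStatsDelta>>;

struct TPoolSizes {
    uint64_t DataSize = 0;
    uint64_t IndexSize = 0;
};
struct TPartitionStats {
    uint64_t DataSize = 0;
    uint64_t IndexSize = 0;
    std::unordered_map<std::string, TPoolSizes> StoragePoolsStats;
};

// Producer side (cf. TTableAggregatedStats::UpdateShardStats): turn the previously stored
// per-shard stats and the freshly reported ones into a single delta record, instead of
// snapshotting the whole aggregate before and after the update.
TDiskSpaceUsageDelta MakeDelta(const TPartitionStats& oldStats, const TPartitionStats& newStats) {
    TDiskSpaceUsageDelta delta;
    delta.emplace_back(std::string(), TStoragePoolStatsDelta{
        int64_t(newStats.DataSize) - int64_t(oldStats.DataSize),
        int64_t(newStats.IndexSize) - int64_t(oldStats.IndexSize)});
    // Pools present in the new report: difference against the old value (0 if previously unseen).
    for (const auto& [kind, cur] : newStats.StoragePoolsStats) {
        const auto it = oldStats.StoragePoolsStats.find(kind);
        const int64_t oldData = it != oldStats.StoragePoolsStats.end() ? int64_t(it->second.DataSize) : 0;
        const int64_t oldIndex = it != oldStats.StoragePoolsStats.end() ? int64_t(it->second.IndexSize) : 0;
        delta.emplace_back(kind, TStoragePoolStatsDelta{int64_t(cur.DataSize) - oldData,
                                                        int64_t(cur.IndexSize) - oldIndex});
    }
    // Pools missing from the new report: subtract their previous contribution.
    for (const auto& [kind, prev] : oldStats.StoragePoolsStats) {
        if (newStats.StoragePoolsStats.find(kind) == newStats.StoragePoolsStats.end()) {
            delta.emplace_back(kind, TStoragePoolStatsDelta{-int64_t(prev.DataSize), -int64_t(prev.IndexSize)});
        }
    }
    return delta;
}

// Consumer side (cf. TSubDomainInfo::AggrDiskSpaceUsage): apply the same delta to the
// subdomain-level totals, so old/new aggregate snapshots no longer need to be passed around.
struct TDiskSpaceUsage {
    int64_t TablesDataSize = 0;
    int64_t TablesIndexSize = 0;
    std::unordered_map<std::string, TStoragePoolStatsDelta> StoragePoolsUsage; // running totals
};

void Apply(TDiskSpaceUsage& usage, const TDiskSpaceUsageDelta& delta) {
    if (delta.empty()) {
        return; // nothing reported for this shard
    }
    usage.TablesDataSize += delta[0].second.DataSize;
    usage.TablesIndexSize += delta[0].second.IndexSize;
    for (size_t i = 1; i < delta.size(); ++i) {
        auto& pool = usage.StoragePoolsUsage[delta[i].first];
        pool.DataSize += delta[i].second.DataSize;
        pool.IndexSize += delta[i].second.IndexSize;
    }
}

int main() {
    TPartitionStats oldStats{100, 10, {{"hdd", {100, 10}}}};
    TPartitionStats newStats{250, 15, {{"hdd", {200, 10}}, {"ssd", {50, 5}}}};
    TDiskSpaceUsage usage{100, 10, {{"hdd", {100, 10}}}};
    Apply(usage, MakeDelta(oldStats, newStats));
    // Expected totals after applying the delta: Tables 250/15, hdd 200/10, ssd 50/5.
    std::cout << usage.TablesDataSize << " " << usage.TablesIndexSize << "\n";
}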