-
Notifications
You must be signed in to change notification settings - Fork 8.3k
ClickHouse crashes with a segmentation fault when executing INSERT INTO ... SELECT #88417
Description
Describe what's wrong
ClickHouse crashes with a segmentation fault when executing an INSERT INTO ... SELECT query.
The crash occurs when the query runs with SETTINGS group_by_overflow_mode = 'any'; it does not crash with group_by_overflow_mode = 'break'.
SQL as below
INSERT INTO testdb.target_table_buffer ( SrcIp, SrcPort, DestIp, DestPort, SegmentValue ) SELECT if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort), any(SegmentValue) FROM testdb.source_table WHERE timestamp >= 1760160000 AND timestamp < 1760160060 GROUP BY if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort) SETTINGS group_by_overflow_mode = 'any', max_rows_to_group_by = 1000000, max_threads = 5, max_memory_usage = 20000000000
Does it reproduce on the most recent release?
Yes
How to reproduce
- create testdb.source_table if not exists
CREATE TABLE testdb.source_table (timestamp DateTime, SrcIp IPv6, SrcPort UInt16, DestIp IPv6, DestPort UInt16, SegmentValue UInt8, FlowDirection UInt8) ENGINE = MergeTree ORDER BY (timestamp, SrcIp, SrcPort) SETTINGS index_granularity = 8192; - insert sample data
dump.native.gz - create testdb.target_table_buffer
CREATE TABLE testdb.target_table_buffer (SrcIp IPv6, SrcPort UInt16, DestIp IPv6, DestPort UInt16, SegmentValue UInt8) ENGINE = Buffer('testdb', 'target_table', 16, 10, 100, 10000, 1000000, 10000000, 100000000); - create testdb.target_table if not exists
CREATE TABLE testdb.target_table (SrcIp IPv6, SrcPort UInt16, DestIp IPv6, DestPort UInt16, SegmentValue UInt8) ENGINE = MergeTree ORDER BY (SrcIp, SrcPort) SETTINGS index_granularity = 8192; - execute the INSERT ... SELECT SQL
INSERT INTO testdb.target_table_buffer( SrcIp, SrcPort, DestIp, DestPort, SegmentValue ) SELECT if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort), any(SegmentValue) FROM testdb.source_table WHERE timestamp >= 1760160000 AND timestamp < 1760160060 GROUP BY if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort) SETTINGS group_by_overflow_mode = 'any', max_rows_to_group_by = 1000000, max_threads = 5, max_memory_usage = 20000000000
Reproduce script
#!/bin/bash
# Reproduction script for ClickHouse segmentation fault on
# INSERT INTO ... SELECT with group_by_overflow_mode = 'any' (issue #88417).
#
# Usage: ./reproduce.sh [host] [port] [user] [password]
# Expects the DDL files, insert.sql, and dump.native.gz in the same
# directory as this script.
#
# NOTE(review): the script deliberately does NOT use `set -e`; every
# clickhouse-client call handles its own failure explicitly below.
# ClickHouse connection parameters (positional args with defaults)
HOST=${1:-localhost}
PORT=${2:-9000}
USER=${3:-default}
PASSWORD=${4:-}
# Script directory (resolved so the script works from any CWD)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# ClickHouse client command
# CH_CLIENT is used unquoted later on purpose, so the options word-split
# into separate arguments. This breaks if HOST/USER/PASSWORD contain spaces.
if [ -n "$PASSWORD" ]; then
CH_CLIENT="clickhouse-client --host=$HOST --port=$PORT --user=$USER --password=$PASSWORD"
else
CH_CLIENT="clickhouse-client --host=$HOST --port=$PORT --user=$USER"
fi
echo "=== ClickHouse Reproduction Script ==="
echo "Connection info: $HOST:$PORT (user: $USER)"
echo "Script directory: $SCRIPT_DIR"
echo
# Check if required files exist before touching the server
required_files=("source_table_ddl.sql" "target_table_ddl.sql" "target_buffer_table_ddl.sql" "insert.sql" "dump.native.gz")
for file in "${required_files[@]}"; do
if [ ! -f "$SCRIPT_DIR/$file" ]; then
echo "Error: Required file $file not found"
exit 1
fi
done
# 1. Create database if not exists
echo "Step 1: Create database testdb (if not exists)"
$CH_CLIENT --query="CREATE DATABASE IF NOT EXISTS testdb" || {
echo "Error: Failed to create database"
exit 1
}
echo "✓ Database created successfully"
echo
# 2. Drop existing tables if they exist (drop order: buffer first, so the
# Buffer engine does not try to flush into an already-dropped target table)
echo "Step 2: Drop existing tables"
tables=("testdb.target_table_buffer" "testdb.target_table" "testdb.source_table")
for table in "${tables[@]}"; do
echo "Dropping table: $table"
$CH_CLIENT --query="DROP TABLE IF EXISTS $table" || {
echo "Warning: Failed to drop table $table"
}
done
echo "✓ Tables dropped successfully"
echo
# 3. Create source table
echo "Step 3: Create source table"
$CH_CLIENT --query="$(cat "$SCRIPT_DIR/source_table_ddl.sql")" || {
echo "Error: Failed to create source table"
exit 1
}
echo "✓ Source table created successfully"
echo
# 4. Import data to source table (streamed: gzip decompress piped into
# the client in ClickHouse Native format)
echo "Step 4: Import data to source table"
echo "Decompressing and importing dump.native.gz..."
gzip -dc "$SCRIPT_DIR/dump.native.gz" | $CH_CLIENT --query="INSERT INTO testdb.source_table FORMAT Native" || {
echo "Error: Failed to import data to source table"
exit 1
}
# Check imported data count
row_count=$($CH_CLIENT --query="SELECT count() FROM testdb.source_table")
echo "✓ Data import completed, imported $row_count rows"
echo
# 5. Create target table
echo "Step 5: Create target table"
$CH_CLIENT --query="$(cat "$SCRIPT_DIR/target_table_ddl.sql")" || {
echo "Error: Failed to create target table"
exit 1
}
echo "✓ Target table created successfully"
echo
# 6. Create target buffer table
echo "Step 6: Create target buffer table"
$CH_CLIENT --query="$(cat "$SCRIPT_DIR/target_buffer_table_ddl.sql")" || {
echo "Error: Failed to create target buffer table"
exit 1
}
echo "✓ Target buffer table created successfully"
echo
# 7. Execute insert operation — this is the step that triggers the segfault.
# If the server crashes, the client exits non-zero with a connection error
# and the failure branch in step 8 reports it.
echo "Step 7: Execute data insert operation"
echo "Executing insert.sql..."
# Record start time
start_time=$(date +%s)
# Execute insert operation and capture output (stderr merged into stdout)
insert_output=$($CH_CLIENT --query="$(cat "$SCRIPT_DIR/insert.sql")" 2>&1)
insert_exit_code=$?
# Record end time
end_time=$(date +%s)
duration=$((end_time - start_time))
# 8. Check execution result
echo "Step 8: Check execution result"
echo "Execution time: ${duration} seconds"
if [ $insert_exit_code -eq 0 ]; then
echo "✓ Insert operation executed successfully"
# Check inserted data count
# NOTE(review): counts may differ between the two tables because the
# Buffer engine flushes to target_table asynchronously.
buffer_count=$($CH_CLIENT --query="SELECT count() FROM testdb.target_table_buffer")
target_count=$($CH_CLIENT --query="SELECT count() FROM testdb.target_table")
echo "Data count in buffer table: $buffer_count"
echo "Data count in target table: $target_count"
if [ -n "$insert_output" ]; then
echo "Execution output:"
echo "$insert_output"
fi
else
echo "✗ Insert operation failed (exit code: $insert_exit_code)"
echo "Error output:"
echo "$insert_output"
exit 1
fi
echo
echo "=== Script execution completed ==="
Expected behavior
won't crash
Error message and/or stacktrace
2025.10.13 10:56:59.465091 [ 79454 ] BaseDaemon: (version 25.8.8.26 (official build), build id: 1275142F51AB448D8B03351ECF19B4739B97B158, git hash: 8a24750) (from thread 80132) (query_id: 7a7ea968-b5af-425c-8fa8-2033806bec01) (query: INSERT INTO testdb.target_table_buffer (
SrcIp, SrcPort, DestIp, DestPort, SegmentValue
)
SELECT
if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort), any(SegmentValue)
FROM testdb.source_table
WHERE timestamp >= 1760160000 AND timestamp < 1760160060
GROUP BY if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort)
SETTINGS group_by_overflow_mode = 'any', max_rows_to_group_by = 1000000, max_threads = 5, max_memory_usage = 20000000000) Received signal Segmentation fault (11)
[localhost.localdomain] 2025.10.13 10:56:59.465129 [ 79454 ] BaseDaemon: Address: 0x1. Access: read. Address not mapped to object.
[localhost.localdomain] 2025.10.13 10:56:59.465163 [ 79454 ] BaseDaemon: Stack trace: 0x0000000014846d2d 0x00000000177804d0 0x00000000178e652f 0x00000000178e5474 0x000000001777a777 0x000000001775296c 0x0000000017782b62 0x0000000019d90a48 0x0000000019ae0f02 0x0000000019ad2a50 0x0000000019ad6903 0x000000001350a7eb 0x0000000013511b66 0x00000000135077d2 0x000000001350f29a 0x00007f1564d85ea5 0x00007f1564aae96d
[localhost.localdomain] 2025.10.13 10:56:59.465248 [ 79454 ] BaseDaemon: 2. DB::IAggregateFunctionHelper<DB::(anonymous namespace)::AggregateFunctionAny<DB::SingleValueDataFixed<char8_t>>>::addBatchSparse(unsigned long, unsigned long, char**, unsigned long, DB::IColumn const**, DB::Arena*) const @ 0x0000000014846d2d
[localhost.localdomain] 2025.10.13 10:56:59.465303 [ 79454 ] BaseDaemon: 3. DB::Aggregator::executeAggregateInstructions(DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, std::unique_ptr<char* [], std::default_delete<char* []>> const&, unsigned long, bool, bool, bool) const @ 0x00000000177804d0
[localhost.localdomain] 2025.10.13 10:56:59.465348 [ 79454 ] BaseDaemon: 4. void DB::Aggregator::executeImplBatch<false, DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, bool, char*) const @ 0x00000000178e652f
[localhost.localdomain] 2025.10.13 10:56:59.465392 [ 79454 ] BaseDaemon: 5. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x00000000178e5474
[localhost.localdomain] 2025.10.13 10:56:59.465441 [ 79454 ] BaseDaemon: 6. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::Arena*, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, DB::ColumnsHashing::LastElementCacheStats&, bool, bool, char*) const @ 0x000000001777a777
[localhost.localdomain] 2025.10.13 10:56:59.465497 [ 79454 ] BaseDaemon: 7. DB::Aggregator::executeImpl(DB::AggregatedDataVariants&, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x000000001775296c
[localhost.localdomain] 2025.10.13 10:56:59.465541 [ 79454 ] BaseDaemon: 8. DB::Aggregator::executeOnBlock(std::vector<COWDB::IColumn::immutable_ptrDB::IColumn, std::allocator<COWDB::IColumn::immutable_ptrDB::IColumn>>, unsigned long, unsigned long, DB::AggregatedDataVariants&, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, std::vector<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>, std::allocator<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>>>&, bool&) const @ 0x0000000017782b62
[localhost.localdomain] 2025.10.13 10:56:59.465577 [ 79454 ] BaseDaemon: 9. DB::AggregatingTransform::work() @ 0x0000000019d90a48
[localhost.localdomain] 2025.10.13 10:56:59.465624 [ 79454 ] BaseDaemon: 10. DB::ExecutionThreadContext::executeTask() @ 0x0000000019ae0f02
[localhost.localdomain] 2025.10.13 10:56:59.465669 [ 79454 ] BaseDaemon: 11. DB::PipelineExecutor::executeStepImpl(unsigned long, DB::IAcquiredSlot*, std::atomic) @ 0x0000000019ad2a50
[localhost.localdomain] 2025.10.13 10:56:59.465713 [ 79454 ] BaseDaemon: 12. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<DB::PipelineExecutor::spawnThreads(std::shared_ptrDB::IAcquiredSlot)::$_0, void ()>>(std::__function::__policy_storage const) @ 0x0000000019ad6903
[localhost.localdomain] 2025.10.13 10:56:59.465758 [ 79454 ] BaseDaemon: 13. ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::worker() @ 0x000000001350a7eb
[localhost.localdomain] 2025.10.13 10:56:59.465797 [ 79454 ] BaseDaemon: 14. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<ThreadFromGlobalPoolImpl<false, true>::ThreadFromGlobalPoolImpl<void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool>(void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::&&)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x0000000013511b66
[localhost.localdomain] 2025.10.13 10:56:59.465834 [ 79454 ] BaseDaemon: 15. ThreadPoolImplstd::thread::ThreadFromThreadPool::worker() @ 0x00000000135077d2
[localhost.localdomain] 2025.10.13 10:56:59.465879 [ 79454 ] BaseDaemon: 16. void* std::__thread_proxy[abi:ne190107]<std::tuple<std::unique_ptr<std::__thread_struct, std::default_deletestd::__thread_struct>, void (ThreadPoolImplstd::thread::ThreadFromThreadPool::)(), ThreadPoolImplstd::thread::ThreadFromThreadPool>>(void*) @ 0x000000001350f29a
[localhost.localdomain] 2025.10.13 10:56:59.465934 [ 79454 ] BaseDaemon: 17. start_thread @ 0x0000000000007ea5
[localhost.localdomain] 2025.10.13 10:56:59.465992 [ 79454 ] BaseDaemon: 18. __clone @ 0x00000000000fe96d
[localhost.localdomain] 2025.10.13 10:56:59.730678 [ 79454 ] BaseDaemon: Integrity check of the executable successfully passed (checksum: C9F4DE12801BA5BB5CA0888A285B0309)
[localhost.localdomain] 2025.10.13 10:56:59.733321 [ 79454 ] BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues
[localhost.localdomain] 2025.10.13 10:56:59.733450 [ 79454 ] BaseDaemon: Changed settings: max_threads = 5, max_query_size = 10485760, use_uncompressed_cache = false, os_thread_priority = 1, max_rows_to_group_by = 1000000, group_by_overflow_mode = 'any', max_ast_elements = 10000000, max_expanded_ast_elements = 10000000, max_memory_usage = 20000000000, parallel_view_processing = false, optimize_aggregation_in_order = true, optimize_on_insert = false, parallel_replicas_for_cluster_engines = false, background_pool_size = 36
[localhost.localdomain] 2025.10.13 10:56:59.733588 [ 79454 ] BaseDaemon: ########################################
[localhost.localdomain] 2025.10.13 10:56:59.733599 [ 79454 ] BaseDaemon: (version 25.8.8.26 (official build), build id: 1275142F51AB448D8B03351ECF19B4739B97B158, git hash: 8a24750) (from thread 80219) (query_id: 7a7ea968-b5af-425c-8fa8-2033806bec01) (query: INSERT INTO testdb.target_table_buffer (
SrcIp, SrcPort, DestIp, DestPort, SegmentValue
)
SELECT
if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort), any(SegmentValue)
FROM testdb.source_table
WHERE timestamp >= 1760160000 AND timestamp < 1760160060
GROUP BY if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort)
SETTINGS group_by_overflow_mode = 'any', max_rows_to_group_by = 1000000, max_threads = 5, max_memory_usage = 20000000000) Received signal Segmentation fault (11)
[localhost.localdomain] 2025.10.13 10:56:59.733612 [ 79454 ] BaseDaemon: Address: 0x1. Access: read. Address not mapped to object.
[localhost.localdomain] 2025.10.13 10:56:59.733622 [ 79454 ] BaseDaemon: Stack trace: 0x0000000014846d2d 0x00000000177804d0 0x00000000178e652f 0x00000000178e5474 0x000000001777a777 0x000000001775296c 0x0000000017782b62 0x0000000019d90a48 0x0000000019ae0f02 0x0000000019ad2a50 0x0000000019ad6903 0x000000001350a7eb 0x0000000013511b66 0x00000000135077d2 0x000000001350f29a 0x00007f1564d85ea5 0x00007f1564aae96d
[localhost.localdomain] 2025.10.13 10:56:59.733661 [ 79454 ] BaseDaemon: 2. DB::IAggregateFunctionHelper<DB::(anonymous namespace)::AggregateFunctionAny<DB::SingleValueDataFixed<char8_t>>>::addBatchSparse(unsigned long, unsigned long, char**, unsigned long, DB::IColumn const**, DB::Arena*) const @ 0x0000000014846d2d
[localhost.localdomain] 2025.10.13 10:56:59.733683 [ 79454 ] BaseDaemon: 3. DB::Aggregator::executeAggregateInstructions(DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, std::unique_ptr<char* [], std::default_delete<char* []>> const&, unsigned long, bool, bool, bool) const @ 0x00000000177804d0
[localhost.localdomain] 2025.10.13 10:56:59.733707 [ 79454 ] BaseDaemon: 4. void DB::Aggregator::executeImplBatch<false, DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, bool, char*) const @ 0x00000000178e652f
[localhost.localdomain] 2025.10.13 10:56:59.733728 [ 79454 ] BaseDaemon: 5. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x00000000178e5474
[localhost.localdomain] 2025.10.13 10:56:59.733753 [ 79454 ] BaseDaemon: 6. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::Arena*, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, DB::ColumnsHashing::LastElementCacheStats&, bool, bool, char*) const @ 0x000000001777a777
[localhost.localdomain] 2025.10.13 10:56:59.733771 [ 79454 ] BaseDaemon: 7. DB::Aggregator::executeImpl(DB::AggregatedDataVariants&, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x000000001775296c
[localhost.localdomain] 2025.10.13 10:56:59.733790 [ 79454 ] BaseDaemon: 8. DB::Aggregator::executeOnBlock(std::vector<COWDB::IColumn::immutable_ptrDB::IColumn, std::allocator<COWDB::IColumn::immutable_ptrDB::IColumn>>, unsigned long, unsigned long, DB::AggregatedDataVariants&, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, std::vector<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>, std::allocator<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>>>&, bool&) const @ 0x0000000017782b62
[localhost.localdomain] 2025.10.13 10:56:59.733807 [ 79454 ] BaseDaemon: 9. DB::AggregatingTransform::work() @ 0x0000000019d90a48
[localhost.localdomain] 2025.10.13 10:56:59.733822 [ 79454 ] BaseDaemon: 10. DB::ExecutionThreadContext::executeTask() @ 0x0000000019ae0f02
[localhost.localdomain] 2025.10.13 10:56:59.733840 [ 79454 ] BaseDaemon: 11. DB::PipelineExecutor::executeStepImpl(unsigned long, DB::IAcquiredSlot*, std::atomic) @ 0x0000000019ad2a50
[localhost.localdomain] 2025.10.13 10:56:59.733861 [ 79454 ] BaseDaemon: 12. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<DB::PipelineExecutor::spawnThreads(std::shared_ptrDB::IAcquiredSlot)::$_0, void ()>>(std::__function::__policy_storage const) @ 0x0000000019ad6903
[localhost.localdomain] 2025.10.13 10:56:59.733883 [ 79454 ] BaseDaemon: 13. ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::worker() @ 0x000000001350a7eb
[localhost.localdomain] 2025.10.13 10:56:59.733908 [ 79454 ] BaseDaemon: 14. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<ThreadFromGlobalPoolImpl<false, true>::ThreadFromGlobalPoolImpl<void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool>(void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::&&)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x0000000013511b66
[localhost.localdomain] 2025.10.13 10:56:59.733923 [ 79454 ] BaseDaemon: 15. ThreadPoolImplstd::thread::ThreadFromThreadPool::worker() @ 0x00000000135077d2
[localhost.localdomain] 2025.10.13 10:56:59.733947 [ 79454 ] BaseDaemon: 16. void* std::__thread_proxy[abi:ne190107]<std::tuple<std::unique_ptr<std::__thread_struct, std::default_deletestd::__thread_struct>, void (ThreadPoolImplstd::thread::ThreadFromThreadPool::)(), ThreadPoolImplstd::thread::ThreadFromThreadPool>>(void*) @ 0x000000001350f29a
[localhost.localdomain] 2025.10.13 10:56:59.733987 [ 79454 ] BaseDaemon: 17. start_thread @ 0x0000000000007ea5
[localhost.localdomain] 2025.10.13 10:56:59.734034 [ 79454 ] BaseDaemon: 18. __clone @ 0x00000000000fe96d
[localhost.localdomain] 2025.10.13 10:56:59.998626 [ 79454 ] BaseDaemon: Integrity check of the executable successfully passed (checksum: C9F4DE12801BA5BB5CA0888A285B0309)
[localhost.localdomain] 2025.10.13 10:57:00.000481 [ 79454 ] BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues
[localhost.localdomain] 2025.10.13 10:57:00.000588 [ 79454 ] BaseDaemon: Changed settings: max_threads = 5, max_query_size = 10485760, use_uncompressed_cache = false, os_thread_priority = 1, max_rows_to_group_by = 1000000, group_by_overflow_mode = 'any', max_ast_elements = 10000000, max_expanded_ast_elements = 10000000, max_memory_usage = 20000000000, parallel_view_processing = false, optimize_aggregation_in_order = true, optimize_on_insert = false, parallel_replicas_for_cluster_engines = false, background_pool_size = 36
[localhost.localdomain] 2025.10.13 10:57:00.000747 [ 79454 ] BaseDaemon: ########################################
[localhost.localdomain] 2025.10.13 10:57:00.000764 [ 79454 ] BaseDaemon: (version 25.8.8.26 (official build), build id: 1275142F51AB448D8B03351ECF19B4739B97B158, git hash: 8a24750) (from thread 80095) (query_id: 7a7ea968-b5af-425c-8fa8-2033806bec01) (query: INSERT INTO testdb.target_table_buffer (
SrcIp, SrcPort, DestIp, DestPort, SegmentValue
)
SELECT
if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort), any(SegmentValue)
FROM testdb.source_table
WHERE timestamp >= 1760160000 AND timestamp < 1760160060
GROUP BY if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort)
SETTINGS group_by_overflow_mode = 'any', max_rows_to_group_by = 1000000, max_threads = 5, max_memory_usage = 20000000000) Received signal Segmentation fault (11)
[localhost.localdomain] 2025.10.13 10:57:00.000784 [ 79454 ] BaseDaemon: Address: 0x1. Access: read. Address not mapped to object.
[localhost.localdomain] 2025.10.13 10:57:00.000792 [ 79454 ] BaseDaemon: Stack trace: 0x0000000014846d2d 0x00000000177804d0 0x00000000178e652f 0x00000000178e5474 0x000000001777a777 0x000000001775296c 0x0000000017782b62 0x0000000019d90a48 0x0000000019ae0f02 0x0000000019ad2a50 0x0000000019ad6903 0x000000001350a7eb 0x0000000013511b66 0x00000000135077d2 0x000000001350f29a 0x00007f1564d85ea5 0x00007f1564aae96d
[localhost.localdomain] 2025.10.13 10:57:00.000839 [ 79454 ] BaseDaemon: 2. DB::IAggregateFunctionHelper<DB::(anonymous namespace)::AggregateFunctionAny<DB::SingleValueDataFixed<char8_t>>>::addBatchSparse(unsigned long, unsigned long, char**, unsigned long, DB::IColumn const**, DB::Arena*) const @ 0x0000000014846d2d
[localhost.localdomain] 2025.10.13 10:57:00.000873 [ 79454 ] BaseDaemon: 3. DB::Aggregator::executeAggregateInstructions(DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, std::unique_ptr<char* [], std::default_delete<char* []>> const&, unsigned long, bool, bool, bool) const @ 0x00000000177804d0
[localhost.localdomain] 2025.10.13 10:57:00.000909 [ 79454 ] BaseDaemon: 4. void DB::Aggregator::executeImplBatch<false, DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, bool, char*) const @ 0x00000000178e652f
[localhost.localdomain] 2025.10.13 10:57:00.000959 [ 79454 ] BaseDaemon: 5. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x00000000178e5474
[localhost.localdomain] 2025.10.13 10:57:00.000987 [ 79454 ] BaseDaemon: 6. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::Arena*, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, DB::ColumnsHashing::LastElementCacheStats&, bool, bool, char*) const @ 0x000000001777a777
[localhost.localdomain] 2025.10.13 10:57:00.001009 [ 79454 ] BaseDaemon: 7. DB::Aggregator::executeImpl(DB::AggregatedDataVariants&, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x000000001775296c
[localhost.localdomain] 2025.10.13 10:57:00.001037 [ 79454 ] BaseDaemon: 8. DB::Aggregator::executeOnBlock(std::vector<COWDB::IColumn::immutable_ptrDB::IColumn, std::allocator<COWDB::IColumn::immutable_ptrDB::IColumn>>, unsigned long, unsigned long, DB::AggregatedDataVariants&, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, std::vector<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>, std::allocator<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>>>&, bool&) const @ 0x0000000017782b62
[localhost.localdomain] 2025.10.13 10:57:00.001061 [ 79454 ] BaseDaemon: 9. DB::AggregatingTransform::work() @ 0x0000000019d90a48
[localhost.localdomain] 2025.10.13 10:57:00.001080 [ 79454 ] BaseDaemon: 10. DB::ExecutionThreadContext::executeTask() @ 0x0000000019ae0f02
[localhost.localdomain] 2025.10.13 10:57:00.001101 [ 79454 ] BaseDaemon: 11. DB::PipelineExecutor::executeStepImpl(unsigned long, DB::IAcquiredSlot*, std::atomic) @ 0x0000000019ad2a50
[localhost.localdomain] 2025.10.13 10:57:00.001126 [ 79454 ] BaseDaemon: 12. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<DB::PipelineExecutor::spawnThreads(std::shared_ptrDB::IAcquiredSlot)::$_0, void ()>>(std::__function::__policy_storage const) @ 0x0000000019ad6903
[localhost.localdomain] 2025.10.13 10:57:00.001150 [ 79454 ] BaseDaemon: 13. ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::worker() @ 0x000000001350a7eb
[localhost.localdomain] 2025.10.13 10:57:00.001179 [ 79454 ] BaseDaemon: 14. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<ThreadFromGlobalPoolImpl<false, true>::ThreadFromGlobalPoolImpl<void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool>(void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::&&)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x0000000013511b66
[localhost.localdomain] 2025.10.13 10:57:00.001197 [ 79454 ] BaseDaemon: 15. ThreadPoolImplstd::thread::ThreadFromThreadPool::worker() @ 0x00000000135077d2
[localhost.localdomain] 2025.10.13 10:57:00.001225 [ 79454 ] BaseDaemon: 16. void* std::__thread_proxy[abi:ne190107]<std::tuple<std::unique_ptr<std::__thread_struct, std::default_deletestd::__thread_struct>, void (ThreadPoolImplstd::thread::ThreadFromThreadPool::)(), ThreadPoolImplstd::thread::ThreadFromThreadPool>>(void*) @ 0x000000001350f29a
[localhost.localdomain] 2025.10.13 10:57:00.001258 [ 79454 ] BaseDaemon: 17. start_thread @ 0x0000000000007ea5
[localhost.localdomain] 2025.10.13 10:57:00.001312 [ 79454 ] BaseDaemon: 18. __clone @ 0x00000000000fe96d
[localhost.localdomain] 2025.10.13 10:57:00.271799 [ 79454 ] BaseDaemon: Integrity check of the executable successfully passed (checksum: C9F4DE12801BA5BB5CA0888A285B0309)
[localhost.localdomain] 2025.10.13 10:57:00.273596 [ 79454 ] BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues
[localhost.localdomain] 2025.10.13 10:57:00.273697 [ 79454 ] BaseDaemon: Changed settings: max_threads = 5, max_query_size = 10485760, use_uncompressed_cache = false, os_thread_priority = 1, max_rows_to_group_by = 1000000, group_by_overflow_mode = 'any', max_ast_elements = 10000000, max_expanded_ast_elements = 10000000, max_memory_usage = 20000000000, parallel_view_processing = false, optimize_aggregation_in_order = true, optimize_on_insert = false, parallel_replicas_for_cluster_engines = false, background_pool_size = 36
[localhost.localdomain] 2025.10.13 10:57:00.273815 [ 79454 ] BaseDaemon: ########################################
[localhost.localdomain] 2025.10.13 10:57:00.273828 [ 79454 ] BaseDaemon: (version 25.8.8.26 (official build), build id: 1275142F51AB448D8B03351ECF19B4739B97B158, git hash: 8a24750) (from thread 80195) (query_id: 7a7ea968-b5af-425c-8fa8-2033806bec01) (query: INSERT INTO testdb.target_table_buffer (
SrcIp, SrcPort, DestIp, DestPort, SegmentValue
)
SELECT
if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort), any(SegmentValue)
FROM testdb.source_table
WHERE timestamp >= 1760160000 AND timestamp < 1760160060
GROUP BY if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort)
SETTINGS group_by_overflow_mode = 'any', max_rows_to_group_by = 1000000, max_threads = 5, max_memory_usage = 20000000000) Received signal Segmentation fault (11)
[localhost.localdomain] 2025.10.13 10:57:00.273842 [ 79454 ] BaseDaemon: Address: 0x1. Access: read. Address not mapped to object.
[localhost.localdomain] 2025.10.13 10:57:00.273847 [ 79454 ] BaseDaemon: Stack trace: 0x0000000014846d2d 0x00000000177804d0 0x00000000178e652f 0x00000000178e5474 0x000000001777a777 0x000000001775296c 0x0000000017782b62 0x0000000019d90a48 0x0000000019ae0f02 0x0000000019ad2a50 0x0000000019ad6903 0x000000001350a7eb 0x0000000013511b66 0x00000000135077d2 0x000000001350f29a 0x00007f1564d85ea5 0x00007f1564aae96d
[localhost.localdomain] 2025.10.13 10:57:00.273904 [ 79454 ] BaseDaemon: 2. DB::IAggregateFunctionHelper<DB::(anonymous namespace)::AggregateFunctionAny<DB::SingleValueDataFixed<char8_t>>>::addBatchSparse(unsigned long, unsigned long, char**, unsigned long, DB::IColumn const**, DB::Arena*) const @ 0x0000000014846d2d
[localhost.localdomain] 2025.10.13 10:57:00.273930 [ 79454 ] BaseDaemon: 3. DB::Aggregator::executeAggregateInstructions(DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, std::unique_ptr<char* [], std::default_delete<char* []>> const&, unsigned long, bool, bool, bool) const @ 0x00000000177804d0
[localhost.localdomain] 2025.10.13 10:57:00.273955 [ 79454 ] BaseDaemon: 4. void DB::Aggregator::executeImplBatch<false, DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, bool, char*) const @ 0x00000000178e652f
[localhost.localdomain] 2025.10.13 10:57:00.273978 [ 79454 ] BaseDaemon: 5. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x00000000178e5474
[localhost.localdomain] 2025.10.13 10:57:00.274002 [ 79454 ] BaseDaemon: 6. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::Arena*, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, DB::ColumnsHashing::LastElementCacheStats&, bool, bool, char*) const @ 0x000000001777a777
[localhost.localdomain] 2025.10.13 10:57:00.274022 [ 79454 ] BaseDaemon: 7. DB::Aggregator::executeImpl(DB::AggregatedDataVariants&, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x000000001775296c
[localhost.localdomain] 2025.10.13 10:57:00.274044 [ 79454 ] BaseDaemon: 8. DB::Aggregator::executeOnBlock(std::vector<COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::allocator<COW<DB::IColumn>::immutable_ptr<DB::IColumn>>>, unsigned long, unsigned long, DB::AggregatedDataVariants&, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, std::vector<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>, std::allocator<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>>>&, bool&) const @ 0x0000000017782b62
[localhost.localdomain] 2025.10.13 10:57:00.274061 [ 79454 ] BaseDaemon: 9. DB::AggregatingTransform::work() @ 0x0000000019d90a48
[localhost.localdomain] 2025.10.13 10:57:00.274078 [ 79454 ] BaseDaemon: 10. DB::ExecutionThreadContext::executeTask() @ 0x0000000019ae0f02
[localhost.localdomain] 2025.10.13 10:57:00.274092 [ 79454 ] BaseDaemon: 11. DB::PipelineExecutor::executeStepImpl(unsigned long, DB::IAcquiredSlot*, std::atomic) @ 0x0000000019ad2a50
[localhost.localdomain] 2025.10.13 10:57:00.274113 [ 79454 ] BaseDaemon: 12. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<DB::PipelineExecutor::spawnThreads(std::shared_ptr<DB::IAcquiredSlot>)::$_0, void ()>>(std::__function::__policy_storage const*) @ 0x0000000019ad6903
[localhost.localdomain] 2025.10.13 10:57:00.274130 [ 79454 ] BaseDaemon: 13. ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::worker() @ 0x000000001350a7eb
[localhost.localdomain] 2025.10.13 10:57:00.274153 [ 79454 ] BaseDaemon: 14. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<ThreadFromGlobalPoolImpl<false, true>::ThreadFromGlobalPoolImpl<void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool>(void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::&&)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x0000000013511b66
[localhost.localdomain] 2025.10.13 10:57:00.274184 [ 79454 ] BaseDaemon: 15. ThreadPoolImpl<std::thread>::ThreadFromThreadPool::worker() @ 0x00000000135077d2
[localhost.localdomain] 2025.10.13 10:57:00.274202 [ 79454 ] BaseDaemon: 16. void* std::__thread_proxy[abi:ne190107]<std::tuple<std::unique_ptr<std::__thread_struct, std::default_delete<std::__thread_struct>>, void (ThreadPoolImpl<std::thread>::ThreadFromThreadPool::*)(), ThreadPoolImpl<std::thread>::ThreadFromThreadPool*>>(void*) @ 0x000000001350f29a
[localhost.localdomain] 2025.10.13 10:57:00.274231 [ 79454 ] BaseDaemon: 17. start_thread @ 0x0000000000007ea5
[localhost.localdomain] 2025.10.13 10:57:00.274264 [ 79454 ] BaseDaemon: 18. __clone @ 0x00000000000fe96d
[localhost.localdomain] 2025.10.13 10:57:00.538885 [ 79454 ] BaseDaemon: Integrity check of the executable successfully passed (checksum: C9F4DE12801BA5BB5CA0888A285B0309)
[localhost.localdomain] 2025.10.13 10:57:00.540651 [ 79454 ] BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues
[localhost.localdomain] 2025.10.13 10:57:00.540737 [ 79454 ] BaseDaemon: Changed settings: max_threads = 5, max_query_size = 10485760, use_uncompressed_cache = false, os_thread_priority = 1, max_rows_to_group_by = 1000000, group_by_overflow_mode = 'any', max_ast_elements = 10000000, max_expanded_ast_elements = 10000000, max_memory_usage = 20000000000, parallel_view_processing = false, optimize_aggregation_in_order = true, optimize_on_insert = false, parallel_replicas_for_cluster_engines = false, background_pool_size = 36
[localhost.localdomain] 2025.10.13 10:57:00.540851 [ 79454 ] BaseDaemon: ########################################
[localhost.localdomain] 2025.10.13 10:57:00.540865 [ 79454 ] BaseDaemon: (version 25.8.8.26 (official build), build id: 1275142F51AB448D8B03351ECF19B4739B97B158, git hash: 8a24750) (from thread 80144) (query_id: 7a7ea968-b5af-425c-8fa8-2033806bec01) (query: INSERT INTO testdb.target_table_buffer (
SrcIp, SrcPort, DestIp, DestPort, SegmentValue
)
SELECT
if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort), any(SegmentValue)
FROM testdb.source_table
WHERE timestamp >= 1760160000 AND timestamp < 1760160060
GROUP BY if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort)
SETTINGS group_by_overflow_mode = 'any', max_rows_to_group_by = 1000000, max_threads = 5, max_memory_usage = 20000000000) Received signal Segmentation fault (11)
[localhost.localdomain] 2025.10.13 10:57:00.540876 [ 79454 ] BaseDaemon: Address: 0x1. Access: read. Address not mapped to object.
[localhost.localdomain] 2025.10.13 10:57:00.540888 [ 79454 ] BaseDaemon: Stack trace: 0x0000000014846d2d 0x00000000177804d0 0x00000000178e652f 0x00000000178e5474 0x000000001777a777 0x000000001775296c 0x0000000017782b62 0x0000000019d90a48 0x0000000019ae0f02 0x0000000019ad2a50 0x0000000019ad6903 0x000000001350a7eb 0x0000000013511b66 0x00000000135077d2 0x000000001350f29a 0x00007f1564d85ea5 0x00007f1564aae96d
[localhost.localdomain] 2025.10.13 10:57:00.540931 [ 79454 ] BaseDaemon: 2. DB::IAggregateFunctionHelper<DB::(anonymous namespace)::AggregateFunctionAny<DB::SingleValueDataFixed<char8_t>>>::addBatchSparse(unsigned long, unsigned long, char**, unsigned long, DB::IColumn const**, DB::Arena*) const @ 0x0000000014846d2d
[localhost.localdomain] 2025.10.13 10:57:00.540954 [ 79454 ] BaseDaemon: 3. DB::Aggregator::executeAggregateInstructions(DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, std::unique_ptr<char* [], std::default_delete<char* []>> const&, unsigned long, bool, bool, bool) const @ 0x00000000177804d0
[localhost.localdomain] 2025.10.13 10:57:00.540981 [ 79454 ] BaseDaemon: 4. void DB::Aggregator::executeImplBatch<false, DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, bool, char*) const @ 0x00000000178e652f
[localhost.localdomain] 2025.10.13 10:57:00.541000 [ 79454 ] BaseDaemon: 5. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::ColumnsHashing::HashMethodSerialized<PairNoInit<StringRef, char*>, char*, false, false>&, DB::Arena*, unsigned long, unsigned long, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x00000000178e5474
[localhost.localdomain] 2025.10.13 10:57:00.541025 [ 79454 ] BaseDaemon: 6. void DB::Aggregator::executeImpl<DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>>(DB::AggregationMethodSerialized<TwoLevelHashMapTable<StringRef, HashMapCellWithSavedHash<StringRef, char*, DefaultHash, HashTableNoState>, DefaultHash, TwoLevelHashTableGrower<8ul>, Allocator<true, true>, HashMapTable>, false, false>&, DB::Arena*, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, DB::ColumnsHashing::LastElementCacheStats&, bool, bool, char*) const @ 0x000000001777a777
[localhost.localdomain] 2025.10.13 10:57:00.541046 [ 79454 ] BaseDaemon: 7. DB::Aggregator::executeImpl(DB::AggregatedDataVariants&, unsigned long, unsigned long, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, DB::Aggregator::AggregateFunctionInstruction*, bool, bool, char*) const @ 0x000000001775296c
[localhost.localdomain] 2025.10.13 10:57:00.541069 [ 79454 ] BaseDaemon: 8. DB::Aggregator::executeOnBlock(std::vector<COW<DB::IColumn>::immutable_ptr<DB::IColumn>, std::allocator<COW<DB::IColumn>::immutable_ptr<DB::IColumn>>>, unsigned long, unsigned long, DB::AggregatedDataVariants&, std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>&, std::vector<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>, std::allocator<std::vector<DB::IColumn const*, std::allocator<DB::IColumn const*>>>>&, bool&) const @ 0x0000000017782b62
[localhost.localdomain] 2025.10.13 10:57:00.541086 [ 79454 ] BaseDaemon: 9. DB::AggregatingTransform::work() @ 0x0000000019d90a48
[localhost.localdomain] 2025.10.13 10:57:00.541100 [ 79454 ] BaseDaemon: 10. DB::ExecutionThreadContext::executeTask() @ 0x0000000019ae0f02
[localhost.localdomain] 2025.10.13 10:57:00.541116 [ 79454 ] BaseDaemon: 11. DB::PipelineExecutor::executeStepImpl(unsigned long, DB::IAcquiredSlot*, std::atomic) @ 0x0000000019ad2a50
[localhost.localdomain] 2025.10.13 10:57:00.541137 [ 79454 ] BaseDaemon: 12. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<DB::PipelineExecutor::spawnThreads(std::shared_ptr<DB::IAcquiredSlot>)::$_0, void ()>>(std::__function::__policy_storage const*) @ 0x0000000019ad6903
[localhost.localdomain] 2025.10.13 10:57:00.541154 [ 79454 ] BaseDaemon: 13. ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::worker() @ 0x000000001350a7eb
[localhost.localdomain] 2025.10.13 10:57:00.541177 [ 79454 ] BaseDaemon: 14. void std::__function::__policy_invoker<void ()>::__call_impl[abi:ne190107]<std::__function::__default_alloc_func<ThreadFromGlobalPoolImpl<false, true>::ThreadFromGlobalPoolImpl<void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool>(void (ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool::&&)(), ThreadPoolImpl<ThreadFromGlobalPoolImpl<false, true>>::ThreadFromThreadPool&&)::'lambda'(), void ()>>(std::__function::__policy_storage const*) @ 0x0000000013511b66
[localhost.localdomain] 2025.10.13 10:57:00.541191 [ 79454 ] BaseDaemon: 15. ThreadPoolImpl<std::thread>::ThreadFromThreadPool::worker() @ 0x00000000135077d2
[localhost.localdomain] 2025.10.13 10:57:00.541212 [ 79454 ] BaseDaemon: 16. void* std::__thread_proxy[abi:ne190107]<std::tuple<std::unique_ptr<std::__thread_struct, std::default_delete<std::__thread_struct>>, void (ThreadPoolImpl<std::thread>::ThreadFromThreadPool::*)(), ThreadPoolImpl<std::thread>::ThreadFromThreadPool*>>(void*) @ 0x000000001350f29a
[localhost.localdomain] 2025.10.13 10:57:00.541242 [ 79454 ] BaseDaemon: 17. start_thread @ 0x0000000000007ea5
[localhost.localdomain] 2025.10.13 10:57:00.541278 [ 79454 ] BaseDaemon: 18. __clone @ 0x00000000000fe96d
[localhost.localdomain] 2025.10.13 10:57:00.806301 [ 79454 ] BaseDaemon: Integrity check of the executable successfully passed (checksum: C9F4DE12801BA5BB5CA0888A285B0309)
[localhost.localdomain] 2025.10.13 10:57:00.807932 [ 79454 ] BaseDaemon: Report this error to https://github.com/ClickHouse/ClickHouse/issues
[localhost.localdomain] 2025.10.13 10:57:00.808024 [ 79454 ] BaseDaemon: Changed settings: max_threads = 5, max_query_size = 10485760, use_uncompressed_cache = false, os_thread_priority = 1, max_rows_to_group_by = 1000000, group_by_overflow_mode = 'any', max_ast_elements = 10000000, max_expanded_ast_elements = 10000000, max_memory_usage = 20000000000, parallel_view_processing = false, optimize_aggregation_in_order = true, optimize_on_insert = false, parallel_replicas_for_cluster_engines = false, background_pool_size = 36
Error on processing query: Code: 32. DB::Exception: Attempt to read after eof: while receiving packet from 127.0.0.1:9001, local address: 127.0.0.1:42318. (ATTEMPT_TO_READ_AFTER_EOF) (version 25.8.8.26 (official build))
(query: INSERT INTO testdb.target_table_buffer (
SrcIp, SrcPort, DestIp, DestPort, SegmentValue
)
SELECT
if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort), any(SegmentValue)
FROM testdb.source_table
WHERE timestamp >= 1760160000 AND timestamp < 1760160060
GROUP BY if(FlowDirection = 0, SrcIp, DestIp), if(FlowDirection = 0, SrcPort, DestPort), if(FlowDirection = 0, DestIp, SrcIp), if(FlowDirection = 0, DestPort, SrcPort)
SETTINGS group_by_overflow_mode = 'any', max_rows_to_group_by = 1000000, max_threads = 5, max_memory_usage = 20000000000)