Skip to content

Commit ce4a3ca

Browse files
poyrazK and github-actions[bot]
authored and committed
style: automated clang-format fixes
1 parent fb0c485 commit ce4a3ca

13 files changed

Lines changed: 158 additions & 99 deletions

File tree

include/catalog/catalog.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,8 @@ class Catalog : public raft::RaftStateMachine {
193193
/**
194194
* @brief Local-only table creation (called by Raft)
195195
*/
196-
oid_t create_table_local(const std::string& table_name, std::vector<ColumnInfo> columns, std::vector<ShardInfo> shards = {});
196+
oid_t create_table_local(const std::string& table_name, std::vector<ColumnInfo> columns,
197+
std::vector<ShardInfo> shards = {});
197198

198199
/**
199200
* @brief Drop a table

include/common/cluster_manager.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,7 +128,8 @@ class ClusterManager {
128128
coordinators.push_back(self_node_);
129129
}
130130
for (const auto& [id, info] : nodes_) {
131-
if (info.role == config::RunMode::Coordinator && info.is_active && id != self_node_.id) {
131+
if (info.role == config::RunMode::Coordinator && info.is_active &&
132+
id != self_node_.id) {
132133
coordinators.push_back(info);
133134
}
134135
}

include/distributed/shard_manager.hpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,9 +7,9 @@
77
#define SQL_ENGINE_DISTRIBUTED_SHARD_MANAGER_HPP
88

99
#include <cstdint>
10+
#include <optional>
1011
#include <string>
1112
#include <vector>
12-
#include <optional>
1313

1414
#include "catalog/catalog.hpp"
1515
#include "common/value.hpp"
@@ -49,7 +49,8 @@ class ShardManager {
4949
/**
5050
* @brief Find which data node is responsible for a given shard ID
5151
*/
52-
static std::optional<cloudsql::ShardInfo> get_target_node(const cloudsql::TableInfo& table, uint32_t shard_id) {
52+
static std::optional<cloudsql::ShardInfo> get_target_node(const cloudsql::TableInfo& table,
53+
uint32_t shard_id) {
5354
for (const auto& shard : table.shards) {
5455
if (shard.shard_id == shard_id) {
5556
return shard;

include/network/rpc_message.hpp

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,8 @@ class Serializer {
5353

5454
if (val.is_numeric()) {
5555
// POC: unify all numerics to 64-bit float/int in the stream for simplicity
56-
if (val.type() == common::ValueType::TYPE_FLOAT32 || val.type() == common::ValueType::TYPE_FLOAT64) {
56+
if (val.type() == common::ValueType::TYPE_FLOAT32 ||
57+
val.type() == common::ValueType::TYPE_FLOAT64) {
5758
double v = val.to_float64();
5859
const size_t offset = out.size();
5960
out.resize(offset + VAL_SIZE_64);
@@ -71,7 +72,7 @@ class Serializer {
7172
case common::ValueType::TYPE_TEXT:
7273
case common::ValueType::TYPE_VARCHAR:
7374
case common::ValueType::TYPE_CHAR: {
74-
const std::string& s = val.to_string(); // fallback to string for anything else
75+
const std::string& s = val.to_string(); // fallback to string for anything else
7576
const auto len = static_cast<uint32_t>(s.size());
7677
const size_t offset = out.size();
7778
out.resize(offset + VAL_SIZE_32 + len);
@@ -261,7 +262,7 @@ struct RegisterNodeArgs {
261262
std::string id;
262263
std::string address;
263264
uint16_t port;
264-
uint8_t mode; // 0: Standalone, 1: Coordinator, 2: Data
265+
uint8_t mode; // 0: Standalone, 1: Coordinator, 2: Data
265266

266267
[[nodiscard]] std::vector<uint8_t> serialize() const {
267268
std::vector<uint8_t> out;

src/catalog/catalog.cpp

Lines changed: 20 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222

2323
namespace cloudsql {
2424

25-
2625
/**
2726
* @brief Create a new catalog
2827
*/
@@ -73,13 +72,14 @@ bool Catalog::save(const std::string& filename) const {
7372
*/
7473
oid_t Catalog::create_table(const std::string& table_name, std::vector<ColumnInfo> columns) {
7574
std::cerr << "--- [Catalog] create_table CALLED for " << table_name << " ---" << std::endl;
76-
75+
7776
// Compute shards from ClusterManager for serialization
7877
std::vector<ShardInfo> shards;
7978
if (cluster_manager_ != nullptr) {
8079
auto data_nodes = cluster_manager_->get_data_nodes();
8180
if (!data_nodes.empty()) {
82-
std::sort(data_nodes.begin(), data_nodes.end(), [](const auto& a, const auto& b) { return a.id < b.id; });
81+
std::sort(data_nodes.begin(), data_nodes.end(),
82+
[](const auto& a, const auto& b) { return a.id < b.id; });
8383
uint32_t sid = 0;
8484
for (const auto& node : data_nodes) {
8585
ShardInfo shard;
@@ -93,7 +93,8 @@ oid_t Catalog::create_table(const std::string& table_name, std::vector<ColumnInf
9393

9494
if (raft_group_ != nullptr) {
9595
// Multi-Raft: Replicate DDL via Catalog Raft Group (ID 0)
96-
// Serialize command: [Type:1][NameLen:4][Name][ColCount:4][Cols...][ShardCount:4][Shards...]
96+
// Serialize command:
97+
// [Type:1][NameLen:4][Name][ColCount:4][Cols...][ShardCount:4][Shards...]
9798
std::vector<uint8_t> cmd;
9899
cmd.push_back(1); // Type 1: CreateTable
99100

@@ -126,7 +127,7 @@ oid_t Catalog::create_table(const std::string& table_name, std::vector<ColumnInf
126127
for (const auto& shard : shards) {
127128
uint32_t addr_len = static_cast<uint32_t>(shard.node_address.size());
128129
offset = cmd.size();
129-
cmd.resize(offset + 4 + addr_len + 4 + 2);
130+
cmd.resize(offset + 4 + addr_len + 4 + 2);
130131
std::memcpy(cmd.data() + offset, &addr_len, 4);
131132
std::memcpy(cmd.data() + offset + 4, shard.node_address.data(), addr_len);
132133
std::memcpy(cmd.data() + offset + 4 + addr_len, &shard.shard_id, 4);
@@ -137,13 +138,15 @@ oid_t Catalog::create_table(const std::string& table_name, std::vector<ColumnInf
137138
return create_table_local(table_name, std::move(columns), std::move(shards));
138139
}
139140
}
140-
141+
141142
return create_table_local(table_name, std::move(columns), std::move(shards));
142143
}
143144

144-
oid_t Catalog::create_table_local(const std::string& table_name, std::vector<ColumnInfo> columns, std::vector<ShardInfo> shards) {
145+
oid_t Catalog::create_table_local(const std::string& table_name, std::vector<ColumnInfo> columns,
146+
std::vector<ShardInfo> shards) {
145147
if (table_exists_by_name(table_name)) {
146-
std::cerr << "--- [Catalog] create_table_local: Table already exists " << table_name << " ---" << std::endl;
148+
std::cerr << "--- [Catalog] create_table_local: Table already exists " << table_name
149+
<< " ---" << std::endl;
147150
auto meta_opt = get_table_by_name(table_name);
148151
return (*meta_opt)->table_id;
149152
}
@@ -158,7 +161,8 @@ oid_t Catalog::create_table_local(const std::string& table_name, std::vector<Col
158161
if (table->shards.empty() && cluster_manager_ != nullptr) {
159162
auto data_nodes = cluster_manager_->get_data_nodes();
160163
if (!data_nodes.empty()) {
161-
std::sort(data_nodes.begin(), data_nodes.end(), [](const auto& a, const auto& b) { return a.id < b.id; });
164+
std::sort(data_nodes.begin(), data_nodes.end(),
165+
[](const auto& a, const auto& b) { return a.id < b.id; });
162166
uint32_t sid = 0;
163167
for (const auto& node : data_nodes) {
164168
ShardInfo shard;
@@ -178,7 +182,8 @@ oid_t Catalog::create_table_local(const std::string& table_name, std::vector<Col
178182
table->shards.push_back(shard);
179183
}
180184

181-
std::cerr << "--- [Catalog] Table " << table_name << " initialized with " << table->shards.size() << " shards ---" << std::endl;
185+
std::cerr << "--- [Catalog] Table " << table_name << " initialized with "
186+
<< table->shards.size() << " shards ---" << std::endl;
182187

183188
const oid_t id = table->table_id;
184189
tables_[id] = std::move(table);
@@ -215,12 +220,13 @@ bool Catalog::drop_table_local(oid_t table_id) {
215220

216221
void Catalog::apply(const raft::LogEntry& entry) {
217222
if (entry.data.empty()) return;
218-
std::cerr << "--- [Catalog] apply CALLED for entry type " << (int)entry.data[0] << " ---" << std::endl;
223+
std::cerr << "--- [Catalog] apply CALLED for entry type " << (int)entry.data[0] << " ---"
224+
<< std::endl;
219225

220226
uint8_t type = entry.data[0];
221227
if (type == 1) { // CreateTable
222228
size_t offset = 1;
223-
229+
224230
uint32_t name_len = 0;
225231
std::memcpy(&name_len, entry.data.data() + offset, 4);
226232
offset += 4;
@@ -296,13 +302,13 @@ std::optional<TableInfo*> Catalog::get_table_by_name(const std::string& table_na
296302
return pair.second.get();
297303
}
298304
}
299-
305+
300306
std::cerr << "--- [Catalog] Table NOT FOUND: " << table_name << ". Catalog contains: ";
301307
for (auto& pair : tables_) {
302308
std::cerr << pair.second->name << ", ";
303309
}
304310
std::cerr << " ---" << std::endl;
305-
311+
306312
return std::nullopt;
307313
}
308314

src/distributed/distributed_executor.cpp

Lines changed: 37 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,9 @@
99
#include <future>
1010
#include <iostream>
1111
#include <string>
12+
#include <thread>
1213
#include <utility>
1314
#include <vector>
14-
#include <thread>
1515

1616
#include "catalog/catalog.hpp"
1717
#include "common/cluster_manager.hpp"
@@ -132,7 +132,7 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
132132
std::vector<ColumnInfo> catalog_cols;
133133
uint16_t pos = 0;
134134
for (const auto& col : ct.columns()) {
135-
common::ValueType vtype = common::ValueType::TYPE_INT32; // Simplified for POC
135+
common::ValueType vtype = common::ValueType::TYPE_INT32; // Simplified for POC
136136
if (col.type_ == "TEXT") vtype = common::ValueType::TYPE_TEXT;
137137
catalog_cols.emplace_back(col.name_, vtype, pos++);
138138
}
@@ -145,7 +145,8 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
145145
}
146146
}
147147

148-
// Explicit forward to data nodes to ensure they have metadata IMMEDIATELY (POC workaround for Raft lag)
148+
// Explicit forward to data nodes to ensure they have metadata IMMEDIATELY (POC
149+
// workaround for Raft lag)
149150
network::ExecuteFragmentArgs args;
150151
args.sql = raw_sql;
151152
args.context_id = "ddl_sync";
@@ -155,10 +156,11 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
155156
network::RpcClient client(node.address, node.cluster_port);
156157
if (client.connect()) {
157158
std::vector<uint8_t> resp;
158-
static_cast<void>(client.call(network::RpcType::ExecuteFragment, payload, resp));
159+
static_cast<void>(
160+
client.call(network::RpcType::ExecuteFragment, payload, resp));
159161
}
160162
}
161-
163+
162164
res.set_rows_affected(1);
163165
// Small sleep after DDL to let things settle
164166
std::this_thread::sleep_for(std::chrono::milliseconds(500));
@@ -357,12 +359,13 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
357359
const auto* insert_stmt = dynamic_cast<const parser::InsertStatement*>(&stmt);
358360
if (insert_stmt != nullptr && !insert_stmt->values().empty()) {
359361
std::unordered_map<uint32_t, std::vector<std::vector<std::string>>> partitions;
360-
362+
361363
for (const auto& row_exprs : insert_stmt->values()) {
362364
if (row_exprs.empty()) continue;
363365
// Assume first column is sharding key
364366
if (row_exprs[0]->type() == parser::ExprType::Constant) {
365-
const auto* const_expr = dynamic_cast<const parser::ConstantExpr*>(row_exprs[0].get());
367+
const auto* const_expr =
368+
dynamic_cast<const parser::ConstantExpr*>(row_exprs[0].get());
366369
if (const_expr != nullptr) {
367370
const common::Value pk_val = const_expr->value();
368371
const uint32_t shard_idx = cluster::ShardManager::compute_shard(
@@ -384,11 +387,13 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
384387
const auto& node = data_nodes[shard_idx];
385388
network::RpcClient client(node.address, node.cluster_port);
386389
if (client.connect()) {
387-
std::string shard_sql = "INSERT INTO " + insert_stmt->table()->to_string() + " VALUES ";
390+
std::string shard_sql =
391+
"INSERT INTO " + insert_stmt->table()->to_string() + " VALUES ";
388392
for (size_t i = 0; i < rows.size(); ++i) {
389393
shard_sql += "(";
390394
for (size_t j = 0; j < rows[i].size(); ++j) {
391-
shard_sql += rows[i][j] + std::string(j == rows[i].size() - 1 ? "" : ", ");
395+
shard_sql +=
396+
rows[i][j] + std::string(j == rows[i].size() - 1 ? "" : ", ");
392397
}
393398
shard_sql += std::string(")") + (i == rows.size() - 1 ? "" : ", ");
394399
}
@@ -411,22 +416,22 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
411416
errors += "[" + node.id + "] Connect failed; ";
412417
}
413418
}
414-
419+
415420
QueryResult res;
416421
if (!errors.empty()) res.set_error(errors);
417422
res.set_rows_affected(total_affected);
418423
return res;
419424
}
420425
} else if (type == parser::StmtType::Select || type == parser::StmtType::Update ||
421426
type == parser::StmtType::Delete) {
422-
423427
bool is_join = false;
424428
if (type == parser::StmtType::Select) {
425429
const auto* sel = dynamic_cast<const parser::SelectStatement*>(&stmt);
426430
if (sel && !sel->joins().empty()) is_join = true;
427431
}
428432

429-
// Try shard pruning based on WHERE clause, but ONLY if NOT a join (joins are complex in POC)
433+
// Try shard pruning based on WHERE clause, but ONLY if NOT a join (joins are complex in
434+
// POC)
430435
const parser::Expression* where_expr = nullptr;
431436
if (!is_join) {
432437
if (type == parser::StmtType::Select) {
@@ -465,7 +470,8 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
465470
}
466471

467472
network::ExecuteFragmentArgs fragment_args;
468-
// Strip LIMIT/OFFSET from fragment SQL to ensure data nodes return all rows for global processing
473+
// Strip LIMIT/OFFSET from fragment SQL to ensure data nodes return all rows for global
474+
// processing
469475
fragment_args.sql = (type == parser::StmtType::Select) ? strip_limit_offset(raw_sql) : raw_sql;
470476
fragment_args.context_id = context_id;
471477
auto fragment_payload = fragment_args.serialize();
@@ -526,8 +532,10 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
526532
if (col->type() == parser::ExprType::Function) {
527533
const auto* func = dynamic_cast<const parser::FunctionExpr*>(col.get());
528534
std::string name = func->name();
529-
std::transform(name.begin(), name.end(), name.begin(), [](unsigned char c){ return std::toupper(c); });
530-
if (name == "COUNT" || name == "SUM" || name == "MIN" || name == "MAX" || name == "AVG") {
535+
std::transform(name.begin(), name.end(), name.begin(),
536+
[](unsigned char c) { return std::toupper(c); });
537+
if (name == "COUNT" || name == "SUM" || name == "MIN" || name == "MAX" ||
538+
name == "AVG") {
531539
is_global_aggregate = true;
532540
agg_types.push_back(name);
533541
} else {
@@ -548,7 +556,7 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
548556
if (row.size() < agg_types.size()) continue;
549557
for (size_t i = 0; i < agg_types.size(); ++i) {
550558
if (agg_types[i].empty()) continue;
551-
559+
552560
const auto& val = row.get(i);
553561
if (val.is_null()) continue;
554562

@@ -569,7 +577,7 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
569577
}
570578
}
571579
}
572-
580+
573581
executor::Tuple merged_tuple;
574582
for (auto& v : final_vals) {
575583
merged_tuple.values().push_back(std::move(v));
@@ -587,18 +595,21 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
587595
if (col_idx == static_cast<size_t>(-1)) {
588596
// try unqualified
589597
size_t dot = col_name.find_last_of('.');
590-
if (dot != std::string::npos) col_idx = res.schema().find_column(col_name.substr(dot+1));
598+
if (dot != std::string::npos)
599+
col_idx = res.schema().find_column(col_name.substr(dot + 1));
591600
}
592601

593602
if (col_idx == static_cast<size_t>(-1)) {
594603
// Fallback for POC if ORDER BY key is not in projection
595604
col_idx = 0;
596605
}
597606

598-
if (col_idx != static_cast<size_t>(-1) && col_idx < res.schema().columns().size()) {
599-
std::sort(aggregated_rows.begin(), aggregated_rows.end(), [col_idx](const auto& a, const auto& b) {
600-
return a.get(col_idx) < b.get(col_idx);
601-
});
607+
if (col_idx != static_cast<size_t>(-1) &&
608+
col_idx < res.schema().columns().size()) {
609+
std::sort(aggregated_rows.begin(), aggregated_rows.end(),
610+
[col_idx](const auto& a, const auto& b) {
611+
return a.get(col_idx) < b.get(col_idx);
612+
});
602613
}
603614
}
604615
}
@@ -609,15 +620,16 @@ QueryResult DistributedExecutor::execute(const parser::Statement& stmt,
609620
if (sel && (sel->has_limit() || sel->has_offset())) {
610621
int64_t limit = sel->limit();
611622
int64_t offset = sel->offset();
612-
623+
613624
if (offset > 0) {
614625
if (static_cast<size_t>(offset) >= aggregated_rows.size()) {
615626
aggregated_rows.clear();
616627
} else {
617-
aggregated_rows.erase(aggregated_rows.begin(), aggregated_rows.begin() + offset);
628+
aggregated_rows.erase(aggregated_rows.begin(),
629+
aggregated_rows.begin() + offset);
618630
}
619631
}
620-
632+
621633
if (limit >= 0 && static_cast<size_t>(limit) < aggregated_rows.size()) {
622634
aggregated_rows.resize(limit);
623635
}

0 commit comments

Comments (0)