8000 Agency hasAsXXX using std::optional (#14376) · arangodb/arangodb@70ea80f · GitHub
[go: up one dir, main page]

Skip to content

Commit 70ea80f

Browse files
author
Lars Maier
authored
Agency hasAsXXX using std::optional (#14376)
1 parent d491680 commit 70ea80f

28 files changed

+686
-788
lines changed

arangod/Agency/ActiveFailoverJob.cpp

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -46,14 +46,14 @@ ActiveFailoverJob::ActiveFailoverJob(Node const& snapshot, AgentInterface* agent
4646
auto tmp_server = _snapshot.hasAsString(path + "server");
4747
auto tmp_creator = _snapshot.hasAsString(path + "creator");
4848

49-
if (tmp_server.second && tmp_creator.second) {
50-
_server = tmp_server.first;
51-
_creator = tmp_creator.first;
49+
if (tmp_server && tmp_creator) {
50+
_server = tmp_server.value();
51+
_creator = tmp_creator.value();
5252
} else {
5353
std::stringstream err;
5454
err << "Failed to find job " << _jobId << " in agency.";
5555
LOG_TOPIC("c756b", ERR, Logger::SUPERVISION) << err.str();
56-
finish(tmp_server.first, "", false, err.str());
56+
finish(tmp_server.value(), "", false, err.str());
5757
_status = FAILED;
5858
}
5959
}
@@ -109,7 +109,7 @@ bool ActiveFailoverJob::create(std::shared_ptr<VPackBuilder> envelope) {
109109
_jb->add(VPackValue(failedServersPrefix));
110110
{
111111
VPackObjectBuilder old(_jb.get());
112-
_jb->add("old", _snapshot(failedServersPrefix).toBuilder().slice());
112+
_jb->add("old", _snapshot.get(failedServersPrefix).value().get().toBuilder().slice());
113113
}
114114
} // Preconditions
115115
} // transactions
@@ -144,7 +144,7 @@ bool ActiveFailoverJob::start(bool&) {
144144
}
145145

146146
auto leader = _snapshot.hasAsSlice(asyncReplLeader);
147-
if (!leader.second || leader.first.compareString(_server) != 0) {
147+
if (!leader || leader->compareString(_server) != 0) {
148148
std::string reason =
149149
"Server " + _server + " is not the current replication leader";
150150
LOG_TOPIC("d468e", INFO, Logger::SUPERVISION) << reason;
@@ -153,10 +153,10 @@ bool ActiveFailoverJob::start(bool&) {
153153

154154
// Abort job blocking server if abortable
155155
auto jobId = _snapshot.hasAsString(blockedServersPrefix + _server);
156-
if (jobId.second && !abortable(_snapshot, jobId.first)) {
156+
if (jobId && !abortable(_snapshot, *jobId)) {
157157
return false;
158-
} else if (jobId.second) {
159-
JobContext(PENDING, jobId.first, _snapshot, _agent).abort("ActiveFailoverJob requests abort");
158+
} else if (jobId) {
159+
JobContext(PENDING, *jobId, _snapshot, _agent).abort("ActiveFailoverJob requests abort");
160160
}
161161

162162
// Todo entry
@@ -165,7 +165,7 @@ bool ActiveFailoverJob::start(bool&) {
165165
VPackArrayBuilder t(&todo);
166166
if (_jb == nullptr) {
167167
try {
168-
_snapshot(toDoPrefix + _jobId).toBuilder(todo);
168+
_snapshot.get(toDoPrefix + _jobId).value().get().toBuilder(todo);
169169
} catch (std::exception const&) {
170170
LOG_TOPIC("26fec", INFO, Logger::SUPERVISION) << "Failed to get key " + toDoPrefix + _jobId +
171171
" from agency snapshot";
@@ -206,7 +206,7 @@ bool ActiveFailoverJob::start(bool&) {
206206
// Destination server should not be blocked by another job
207207
addPreconditionServerNotBlocked(pending, newLeader);
208208
// AsyncReplication leader must be the failed server
209-
addPreconditionUnchanged(pending, asyncReplLeader, leader.first);
209+
addPreconditionUnchanged(pending, asyncReplLeader, leader.value());
210210
} // precondition done
211211

212212
} // array for transaction done
@@ -263,7 +263,7 @@ std::string ActiveFailoverJob::findBestFollower() {
263263

264264
// blocked; (not sure if this can even happen)
265265
try {
266-
for (auto const& srv : _snapshot(blockedServersPrefix).children()) {
266+
for (auto const& srv : _snapshot.get(blockedServersPrefix).value().get().children()) {
267267
healthy.erase(std::remove(healthy.begin(), healthy.end(), srv.first),
268268
healthy.end());
269269
}

arangod/Agency/AddFollower.cpp

Lines changed: 16 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -49,17 +49,18 @@ AddFollower::AddFollower(Node const& snapshot, AgentInterface* agent,
4949
auto tmp_shard = _snapshot.hasAsString(path + "shard");
5050
auto tmp_creator = _snapshot.hasAsString(path + "creator");
5151

52-
if (tmp_database.second && tmp_collection.second && tmp_shard.second &&
53-
tmp_creator.second) {
54-
_database = tmp_database.first;
55-
_collection = tmp_collection.first;
56-
_shard = tmp_shard.first;
57-
_creator = tmp_creator.first;
52+
if (tmp_database && tmp_collection && tmp_shard &&
53+
tmp_creator) {
54+
_database = tmp_database.value();
55+
_collection = tmp_collection.value();
56+
_shard = tmp_shard.value();
57+
_creator = tmp_creator.value();
5858
} else {
5959
std::stringstream err;
6060
err << "Failed to find job " << _jobId << " in agency.";
6161
LOG_TOPIC("4d260", ERR, Logger::SUPERVISION) << err.str();
62-
finish("", tmp_shard.first, false, err.str());
62+
// TODO this call to finish invokes a virtual member function within a constructor
63+
finish("", tmp_shard.value_or(""), false, err.str());
6364
_status = FAILED;
6465
}
6566
}
@@ -131,7 +132,7 @@ bool AddFollower::start(bool&) {
131132
return false;
132133
}
133134
Node const& collection =
134-
_snapshot.hasAsNode(planColPrefix + _database + "/" + _collection).first;
135+
_snapshot.hasAsNode(planColPrefix + _database + "/" + _collection)->get();
135136
if (collection.has("distributeShardsLike")) {
136137
finish("", "", false,
137138
"collection must not have 'distributeShardsLike' attribute");
@@ -142,19 +143,19 @@ bool AddFollower::start(bool&) {
142143
std::string planPath =
143144
planColPrefix + _database + "/" + _collection + "/shards/" + _shard;
144145

145-
Slice planned = _snapshot.hasAsSlice(planPath).first;
146+
Slice planned = _snapshot.hasAsSlice(planPath).value();
146147

147148
TRI_ASSERT(planned.isArray());
148149

149150
// First check that we still have too few followers for the current
150151
// `replicationFactor`:
151152
size_t desiredReplFactor = 1;
152153
auto replFact = collection.hasAsUInt(StaticStrings::ReplicationFactor);
153-
if (replFact.second) {
154-
desiredReplFactor = replFact.first;
154+
if (replFact) {
155+
desiredReplFactor = replFact.value();
155156
} else {
156157
auto replFact2 = collection.hasAsString(StaticStrings::ReplicationFactor);
157-
if (replFact2.second && replFact2.first == StaticStrings::Satellite) {
158+
if (replFact2 && replFact2.value() == StaticStrings::Satellite) {
158159
// satellites => distribute to every server
159160
auto available = Job::availableServers(_snapshot);
160161
desiredReplFactor = Job::countGoodOrBadServersInList(_snapshot, available);
@@ -251,7 +252,7 @@ bool AddFollower::start(bool&) {
251252
// in _jb:
252253
if (_jb == nullptr) {
253254
auto tmp_todo = _snapshot.hasAsBuilder(toDoPrefix + _jobId, todo);
254-
if (!tmp_todo.second) {
255+
if (!tmp_todo) {
255256
// Just in case, this is never going to happen, since we will only
256257
// call the start() method if the job is already in ToDo.
257258
LOG_TOPIC("24c50", INFO, Logger::SUPERVISION) << "Failed to get key " + toDoPrefix + _jobId +
@@ -313,8 +314,8 @@ bool AddFollower::start(bool&) {
313314
std::string foCandsPath = curPath.substr(0, curPath.size() - 7);
314315
foCandsPath += StaticStrings::FailoverCandidates;
315316
auto foCands = this->_snapshot.hasAsSlice(foCandsPath);
316-
if (foCands.second) {
317-
addPreconditionUnchanged(trx, foCandsPath, foCands.first);
317+
if (foCands) {
318+
addPreconditionUnchanged(trx, foCandsPath, foCands.value());
318319
}
319320
});
320321
addPreconditionShardNotBlocked(trx, _shard);

arangod/Agency/CleanOutServer.cpp

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -45,14 +45,14 @@ CleanOutServer::CleanOutServer(Node const& snapshot, AgentInterface* agent,
4545
auto tmp_server = _snapshot.hasAsString(path + "server");
4646
auto tmp_creator = _snapshot.hasAsString(path + "creator");
4747

48-
if (tmp_server.second && tmp_creator.second) {
49-
_server = tmp_server.first;
50-
_creator = tmp_creator.first;
48+
if (tmp_server && tmp_creator) {
49+
_server = tmp_server.value();
50+
_creator = tmp_creator.value();
5151
} else {
5252
std::stringstream err;
5353
err << "Failed to find job " << _jobId << " in agency.";
5454
LOG_TOPIC("38962", ERR, Logger::SUPERVISION) << err.str();
55-
finish(tmp_server.first, "", false, err.str());
55+
finish(tmp_server.value(), "", false, err.str());
5656
_status = FAILED;
5757
}
5858
}
@@ -66,8 +66,8 @@ JOB_STATUS CleanOutServer::status() {
6666
return _status;
6767
}
6868

69-
Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).first;
70-
Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).first;
69+
Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).value().get();
70+
Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).value().get();
7171
size_t found = 0;
7272

7373
for (auto const& subJob : todos) {
@@ -85,7 +85,7 @@ JOB_STATUS CleanOutServer::status() {
8585
// timeout here:
8686
auto tmp_time =
8787
_snapshot.hasAsString(pendingPrefix + _jobId + "/timeCreated");
88-
std::string timeCreatedString = tmp_time.first;
88+
std::string timeCreatedString = tmp_time.value();
8989
Supervision::TimePoint timeCreated = stringToTimepoint(timeCreatedString);
9090
Supervision::TimePoint now(std::chrono::system_clock::now());
9191
if (now - timeCreated > std::chrono::duration<double>(86400.0)) { // 1 day
@@ -95,7 +95,7 @@ JOB_STATUS CleanOutServer::status() {
9595
return PENDING;
9696
}
9797

98-
Node::Children const& failed = _snapshot.hasAsChildren(failedPrefix).first;
98+
Node::Children const& failed = _snapshot.hasAsChildren(failedPrefix).value().get();
9999
size_t failedFound = 0;
100100
for (auto const& subJob : failed) {
101101
if (!subJob.first.compare(0, _jobId.size() + 1, _jobId + "-")) {
@@ -130,7 +130,7 @@ JOB_STATUS CleanOutServer::status() {
130130
}
131131
addRemoveJobFromSomewhere(reportTrx, "Pending", _jobId);
132132
Builder job;
133-
_snapshot.hasAsBuilder(pendingPrefix + _jobId, job);
133+
std::ignore = _snapshot.hasAsBuilder(pendingPrefix + _jobId, job);
134134
addPutJobIntoSomewhere(reportTrx, "Finished", job.slice(), "");
135135
addReleaseServer(reportTrx, _server);
136136
}
@@ -227,8 +227,8 @@ bool CleanOutServer::start(bool& aborts) {
227227
// Check that _to is not in `Target/CleanedServers`:
228228
VPackBuilder cleanedServersBuilder;
229229
auto const& cleanedServersNode = _snapshot.hasAsNode(cleanedPrefix);
230-
if (cleanedServersNode.second) {
231-
cleanedServersNode.first.toBuilder(cleanedServersBuilder);
230+
if (cleanedServersNode) {
231+
cleanedServersNode->get().toBuilder(cleanedServersBuilder);
232232
} else {
233233
// ignore this check
234234
cleanedServersBuilder.clear();
@@ -250,8 +250,8 @@ bool CleanOutServer::start(bool& aborts) {
250250
VPackBuilder failedServersBuilder;
251251
if (_snapshot.has(failedServersPrefix)) {
252252
auto const& failedServersNode = _snapshot.hasAsNode(failedServersPrefix);
253-
if (failedServersNode.second) {
254-
failedServersNode.first.toBuilder(failedServersBuilder);
253+
if (failedServersNode) {
254+
failedServersNode->get().toBuilder(failedServersBuilder);
255255
} else {
256256
// ignore this check
257257
failedServersBuilder.clear();
@@ -290,7 +290,7 @@ bool CleanOutServer::start(bool& aborts) {
290290
// in _jb:
291291
if (_jb == nullptr) {
292292
auto tmp_todo = _snapshot.hasAsBuilder(toDoPrefix + _jobId, todo);
293-
if (!tmp_todo.second) {
293+
if (!tmp_todo) {
294294
// Just in case, this is never going to happen, since we will only
295295
// call the start() method if the job is already in ToDo.
296296
LOG_TOPIC("1e9a9", INFO, Logger::SUPERVISION) << "Failed to get key " + toDoPrefix + _jobId +
@@ -345,7 +345,7 @@ bool CleanOutServer::start(bool& aborts) {
345345
addPreconditionServerHealth(*pending, _server, "GOOD");
346346
addPreconditionUnchanged(*pending, failedServersPrefix, failedServers);
347347
addPreconditionUnchanged(*pending, cleanedPrefix, cleanedServers);
348-
addPreconditionUnchanged(*pending, planVersion, _snapshot(planVersion).slice());
348+
addPreconditionUnchanged(*pending, planVersion, _snapshot.get(planVersion).value().get().slice());
349349
}
350350
} // array for transaction done
351351

@@ -367,7 +367,7 @@ bool CleanOutServer::start(bool& aborts) {
367367
bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
368368
std::vector<std::string> servers = availableServers(_snapshot);
369369

370-
Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").first;
370+
Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").value().get();
371371
size_t sub = 0;
372372

373373
for (auto const& database : databases) {
@@ -379,7 +379,7 @@ bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
379379
continue;
380380
}
381381

382-
for (auto const& shard : collection.hasAsChildren("shards").first) {
382+
for (auto const& shard : collection.hasAsChildren("shards").value().get()) {
383383
// Only shards, which are affected
384384
int found = -1;
385385
int count = 0;
@@ -395,7 +395,7 @@ bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
395395
}
396396

397397
auto replicationFactor = collection.hasAsString(StaticStrings::ReplicationFactor);
398-
bool isSatellite = replicationFactor.second && replicationFactor.first == StaticStrings::Satellite;
398+
bool isSatellite = replicationFactor && replicationFactor.value() == StaticStrings::Satellite;
399399
bool isLeader = (found == 0);
400400

401401
if (isSatellite) {
@@ -412,7 +412,7 @@ bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
412412
} else {
413413
// Intentionally do nothing. RemoveServer will remove the failed follower
414414
LOG_TOPIC("22ca1", DEBUG, Logger::SUPERVISION) <<
415-
"Do nothing for cleanout of follower of the SatelliteCollection " << collection.hasAsString("id").first;
415+
"Do nothing for cleanout of follower of the SatelliteCollection " << collection.hasAsString("id").value();
416416
continue ;
417417
}
418418
} else {
@@ -467,11 +467,11 @@ bool CleanOutServer::checkFeasibility() {
467467
uint64_t maxReplFact = 1;
468468
std::vector<std::string> tooLargeCollections;
469469
std::vector<uint64_t> tooLargeFactors;
470-
Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").first;
470+
Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").value().get();
471471
for (auto const& database : databases) {
472472
for (auto const& collptr : database.second->children()) {
473473
try {
474-
uint64_t replFact = (*collptr.second).hasAsUInt("replicationFactor").first;
474+
uint64_t replFact = (*collptr.second).hasAsUInt("replicationFactor").value();
475475
if (replFact > numRemaining) {
476476
tooLargeCollections.push_back(collptr.first);
477477
tooLargeFactors.push_back(replFact);
@@ -522,8 +522,8 @@ arangodb::Result CleanOutServer::abort(std::string const& reason) {
522522
}
523523

524524
// Abort all our subjobs:
525-
Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).first;
526-
Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).first;
525+
Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).value().get();
526+
Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).value().get();
527527

528528
std::string childAbortReason = "parent job aborted - reason: " + reason;
529529

arangod/Agency/FailedFollower.cpp

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -52,22 +52,22 @@ FailedFollower::FailedFollower(Node const& snapshot, AgentInterface* agent,
5252
// set only if already started (test to prevent warning)
5353
if (_snapshot.has(path + "toServer")) {
5454
auto tmp_to = _snapshot.hasAsString(path + "toServer");
55-
_to = tmp_to.first;
55+
_to = tmp_to.value();
5656
}
5757

5858
auto tmp_shard = _snapshot.hasAsString(path + "shard");
5959
auto tmp_creator = _snapshot.hasAsString(path + "creator");
6060
auto tmp_created = _snapshot.hasAsString(path + "timeCreated");
6161

62-
if (tmp_database.second && tmp_collection.second && tmp_from.second &&
63-
tmp_shard.second && tmp_creator.second && tmp_created.second) {
64-
_database = tmp_database.first;
65-
_collection = tmp_collection.first;
66-
_from = tmp_from.first;
62+
if (tmp_database && tmp_collection && tmp_from &&
63+
tmp_shard && tmp_creator && tmp_created) {
64+
_database = tmp_database.value();
65+
_collection = tmp_collection.value();
66+
_from = tmp_from.value();
6767
// _to conditionally set above
68-
_shard = tmp_shard.first;
69-
_creator = tmp_creator.first;
70-
_created = stringToTimepoint(tmp_created.first);
68+
_shard = tmp_shard.value();
69+
_creator = tmp_creator.value();
70+
_created = stringToTimepoint(tmp_created.value());
7171
} else {
7272
std::stringstream err;
7373
err << "Failed to find job " << _jobId << " in agency";
@@ -143,12 +143,12 @@ bool FailedFollower::start(bool& aborts) {
143143
std::string planPath =
144144
planColPrefix + _database + "/" + _collection + "/shards/" + _shard;
145145
auto plannedPair = _snapshot.hasAsSlice(planPath);
146-
Slice const& planned = plannedPair.first;
147-
if (!plannedPair.second) {
146+
if (!plannedPair) {
148147
finish("", _shard, true,
149148
"Plan entry for collection " + _collection + " gone");
150149
return false;
151150
}
151+
Slice const& planned = plannedPair.value();
152152

153153
// Now check if _server is still in this plan, note that it could have
154154
// been removed by RemoveFollower already, in which case we simply stop:
@@ -205,8 +205,8 @@ bool FailedFollower::start(bool& aborts) {
205205
VPackArrayBuilder a(&todo);
206206
if (_jb == nullptr) {
207207
auto const& jobIdNode = _snapshot.hasAsNode(toDoPrefix + _jobId);
208-
if (jobIdNode.second) {
209-
jobIdNode.first.toBuilder(todo);
208+
if (jobIdNode) {
209+
jobIdNode->get().toBuilder(todo);
210210
} else {
211211
LOG_TOPIC("4571c", INFO, Logger::SUPERVISION) << "Failed to get key " + toDoPrefix + _jobId +
212212
" from agency snapshot";
@@ -284,8 +284,8 @@ bool FailedFollower::start(bool& aborts) {
284284
std::string foCandsPath = curPath.substr(0, curPath.size() - 7);
285285
foCandsPath += StaticStrings::FailoverCandidates;
286286
auto foCands = this->_snapshot.hasAsSlice(foCandsPath);
287-
if (foCands.second) {
288-
addPreconditionUnchanged(job, foCandsPath, foCands.first);
287+
if (foCands) {
288+
addPreconditionUnchanged(job, foCandsPath, foCands.value());
289289
}
290290
});
291291
// toServer not blocked
@@ -302,11 +302,11 @@ bool FailedFollower::start(bool& aborts) {
302302
// (likely to not exist, avoid warning message by testing first)
303303
if (_snapshot.has(blockedShardsPrefix + _shard)) {
304304
auto jobId = _snapshot.hasAsString(blockedShardsPrefix + _shard);
305-
if (jobId.second && !abortable(_snapshot, jobId.first)) {
305+
if (jobId && !abortable(_snapshot, *jobId)) {
306306
return false;
307-
} else if (jobId.second) {
307+
} else if (jobId) {
308308
aborts = true;
309-
JobContext(PENDING, jobId.first, _snapshot, _agent).abort("failed follower requests abort");
309+
JobContext(PENDING, *jobId, _snapshot, _agent).abort("failed follower requests abort");
310310
return false;
311311
}
312312
}

0 commit comments

Comments
 (0)
0