@@ -45,14 +45,14 @@ CleanOutServer::CleanOutServer(Node const& snapshot, AgentInterface* agent,
   auto tmp_server = _snapshot.hasAsString(path + "server");
   auto tmp_creator = _snapshot.hasAsString(path + "creator");

-  if (tmp_server.second && tmp_creator.second) {
-    _server = tmp_server.first;
-    _creator = tmp_creator.first;
+  if (tmp_server && tmp_creator) {
+    _server = tmp_server.value();
+    _creator = tmp_creator.value();
   } else {
     std::stringstream err;
     err << "Failed to find job " << _jobId << " in agency.";
     LOG_TOPIC("38962", ERR, Logger::SUPERVISION) << err.str();
-    finish(tmp_server.first, "", false, err.str());
+    finish(tmp_server.value(), "", false, err.str());
     _status = FAILED;
   }
 }
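Note: the pattern in this first hunk repeats throughout the commit. The Node::hasAs* accessors apparently used to return a value/found-flag pair and now return an optional, so call sites move from checking .second and reading .first to testing the optional's contextual bool and reading .value(). A minimal sketch of the assumed before/after declarations, reconstructed from the call sites (the exact ArangoDB signatures may differ):

    // Assumed old shape: value plus found-flag; the value is
    // default-constructed when the path is absent.
    std::pair<std::string, bool> hasAsString(std::string const& path) const;
    std::pair<Node::Children, bool> hasAsChildren(std::string const& path) const;

    // Assumed new shape: presence and value travel in one object.
    std::optional<std::string> hasAsString(std::string const& path) const;
    std::optional<std::reference_wrapper<Node::Children const>> hasAsChildren(
        std::string const& path) const;

One behavioral difference to keep in mind: pair.first yielded an empty default on a miss, while optional::value() throws std::bad_optional_access, so the finish(tmp_server.value(), ...) call in the error branch above now fails loudly if "server" is the missing key.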
@@ -66,8 +66,8 @@ JOB_STATUS CleanOutServer::status() {
     return _status;
   }

-  Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).first;
-  Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).first;
+  Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).value().get();
+  Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).value().get();
   size_t found = 0;

   for (auto const& subJob : todos) {
@@ -85,7 +85,7 @@ JOB_STATUS CleanOutServer::status() {
     // timeout here:
     auto tmp_time =
         _snapshot.hasAsString(pendingPrefix + _jobId + "/timeCreated");
-    std::string timeCreatedString = tmp_time.first;
+    std::string timeCreatedString = tmp_time.value();
     Supervision::TimePoint timeCreated = stringToTimepoint(timeCreatedString);
     Supervision::TimePoint now(std::chrono::system_clock::now());
     if (now - timeCreated > std::chrono::duration<double>(86400.0)) {  // 1 day
@@ -95,7 +95,7 @@ JOB_STATUS CleanOutServer::status() {
     return PENDING;
   }

-  Node::Children const& failed = _snapshot.hasAsChildren(failedPrefix).first;
+  Node::Children const& failed = _snapshot.hasAsChildren(failedPrefix).value().get();
   size_t failedFound = 0;
   for (auto const& subJob : failed) {
     if (!subJob.first.compare(0, _jobId.size() + 1, _jobId + "-")) {
@@ -130,7 +130,7 @@ JOB_STATUS CleanOutServer::status() {
     }
     addRemoveJobFromSomewhere(reportTrx, "Pending", _jobId);
     Builder job;
-    _snapshot.hasAsBuilder(pendingPrefix + _jobId, job);
+    std::ignore = _snapshot.hasAsBuilder(pendingPrefix + _jobId, job);
     addPutJobIntoSomewhere(reportTrx, "Finished", job.slice(), "");
     addReleaseServer(reportTrx, _server);
   }
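The std::ignore assignment is the explicit way to discard a return value, presumably because the refactored hasAsBuilder is marked [[nodiscard]]. A short sketch under that assumption (the declaration is hypothetical, inferred from this call site):

    #include <tuple>  // std::ignore

    // Hypothetical: writes the node at `path` into `builder`, returns success.
    [[nodiscard]] bool hasAsBuilder(std::string const& path, Builder& builder) const;

    // The success flag is deliberately unused; `job` simply stays empty on a miss.
    std::ignore = _snapshot.hasAsBuilder(pendingPrefix + _jobId, job);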
@@ -227,8 +227,8 @@ bool CleanOutServer::start(bool& aborts) {
   // Check that _to is not in `Target/CleanedServers`:
   VPackBuilder cleanedServersBuilder;
   auto const& cleanedServersNode = _snapshot.hasAsNode(cleanedPrefix);
-  if (cleanedServersNode.second) {
-    cleanedServersNode.first.toBuilder(cleanedServersBuilder);
+  if (cleanedServersNode) {
+    cleanedServersNode->get().toBuilder(cleanedServersBuilder);
   } else {
     // ignore this check
     cleanedServersBuilder.clear();
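The cleanedServersNode->get() spelling works if hasAsNode now returns std::optional<std::reference_wrapper<Node const>>: optional::operator-> reaches the reference_wrapper, and reference_wrapper::get() yields the Node itself. Equivalent forms under that assumption:

    // Assumption: n has type std::optional<std::reference_wrapper<Node const>>.
    auto const& n = _snapshot.hasAsNode(cleanedPrefix);
    VPackBuilder b;
    if (n) {
      n->get().toBuilder(b);         // operator-> then reference_wrapper::get()
      (*n).get().toBuilder(b);       // same via operator*
      n.value().get().toBuilder(b);  // same, but throws if n is empty
    }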
@@ -250,8 +250,8 @@ bool CleanOutServer::start(bool& aborts) {
   VPackBuilder failedServersBuilder;
   if (_snapshot.has(failedServersPrefix)) {
     auto const& failedServersNode = _snapshot.hasAsNode(failedServersPrefix);
-    if (failedServersNode.second) {
-      failedServersNode.first.toBuilder(failedServersBuilder);
+    if (failedServersNode) {
+      failedServersNode->get().toBuilder(failedServersBuilder);
     } else {
       // ignore this check
       failedServersBuilder.clear();
@@ -290,7 +290,7 @@ bool CleanOutServer::start(bool& aborts) {
   // in _jb:
   if (_jb == nullptr) {
     auto tmp_todo = _snapshot.hasAsBuilder(toDoPrefix + _jobId, todo);
-    if (!tmp_todo.second) {
+    if (!tmp_todo) {
       // Just in case, this is never going to happen, since we will only
       // call the start() method if the job is already in ToDo.
       LOG_TOPIC("1e9a9", INFO, Logger::SUPERVISION) << "Failed to get key " + toDoPrefix + _jobId +
@@ -345,7 +345,7 @@ bool CleanOutServer::start(bool& aborts) {
       addPreconditionServerHealth(*pending, _server, "GOOD");
       addPreconditionUnchanged(*pending, failedServersPrefix, failedServers);
       addPreconditionUnchanged(*pending, cleanedPrefix, cleanedServers);
-      addPreconditionUnchanged(*pending, planVersion, _snapshot(planVersion).slice());
+      addPreconditionUnchanged(*pending, planVersion, _snapshot.get(planVersion).value().get().slice());
     }
   }  // array for transaction done
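Here the old call-operator lookup _snapshot(planVersion) is replaced by an explicit chain. This matches a Node::get that returns an optional reference, unwrapped step by step; a sketch assuming that shape (the signature is inferred from the call site, not verified):

    // Hypothetical accessor:
    std::optional<std::reference_wrapper<Node const>> get(std::string const& path) const;

    // optional -> reference_wrapper -> Node -> velocypack Slice;
    // value() throws std::bad_optional_access if planVersion is absent.
    VPackSlice s = _snapshot.get(planVersion).value().get().slice();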
@@ -367,7 +367,7 @@ bool CleanOutServer::start(bool& aborts) {
 bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
   std::vector<std::string> servers = availableServers(_snapshot);

-  Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").first;
+  Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").value().get();
   size_t sub = 0;

   for (auto const& database : databases) {
@@ -379,7 +379,7 @@ bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
       continue;
     }

-    for (auto const& shard : collection.hasAsChildren("shards").first) {
+    for (auto const& shard : collection.hasAsChildren("shards").value().get()) {
       // Only shards, which are affected
       int found = -1;
       int count = 0;
@@ -395,7 +395,7 @@ bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
       }

       auto replicationFactor = collection.hasAsString(StaticStrings::ReplicationFactor);
-      bool isSatellite = replicationFactor.second && replicationFactor.first == StaticStrings::Satellite;
+      bool isSatellite = replicationFactor && replicationFactor.value() == StaticStrings::Satellite;
       bool isLeader = (found == 0);

       if (isSatellite) {
@@ -412,7 +412,7 @@ bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
       } else {
         // Intentionally do nothing. RemoveServer will remove the failed follower
         LOG_TOPIC("22ca1", DEBUG, Logger::SUPERVISION) <<
-          "Do nothing for cleanout of follower of the SatelliteCollection " << collection.hasAsString("id").first;
+          "Do nothing for cleanout of follower of the SatelliteCollection " << collection.hasAsString("id").value();
         continue;
       }
     } else {
@@ -467,11 +467,11 @@ bool CleanOutServer::checkFeasibility() {
   uint64_t maxReplFact = 1;
   std::vector<std::string> tooLargeCollections;
   std::vector<uint64_t> tooLargeFactors;
-  Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").first;
+  Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").value().get();
   for (auto const& database : databases) {
     for (auto const& collptr : database.second->children()) {
       try {
-        uint64_t replFact = (*collptr.second).hasAsUInt("replicationFactor").first;
+        uint64_t replFact = (*collptr.second).hasAsUInt("replicationFactor").value();
         if (replFact > numRemaining) {
           tooLargeCollections.push_back(collptr.first);
           tooLargeFactors.push_back(replFact);
@@ -522,8 +522,8 @@ arangodb::Result CleanOutServer::abort(std::string const& reason) {
   }

   // Abort all our subjobs:
-  Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).first;
-  Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).first;
+  Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).value().get();
+  Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).value().get();

   std::string childAbortReason = "parent job aborted - reason: " + reason;
529
0 commit comments