@@ -45,14 +45,14 @@ CleanOutServer::CleanOutServer(Node const& snapshot, AgentInterface* agent,
   auto tmp_server = _snapshot.hasAsString(path + "server");
   auto tmp_creator = _snapshot.hasAsString(path + "creator");
 
-  if (tmp_server.second && tmp_creator.second) {
-    _server = tmp_server.first;
-    _creator = tmp_creator.first;
+  if (tmp_server && tmp_creator) {
+    _server = tmp_server.value();
+    _creator = tmp_creator.value();
   } else {
     std::stringstream err;
     err << "Failed to find job " << _jobId << " in agency.";
     LOG_TOPIC("38962", ERR, Logger::SUPERVISION) << err.str();
-    finish(tmp_server.first, "", false, err.str());
+    finish(tmp_server.value(), "", false, err.str());
     _status = FAILED;
   }
 }
@@ -66,8 +66,8 @@ JOB_STATUS CleanOutServer::status() {
     return _status;
   }
 
-  Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).first;
-  Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).first;
+  Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).value().get();
+  Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).value().get();
   size_t found = 0;
 
   for (auto const& subJob : todos) {
@@ -85,7 +85,7 @@ JOB_STATUS CleanOutServer::status() {
   // timeout here:
   auto tmp_time =
       _snapshot.hasAsString(pendingPrefix + _jobId + "/timeCreated");
-  std::string timeCreatedString = tmp_time.first;
+  std::string timeCreatedString = tmp_time.value();
   Supervision::TimePoint timeCreated = stringToTimepoint(timeCreatedString);
   Supervision::TimePoint now(std::chrono::system_clock::now());
   if (now - timeCreated > std::chrono::duration<double>(86400.0)) {  // 1 day
@@ -95,7 +95,7 @@ JOB_STATUS CleanOutServer::status() {
     return PENDING;
   }
 
-  Node::Children const& failed = _snapshot.hasAsChildren(failedPrefix).first;
+  Node::Children const& failed = _snapshot.hasAsChildren(failedPrefix).value().get();
   size_t failedFound = 0;
   for (auto const& subJob : failed) {
     if (!subJob.first.compare(0, _jobId.size() + 1, _jobId + "-")) {
@@ -130,7 +130,7 @@ JOB_STATUS CleanOutServer::status() {
     }
     addRemoveJobFromSomewhere(reportTrx, "Pending", _jobId);
     Builder job;
-    _snapshot.hasAsBuilder(pendingPrefix + _jobId, job);
+    std::ignore = _snapshot.hasAsBuilder(pendingPrefix + _jobId, job);
     addPutJobIntoSomewhere(reportTrx, "Finished", job.slice(), "");
     addReleaseServer(reportTrx, _server);
   }
@@ -227,8 +227,8 @@ bool CleanOutServer::start(bool& aborts) {
   // Check that _to is not in `Target/CleanedServers`:
   VPackBuilder cleanedServersBuilder;
   auto const& cleanedServersNode = _snapshot.hasAsNode(cleanedPrefix);
-  if (cleanedServersNode.second) {
-    cleanedServersNode.first.toBuilder(cleanedServersBuilder);
+  if (cleanedServersNode) {
+    cleanedServersNode->get().toBuilder(cleanedServersBuilder);
   } else {
     // ignore this check
     cleanedServersBuilder.clear();
@@ -250,8 +250,8 @@ bool CleanOutServer::start(bool& aborts) {
   VPackBuilder failedServersBuilder;
   if (_snapshot.has(failedServersPrefix)) {
     auto const& failedServersNode = _snapshot.hasAsNode(failedServersPrefix);
-    if (failedServersNode.second) {
-      failedServersNode.first.toBuilder(failedServersBuilder);
+    if (failedServersNode) {
+      failedServersNode->get().toBuilder(failedServersBuilder);
     } else {
       // ignore this check
       failedServersBuilder.clear();
@@ -290,7 +290,7 @@ bool CleanOutServer::start(bool& aborts) {
   // in _jb:
   if (_jb == nullptr) {
     auto tmp_todo = _snapshot.hasAsBuilder(toDoPrefix + _jobId, todo);
-    if (!tmp_todo.second) {
+    if (!tmp_todo) {
       // Just in case, this is never going to happen, since we will only
       // call the start() method if the job is already in ToDo.
       LOG_TOPIC("1e9a9", INFO, Logger::SUPERVISION) << "Failed to get key " + toDoPrefix + _jobId +
@@ -345,7 +345,7 @@ bool CleanOutServer::start(bool& aborts) {
       addPreconditionServerHealth(*pending, _server, "GOOD");
       addPreconditionUnchanged(*pending, failedServersPrefix, failedServers);
       addPreconditionUnchanged(*pending, cleanedPrefix, cleanedServers);
-      addPreconditionUnchanged(*pending, planVersion, _snapshot(planVersion).slice());
+      addPreconditionUnchanged(*pending, planVersion, _snapshot.get(planVersion).value().get().slice());
     }
   }  // array for transaction done
 
@@ -367,7 +367,7 @@ bool CleanOutServer::start(bool& aborts) {
 bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
   std::vector<std::string> servers = availableServers(_snapshot);
 
-  Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").first;
+  Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").value().get();
   size_t sub = 0;
 
   for (auto const& database : databases) {
@@ -379,7 +379,7 @@ bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
         continue;
       }
 
-      for (auto const& shard : collection.hasAsChildren("shards").first) {
+      for (auto const& shard : collection.hasAsChildren("shards").value().get()) {
         // Only shards, which are affected
         int found = -1;
         int count = 0;
@@ -395,7 +395,7 @@ bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
         }
 
         auto replicationFactor = collection.hasAsString(StaticStrings::ReplicationFactor);
-        bool isSatellite = replicationFactor.second && replicationFactor.first == StaticStrings::Satellite;
+        bool isSatellite = replicationFactor && replicationFactor.value() == StaticStrings::Satellite;
         bool isLeader = (found == 0);
 
         if (isSatellite) {
@@ -412,7 +412,7 @@ bool CleanOutServer::scheduleMoveShards(std::shared_ptr<Builder>& trx) {
           } else {
             // Intentionally do nothing. RemoveServer will remove the failed follower
             LOG_TOPIC("22ca1", DEBUG, Logger::SUPERVISION) <<
-              "Do nothing for cleanout of follower of the SatelliteCollection " << collection.hasAsString("id").first;
+              "Do nothing for cleanout of follower of the SatelliteCollection " << collection.hasAsString("id").value();
             continue;
           }
         } else {
@@ -467,11 +467,11 @@ bool CleanOutServer::checkFeasibility() {
   uint64_t maxReplFact = 1;
   std::vector<std::string> tooLargeCollections;
   std::vector<uint64_t> tooLargeFactors;
-  Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").first;
+  Node::Children const& databases = _snapshot.hasAsChildren("/Plan/Collections").value().get();
   for (auto const& database : databases) {
     for (auto const& collptr : database.second->children()) {
       try {
-        uint64_t replFact = (*collptr.second).hasAsUInt("replicationFactor").first;
+        uint64_t replFact = (*collptr.second).hasAsUInt("replicationFactor").value();
         if (replFact > numRemaining) {
           tooLargeCollections.push_back(collptr.first);
           tooLargeFactors.push_back(replFact);
@@ -522,8 +522,8 @@ arangodb::Result CleanOutServer::abort(std::string const& reason) {
   }
 
   // Abort all our subjobs:
-  Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).first;
-  Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).first;
+  Node::Children const& todos = _snapshot.hasAsChildren(toDoPrefix).value().get();
+  Node::Children const& pends = _snapshot.hasAsChildren(pendingPrefix).value().get();
 
   std::string childAbortReason = "parent job aborted - reason: " + reason;
 
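Every hunk in this commit applies the same mechanical migration: the Node accessors (hasAsString, hasAsUInt, hasAsNode, hasAsChildren, hasAsBuilder) previously returned a std::pair<T, bool> whose .second flagged success and whose .first could be a default-constructed dummy, and now return an optional-style result that is tested in boolean context and unwrapped with .value(), or with .value().get() / ->get() where the payload is a reference. What follows is a minimal, self-contained sketch of plausible before/after signatures; the exact types are assumptions inferred from the call sites above, not copied from ArangoDB's real Node header.

// Sketch only: before/after API shapes inferred from the diff's call sites.
// The real arangodb::consensus::Node API may differ in detail.
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <utility>

struct Node;
using Children = std::map<std::string, std::shared_ptr<Node>>;

struct Node {
  std::map<std::string, std::string> strings;  // stand-in for the agency tree
  Children kids;

  // Old shape: success is a bool tacked onto the payload; on failure the
  // payload is a default-constructed dummy that callers can silently misuse.
  std::pair<std::string, bool> hasAsStringOld(std::string const& key) const {
    auto it = strings.find(key);
    return it == strings.end() ? std::make_pair(std::string{}, false)
                               : std::make_pair(it->second, true);
  }

  // New shape: "no value" is no longer representable as a valid payload, and
  // the optional converts to bool, which is what `if (tmp_server && ...)` tests.
  std::optional<std::string> hasAsString(std::string const& key) const {
    auto it = strings.find(key);
    if (it == strings.end()) return std::nullopt;
    return it->second;
  }

  // For by-reference results, optional<reference_wrapper> avoids copying the
  // children map; callers unwrap with .value().get(), exactly as the diff does.
  std::optional<std::reference_wrapper<Children const>> hasAsChildren() const {
    return std::cref(kids);
  }
};

int main() {
  Node n;
  n.strings["server"] = "PRMR-1234";

  if (auto s = n.hasAsString("server")) {  // contextual conversion to bool
    std::string server = s.value();        // safe: presence checked above
    (void)server;
  }
  Children const& kids = n.hasAsChildren().value().get();
  (void)kids;
  return 0;
}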