don't return any in-progress indexes by jsteemann · Pull Request #10431 · arangodb/arangodb · GitHub

don't return any in-progress indexes #10431


Merged
5 commits merged on Nov 14, 2019
7 changes: 4 additions & 3 deletions arangod/IResearch/IResearchView.cpp
@@ -363,11 +363,12 @@ arangodb::Result IResearchView::appendVelocyPackImpl( // append JSON
static const std::function<bool(irs::string_ref const& key)> persistenceAcceptor =
[](irs::string_ref const&) -> bool { return true; };

- auto& acceptor = context == Serialization::Persistence || context == Serialization::Inventory
+ auto& acceptor =
+ (context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress || context == Serialization::Inventory)
? persistenceAcceptor
: propertiesAcceptor;

- if (context == Serialization::Persistence) {
+ if (context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress) {
if (arangodb::ServerState::instance()->isSingleServer()) {
auto res = arangodb::LogicalViewHelperStorageEngine::properties(builder, *this);

@@ -404,7 +405,7 @@ arangodb::Result IResearchView::appendVelocyPackImpl( // append JSON
return {};
}

- if (context == Serialization::Persistence) {
+ if (context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress) {
IResearchViewMetaState metaState;

for (auto& entry : _links) {
4 changes: 3 additions & 1 deletion arangod/IResearch/IResearchViewCoordinator.cpp
@@ -190,7 +190,9 @@ arangodb::Result IResearchViewCoordinator::appendVelocyPackImpl(

auto* acceptor = &propertiesAcceptor;

- if (context == Serialization::Persistence || context == Serialization::Inventory) {
+ if (context == Serialization::Persistence ||
+ context == Serialization::PersistenceWithInProgress ||
+ context == Serialization::Inventory) {
auto res = arangodb::LogicalViewHelperClusterInfo::properties(builder, *this);

if (!res.ok()) {
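Note: the check for a persistence-type context (Serialization::Persistence or Serialization::PersistenceWithInProgress) is now repeated in several serializers across this PR. A minimal sketch of a helper that could consolidate it; the helper is hypothetical and not part of the PR, and the enum below is a standalone stand-in for LogicalDataSource::Serialization with matching enumerator names:

#include <cassert>

// Stand-in for LogicalDataSource::Serialization (see arangod/VocBase/LogicalDataSource.h);
// the enumerator names match the real enum.
enum class Serialization { Properties, Persistence, PersistenceWithInProgress, Inventory };

// Hypothetical helper: both persistence flavours are handled identically at these
// call sites; they only differ in whether in-progress indexes are serialized.
constexpr bool isPersistenceContext(Serialization context) noexcept {
  return context == Serialization::Persistence ||
         context == Serialization::PersistenceWithInProgress;
}

int main() {
  assert(isPersistenceContext(Serialization::Persistence));
  assert(isPersistenceContext(Serialization::PersistenceWithInProgress));
  assert(!isPersistenceContext(Serialization::Properties));
  assert(!isPersistenceContext(Serialization::Inventory));
  return 0;
}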
6 changes: 3 additions & 3 deletions arangod/MMFiles/MMFilesCollection.cpp
@@ -296,7 +296,7 @@ arangodb::Result MMFilesCollection::persistProperties() {
try {
auto infoBuilder = _logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
- LogicalDataSource::Serialization::Persistence);
+ LogicalDataSource::Serialization::PersistenceWithInProgress);
MMFilesCollectionMarker marker(TRI_DF_MARKER_VPACK_CHANGE_COLLECTION,
_logicalCollection.vocbase().id(),
_logicalCollection.id(), infoBuilder.slice());
@@ -2284,7 +2284,7 @@ std::shared_ptr<Index> MMFilesCollection::createIndex(transaction::Methods& trx,
if (!engine->inRecovery()) {
auto builder = _logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
- LogicalDataSource::Serialization::Persistence);
+ LogicalDataSource::Serialization::PersistenceWithInProgress);
_logicalCollection.properties(builder.slice(),
false); // always a full-update
}
@@ -2422,7 +2422,7 @@ bool MMFilesCollection::dropIndex(TRI_idx_iid_t iid) {
{
auto builder = _logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
- LogicalDataSource::Serialization::Persistence);
+ LogicalDataSource::Serialization::PersistenceWithInProgress);

_logicalCollection.properties(builder.slice(),
false); // always a full-update
3 changes: 3 additions & 0 deletions arangod/RestHandler/RestIndexHandler.cpp
@@ -215,6 +215,9 @@ RestStatus RestIndexHandler::getSelectivityEstimates() {
builder.add(StaticStrings::Code, VPackValue(static_cast<int>(rest::ResponseCode::OK)));
builder.add("indexes", VPackValue(VPackValueType::Object));
for (std::shared_ptr<Index> idx : idxs) {
+ if (idx->inProgress() || idx->isHidden()) {
+ continue;
+ }
std::string name = coll->name();
name.push_back(TRI_INDEX_HANDLE_SEPARATOR_CHR);
name.append(std::to_string(idx->id()));
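With this guard, the selectivity-estimates response only reports indexes that are fully built and user-visible. A standalone sketch of the predicate, using a small stub in place of the real arangodb::Index class (the stub and its field names are illustrative only):

#include <cassert>

// Stub carrying the two flags the handler checks; the real Index class exposes
// inProgress() and isHidden() with the same meaning.
struct IndexStub {
  bool inProgress;
  bool hidden;
};

// Mirrors the guard added to RestIndexHandler::getSelectivityEstimates():
// indexes that are still being built or are hidden are skipped.
bool reportInEstimates(IndexStub const& idx) {
  return !idx.inProgress && !idx.hidden;
}

int main() {
  assert(reportInEstimates({false, false}));   // regular, finished index
  assert(!reportInEstimates({true, false}));   // still being built in background
  assert(!reportInEstimates({false, true}));   // internal/hidden index
  return 0;
}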
4 changes: 2 additions & 2 deletions arangod/RocksDBEngine/RocksDBCollection.cpp
@@ -466,7 +466,7 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(VPackSlice const& info,
if (!engine->inRecovery()) { // write new collection marker
auto builder = _logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
- LogicalDataSource::Serialization::Persistence);
+ LogicalDataSource::Serialization::PersistenceWithInProgress);
VPackBuilder indexInfo;
idx->toVelocyPack(indexInfo, Index::makeFlags(Index::Serialize::Internals));
res = engine->writeCreateCollectionMarker(_logicalCollection.vocbase().id(),
@@ -548,7 +548,7 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {
auto builder = // RocksDB path
_logicalCollection.toVelocyPackIgnore(
{"path", "statusString"},
- LogicalDataSource::Serialization::Persistence);
+ LogicalDataSource::Serialization::PersistenceWithInProgress);

// log this event in the WAL and in the collection meta-data
res = engine->writeCreateCollectionMarker( // write marker
12 changes: 6 additions & 6 deletions arangod/RocksDBEngine/RocksDBEngine.cpp
@@ -1241,7 +1241,7 @@ std::string RocksDBEngine::createCollection(TRI_vocbase_t& vocbase,

auto builder = collection.toVelocyPackIgnore(
{"path", "statusString"},
- LogicalDataSource::Serialization::Persistence);
+ LogicalDataSource::Serialization::PersistenceWithInProgress);
TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(cid));

int res =
@@ -1399,7 +1399,7 @@ void RocksDBEngine::changeCollection(TRI_vocbase_t& vocbase,
LogicalCollection const& collection, bool doSync) {
auto builder = collection.toVelocyPackIgnore(
{"path", "statusString"},
- LogicalDataSource::Serialization::Persistence);
+ LogicalDataSource::Serialization::PersistenceWithInProgress);
int res =
writeCreateCollectionMarker(vocbase.id(), collection.id(), builder.slice(),
RocksDBLogValue::CollectionChange(vocbase.id(),
@@ -1415,7 +1415,7 @@ arangodb::Result RocksDBEngine::renameCollection(TRI_vocbase_t& vocbase,
std::string const& oldName) {
auto builder = collection.toVelocyPackIgnore(
{"path", "statusString"},
- LogicalDataSource::Serialization::Persistence);
+ LogicalDataSource::Serialization::PersistenceWithInProgress);
int res = writeCreateCollectionMarker(
vocbase.id(), collection.id(), builder.slice(),
RocksDBLogValue::CollectionRename(vocbase.id(), collection.id(), arangodb::velocypack::StringRef(oldName)));
@@ -1442,7 +1442,7 @@ Result RocksDBEngine::createView(TRI_vocbase_t& vocbase, TRI_voc_cid_t id,
VPackBuilder props;

props.openObject();
- view.properties(props, LogicalDataSource::Serialization::Persistence);
+ view.properties(props, LogicalDataSource::Serialization::PersistenceWithInProgress);
props.close();

RocksDBValue const value = RocksDBValue::View(props.slice());
@@ -1467,7 +1467,7 @@ arangodb::Result RocksDBEngine::dropView(TRI_vocbase_t const& vocbase,
VPackBuilder builder;

builder.openObject();
- view.properties(builder, LogicalDataSource::Serialization::Persistence);
+ view.properties(builder, LogicalDataSource::Serialization::PersistenceWithInProgress);
builder.close();

auto logValue =
@@ -1512,7 +1512,7 @@ Result RocksDBEngine::changeView(TRI_vocbase_t& vocbase,
VPackBuilder infoBuilder;

infoBuilder.openObject();
- view.properties(infoBuilder, LogicalDataSource::Serialization::Persistence);
+ view.properties(infoBuilder, LogicalDataSource::Serialization::PersistenceWithInProgress);
infoBuilder.close();

RocksDBLogValue log = RocksDBLogValue::ViewChange(vocbase.id(), view.id());
29 changes: 20 additions & 9 deletions arangod/Transaction/Methods.cpp
@@ -565,6 +565,8 @@ std::pair<bool, bool> transaction::Methods::findIndexHandleForAndNode(
auto considerIndex = [&bestIndex, &bestCost, &bestSupportsFilter, &bestSupportsSort,
&indexes, node, reference, itemsInCollection,
&sortCondition](std::shared_ptr<Index> const& idx) -> void {
+ TRI_ASSERT(!idx->inProgress());
+
double filterCost = 0.0;
double sortCost = 0.0;
size_t itemsInIndex = itemsInCollection;
@@ -2941,6 +2943,8 @@ bool transaction::Methods::getIndexForSortCondition(

auto considerIndex = [reference, sortCondition, itemsInIndex, &bestCost, &bestIndex,
&coveredAttributes](std::shared_ptr<Index> const& idx) -> void {
+ TRI_ASSERT(!idx->inProgress());
+
Index::SortCosts costs =
idx->supportsSortCondition(sortCondition, reference, itemsInIndex);
if (costs.supportsCondition &&
@@ -3016,6 +3020,7 @@ std::unique_ptr<IndexIterator> transaction::Methods::indexScanForCondition(
}

// Now create the Iterator
+ TRI_ASSERT(!idx->inProgress());
return idx->iteratorForCondition(this, condition, var, opts);
}

@@ -3218,7 +3223,7 @@ Result transaction::Methods::unlockRecursive(TRI_voc_cid_t cid, AccessMode::Type

/// @brief get list of indexes for a collection
std::vector<std::shared_ptr<Index>> transaction::Methods::indexesForCollection(
- std::string const& collectionName, bool withHidden) {
+ std::string const& collectionName) {
if (_state->isCoordinator()) {
return indexesForCollectionCoordinator(collectionName);
}
@@ -3227,13 +3232,12 @@ std::vector<std::shared_ptr<Index>> transaction::Methods::indexesForCollection(
TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName, AccessMode::Type::READ);
LogicalCollection* document = documentCollection(trxCollection(cid));
std::vector<std::shared_ptr<Index>> indexes = document->getIndexes();
- if (!withHidden) {
- indexes.erase(std::remove_if(indexes.begin(), indexes.end(),
- [](std::shared_ptr<Index> x) {
- return x->isHidden();
- }),
- indexes.end());
- }
+
+ indexes.erase(std::remove_if(indexes.begin(), indexes.end(),
+ [](std::shared_ptr<Index> const& x) {
+ return x->isHidden();
+ }),
+ indexes.end());
return indexes;
}

@@ -3264,7 +3268,14 @@ std::vector<std::shared_ptr<Index>> transaction::Methods::indexesForCollectionCo
collection->clusterIndexEstimates(true);
}

- return collection->getIndexes();
+ std::vector<std::shared_ptr<Index>> indexes = collection->getIndexes();
+
+ indexes.erase(std::remove_if(indexes.begin(), indexes.end(),
+ [](std::shared_ptr<Index> const& x) {
+ return x->isHidden();
+ }),
+ indexes.end());
+ return indexes;
}

/// @brief get the index by it's identifier. Will either throw or
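The single-server and coordinator paths now strip hidden indexes with the same erase-remove block. A possible follow-up, not part of this PR, would be a small shared helper; a minimal sketch, where IndexT stands in for arangodb::Index:

#include <algorithm>
#include <memory>
#include <vector>

// Hypothetical helper: removes hidden indexes in place, using the same
// erase-remove idiom that indexesForCollection() and
// indexesForCollectionCoordinator() now duplicate.
// Usage: removeHiddenIndexes(indexes);
template <typename IndexT>
void removeHiddenIndexes(std::vector<std::shared_ptr<IndexT>>& indexes) {
  indexes.erase(std::remove_if(indexes.begin(), indexes.end(),
                               [](std::shared_ptr<IndexT> const& x) {
                                 return x->isHidden();
                               }),
                indexes.end());
}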
2 changes: 1 addition & 1 deletion arangod/Transaction/Methods.h
@@ -392,7 +392,7 @@ class Methods {

/// @brief get all indexes for a collection name
ENTERPRISE_VIRT std::vector<std::shared_ptr<arangodb::Index>> indexesForCollection(
- std::string const&, bool withHidden = false);
+ std::string const& collectionName);

/// @brief Lock all collections. Only works for selected sub-classes
virtual int lockCollections();
7 changes: 4 additions & 3 deletions arangod/VocBase/LogicalCollection.cpp
@@ -630,7 +630,8 @@ void LogicalCollection::toVelocyPackForClusterInventory(VPackBuilder& result,

arangodb::Result LogicalCollection::appendVelocyPack(arangodb::velocypack::Builder& result,
Serialization context) const {
- bool const forPersistence = (context == Serialization::Persistence);
+ bool const forPersistence = (context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress);
+ bool const showInProgress = (context == Serialization::PersistenceWithInProgress);

// We write into an open object
TRI_ASSERT(result.isOpenObject());
@@ -673,8 +674,8 @@ arangodb::Result LogicalCollection::appendVelocyPack(arangodb::velocypack::Build
if (forPersistence) {
indexFlags = Index::makeFlags(Index::Serialize::Internals);
}
- auto filter = [indexFlags, forPersistence](arangodb::Index const* idx, decltype(Index::makeFlags())& flags) {
- if (forPersistence || (!idx->inProgress() && !idx->isHidden())) {
+ auto filter = [indexFlags, forPersistence, showInProgress](arangodb::Index const* idx, decltype(Index::makeFlags())& flags) {
+ if ((forPersistence || !idx->isHidden()) && (showInProgress || !idx->inProgress())) {
flags = indexFlags;
return true;
}
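The rewritten filter separates the two visibility dimensions: hidden indexes are serialized for any persistence context, while in-progress indexes are only serialized when the context is PersistenceWithInProgress. A standalone sketch of that predicate, with plain bools standing in for the serialization context and the Index flags:

#include <cassert>

// forPersistence == (context is Persistence or PersistenceWithInProgress),
// showInProgress == (context is PersistenceWithInProgress), matching the
// booleans computed in LogicalCollection::appendVelocyPack().
bool serializeIndex(bool forPersistence, bool showInProgress,
                    bool isHidden, bool inProgress) {
  return (forPersistence || !isHidden) && (showInProgress || !inProgress);
}

int main() {
  // Properties/Inventory output: neither hidden nor in-progress indexes.
  assert(!serializeIndex(false, false, /*isHidden*/ true,  /*inProgress*/ false));
  assert(!serializeIndex(false, false, /*isHidden*/ false, /*inProgress*/ true));
  // Persistence: hidden indexes are written, in-progress indexes are not.
  assert(serializeIndex(true, false, /*isHidden*/ true,  /*inProgress*/ false));
  assert(!serializeIndex(true, false, /*isHidden*/ false, /*inProgress*/ true));
  // PersistenceWithInProgress: everything is written out.
  assert(serializeIndex(true, true, /*isHidden*/ true, /*inProgress*/ true));
  return 0;
}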
2 changes: 1 addition & 1 deletion arangod/VocBase/LogicalDataSource.cpp
@@ -210,7 +210,7 @@ Result LogicalDataSource::properties(velocypack::Builder& builder,
// note: includeSystem and forPersistence are not 100% synonymous,
// however, for our purposes this is an okay mapping; we only set
// includeSystem if we are persisting the properties
- if (context == Serialization::Persistence) {
+ if (context == Serialization::Persistence || context == Serialization::PersistenceWithInProgress) {
builder.add(StaticStrings::DataSourceDeleted, velocypack::Value(deleted()));
builder.add(StaticStrings::DataSourceSystem, velocypack::Value(system()));

2 changes: 2 additions & 0 deletions arangod/VocBase/LogicalDataSource.h
@@ -137,6 +137,8 @@ class LogicalDataSource {
Properties,
// object will be saved in storage engine
Persistence,
+ // object will be saved in storage engine
+ PersistenceWithInProgress,
// object will be replicated or dumped/restored
Inventory
};
130 changes: 130 additions & 0 deletions tests/js/common/shell/aql-index-usage-rocksdb.js
@@ -0,0 +1,130 @@
/*jshint globalstrict:false, strict:false */
/*global assertEqual, assertNotEqual, assertTrue */

////////////////////////////////////////////////////////////////////////////////
/// @brief test index usage
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2018, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

let jsunity = require("jsunity");

let arangodb = require("@arangodb");
let db = arangodb.db;
let tasks = require("@arangodb/tasks");

function IndexUsageSuite () {
const cnData = "UnitTestsCollection"; // used for test data
const cnComm = "UnitTestsCommunication"; // used for communication

return {

setUp : function () {
db._drop(cnData);
db._drop(cnComm);
db._create(cnData);
db._create(cnComm);

let docs = [];
for (let i = 0; i < 5000; ++i) {
docs.push({ value: "test" + i });
}
db[cnData].insert(docs);
},

tearDown : function () {
db._drop(cnData);
db._drop(cnComm);
},

testIndexUsage : function () {
let task = tasks.register({
command: function(params) {
require('jsunity').jsUnity.attachAssertions();
let db = require("internal").db;
let comm = db[params.cnComm];
let errors = require("@arangodb").errors;
comm.insert({ _key: "runner1", value: 0 });

while (!comm.exists("runner2")) {
require("internal").sleep(0.02);
}

let success = 0;
let time = require("internal").time;
let start = time();
do {
try {
db._query("FOR doc IN " + params.cnData + " FILTER doc.value > 10 LIMIT 10 RETURN doc");
comm.update("runner1", { value: ++success });
} catch (err) {
// if the index that was picked for the query is dropped in the meantime,
// we will get the following error back
assertEqual(err.errorNum, errors.ERROR_QUERY_BAD_JSON_PLAN.code);
}
} while (time() - start < 10.0);
},
params: { cnComm, cnData }
});

let comm = db[cnComm];
comm.insert({ _key: "runner2" });
while (!comm.exists("runner1")) {
require("internal").sleep(0.02);
}

let time = require("internal").time;
let start = time();
let success = 0;
do {
let indexes = db[cnData].indexes();
if (indexes.length > 1) {
db[cnData].dropIndex(indexes[1]);
}
db[cnData].ensureIndex({ type: "hash", fields: ["value"], inBackground: true });
++success;
} while (time() - start < 10.0);

while (true) {
try {
tasks.get(task);
require("internal").wait(0.25, false);
} catch (err) {
// "task not found" means the task is finished
break;
}
}

assertEqual(2, comm.count());
let doc = comm.document("runner1");
assertTrue(doc.value > 0, doc);
assertTrue(success > 0, success);
},

};
}

jsunity.run(IndexUsageSuite);

return jsunity.done();