From 5918899d969f2c7bfe497bfeae249a91346a4e03 Mon Sep 17 00:00:00 2001
From: maierlars
Date: Fri, 12 Mar 2021 09:49:11 +0100
Subject: [PATCH 1/2] Added support for unique constraint.

---
 arangod/RocksDBEngine/RocksDBIndexFactory.cpp |  45 ++++-
 arangod/RocksDBEngine/RocksDBKeyBounds.cpp    |   1 +
 arangod/RocksDBEngine/RocksDBTypes.cpp        |   8 +
 arangod/RocksDBEngine/RocksDBTypes.h          |   3 +-
 arangod/RocksDBEngine/RocksDBValue.cpp        |   5 +
 arangod/RocksDBEngine/RocksDBValue.h          |   1 +
 arangod/RocksDBEngine/RocksDBZkdIndex.cpp     | 156 +++++++++++++-----
 arangod/RocksDBEngine/RocksDBZkdIndex.h       |  23 ++-
 .../aql/aql-optimizer-zkdindex-multi.js       |   2 -
 9 files changed, 200 insertions(+), 44 deletions(-)

diff --git a/arangod/RocksDBEngine/RocksDBIndexFactory.cpp b/arangod/RocksDBEngine/RocksDBIndexFactory.cpp
index cb17d3ed1998..bfd3135c6374 100644
--- a/arangod/RocksDBEngine/RocksDBIndexFactory.cpp
+++ b/arangod/RocksDBEngine/RocksDBIndexFactory.cpp
@@ -263,6 +263,48 @@ struct SecondaryIndexFactory : public DefaultIndexFactory {
   }
 };
 
+struct ZkdIndexFactory : public DefaultIndexFactory {
+  ZkdIndexFactory(arangodb::application_features::ApplicationServer& server)
+      : DefaultIndexFactory(server, Index::TRI_IDX_TYPE_ZKD_INDEX) {}
+
+  std::shared_ptr<arangodb::Index> instantiate(arangodb::LogicalCollection& collection,
+                                               arangodb::velocypack::Slice const& definition,
+                                               IndexId id,
+                                               bool isClusterConstructor) const override {
+    if (auto isUnique = definition.get(StaticStrings::IndexUnique).isTrue(); isUnique) {
+      return std::make_shared<RocksDBUniqueZkdIndex>(id, collection, definition);
+    }
+
+    return std::make_shared<RocksDBZkdIndex>(id, collection, definition);
+  }
+
+  virtual arangodb::Result normalize(            // normalize definition
+      arangodb::velocypack::Builder& normalized,  // normalized definition (out-param)
+      arangodb::velocypack::Slice definition,     // source definition
+      bool isCreation,                            // definition for index creation
+      TRI_vocbase_t const& vocbase                // index vocbase
+  ) const override {
+    TRI_ASSERT(normalized.isOpenObject());
+    normalized.add(arangodb::StaticStrings::IndexType,
+                   arangodb::velocypack::Value(
+                       arangodb::Index::oldtypeName(Index::TRI_IDX_TYPE_ZKD_INDEX)));
+
+    if (isCreation && !ServerState::instance()->isCoordinator() &&
+        !definition.hasKey("objectId")) {
+      normalized.add("objectId",
+                     arangodb::velocypack::Value(std::to_string(TRI_NewTickServer())));
+    }
+
+    if (auto isSparse = definition.get(StaticStrings::IndexSparse).isTrue(); isSparse) {
+      THROW_ARANGO_EXCEPTION_MESSAGE(
+          TRI_ERROR_BAD_PARAMETER,
+          "zkd index does not support sparse property");
+    }
+
+    return IndexFactory::enhanceJsonIndexGeneric(definition, normalized, isCreation);
+  }
+};
+
 struct TtlIndexFactory : public DefaultIndexFactory {
   explicit TtlIndexFactory(arangodb::application_features::ApplicationServer& server,
                            arangodb::Index::IndexType type)
@@ -348,8 +390,7 @@ RocksDBIndexFactory::RocksDBIndexFactory(application_features::ApplicationServer
       server);
   static const TtlIndexFactory ttlIndexFactory(server, arangodb::Index::TRI_IDX_TYPE_TTL_INDEX);
   static const PrimaryIndexFactory primaryIndexFactory(server);
-  static const SecondaryIndexFactory zkdIndexFactory(
-      server);
+  static const ZkdIndexFactory zkdIndexFactory(server);
 
   emplace("edge", edgeIndexFactory);
   emplace("fulltext", fulltextIndexFactory);
diff --git a/arangod/RocksDBEngine/RocksDBKeyBounds.cpp b/arangod/RocksDBEngine/RocksDBKeyBounds.cpp
index d14574d024f8..c529948a84a2 100644
--- a/arangod/RocksDBEngine/RocksDBKeyBounds.cpp
+++ b/arangod/RocksDBEngine/RocksDBKeyBounds.cpp
@@ -229,6 +229,7 @@ rocksdb::ColumnFamilyHandle* RocksDBKeyBounds::columnFamily() const {
     case RocksDBEntryType::LegacyGeoIndexValue:
     case RocksDBEntryType::GeoIndexValue:
     case RocksDBEntryType::ZkdIndexValue:
+    case RocksDBEntryType::UniqueZkdIndexValue:
       return RocksDBColumnFamilyManager::get(RocksDBColumnFamilyManager::Family::GeoIndex);
     case RocksDBEntryType::Database:
     case RocksDBEntryType::Collection:
diff --git a/arangod/RocksDBEngine/RocksDBTypes.cpp b/arangod/RocksDBEngine/RocksDBTypes.cpp
index d1da9a2ccd6e..38b24a2d7218 100644
--- a/arangod/RocksDBEngine/RocksDBTypes.cpp
+++ b/arangod/RocksDBEngine/RocksDBTypes.cpp
@@ -105,6 +105,10 @@ static rocksdb::Slice RevisionTreeValue(
 static RocksDBEntryType zkdIndexValue = RocksDBEntryType::ZkdIndexValue;
 static rocksdb::Slice ZdkIndexValue(
     reinterpret_cast<std::underlying_type<RocksDBEntryType>::type*>(&zkdIndexValue), 1);
+
+static RocksDBEntryType uniqueZkdIndexValue = RocksDBEntryType::UniqueZkdIndexValue;
+static rocksdb::Slice UniqueZdkIndexValue(
+    reinterpret_cast<std::underlying_type<RocksDBEntryType>::type*>(&uniqueZkdIndexValue), 1);
 }  // namespace
 
 char const* arangodb::rocksDBEntryTypeName(arangodb::RocksDBEntryType type) {
@@ -147,6 +151,8 @@ char const* arangodb::rocksDBEntryTypeName(arangodb::RocksDBEntryType type) {
       return "RevisionTreeValue";
     case arangodb::RocksDBEntryType::ZkdIndexValue:
       return "ZkdIndexValue";
+    case arangodb::RocksDBEntryType::UniqueZkdIndexValue:
+      return "UniqueZkdIndexValue";
   }
   return "Invalid";
 }
@@ -247,6 +253,8 @@ rocksdb::Slice const& arangodb::rocksDBSlice(RocksDBEntryType const& type) {
       return RevisionTreeValue;
     case RocksDBEntryType::ZkdIndexValue:
       return ZdkIndexValue;
+    case RocksDBEntryType::UniqueZkdIndexValue:
+      return UniqueZdkIndexValue;
   }
 
   return Placeholder;  // avoids warning - errorslice instead ?!
diff --git a/arangod/RocksDBEngine/RocksDBTypes.h b/arangod/RocksDBEngine/RocksDBTypes.h
index f0353f1ac526..643ab2f80e86 100644
--- a/arangod/RocksDBEngine/RocksDBTypes.h
+++ b/arangod/RocksDBEngine/RocksDBTypes.h
@@ -54,7 +54,8 @@ enum class RocksDBEntryType : char {
   View = '>',
   GeoIndexValue = '?',
   RevisionTreeValue = '@',
-  ZkdIndexValue = 'Z'
+  ZkdIndexValue = 'z',
+  UniqueZkdIndexValue = 'Z'
 };
 
 char const* rocksDBEntryTypeName(RocksDBEntryType);
diff --git a/arangod/RocksDBEngine/RocksDBValue.cpp b/arangod/RocksDBEngine/RocksDBValue.cpp
index 9713bbedc59e..a8e353f8b455 100644
--- a/arangod/RocksDBEngine/RocksDBValue.cpp
+++ b/arangod/RocksDBEngine/RocksDBValue.cpp
@@ -59,6 +59,10 @@ RocksDBValue RocksDBValue::ZkdIndexValue() {
   return RocksDBValue(RocksDBEntryType::ZkdIndexValue);
 }
 
+RocksDBValue RocksDBValue::UniqueZkdIndexValue(LocalDocumentId const& docId) {
+  return RocksDBValue(RocksDBEntryType::UniqueZkdIndexValue, docId, RevisionId::none());
+}
+
 RocksDBValue RocksDBValue::UniqueVPackIndexValue(LocalDocumentId const& docId) {
   return RocksDBValue(RocksDBEntryType::UniqueVPackIndexValue, docId, RevisionId::none());
 }
@@ -152,6 +156,7 @@ RocksDBValue::RocksDBValue(RocksDBEntryType type, LocalDocumentId const& docId,
     : _type(type), _buffer() {
   switch (_type) {
     case RocksDBEntryType::UniqueVPackIndexValue:
+    case RocksDBEntryType::UniqueZkdIndexValue:
     case RocksDBEntryType::PrimaryIndexValue: {
       if (!revision) {
         _buffer.reserve(sizeof(uint64_t));
diff --git a/arangod/RocksDBEngine/RocksDBValue.h b/arangod/RocksDBEngine/RocksDBValue.h
index b17e0d8038c3..b802b8a23bfa 100644
--- a/arangod/RocksDBEngine/RocksDBValue.h
+++ b/arangod/RocksDBEngine/RocksDBValue.h
@@ -55,6 +55,7 @@ class RocksDBValue {
   static RocksDBValue EdgeIndexValue(arangodb::velocypack::StringRef const& vertexId);
   static RocksDBValue VPackIndexValue();
   static RocksDBValue ZkdIndexValue();
+  static RocksDBValue UniqueZkdIndexValue(LocalDocumentId const& docId);
   static RocksDBValue UniqueVPackIndexValue(LocalDocumentId const& docId);
   static RocksDBValue View(VPackSlice const& data);
   static RocksDBValue ReplicationApplierConfig(VPackSlice const& data);
diff --git a/arangod/RocksDBEngine/RocksDBZkdIndex.cpp b/arangod/RocksDBEngine/RocksDBZkdIndex.cpp
index ed18acefaf60..19a7b317301f 100644
--- a/arangod/RocksDBEngine/RocksDBZkdIndex.cpp
+++ b/arangod/RocksDBEngine/RocksDBZkdIndex.cpp
@@ -26,6 +26,7 @@
 #include
 #include
+#include
 #include "RocksDBColumnFamilyManager.h"
 #include "RocksDBMethods.h"
 #include "RocksDBZkdIndex.h"
@@ -52,9 +53,11 @@ auto coordsToVector(zkd::byte_string_view bs, size_t dim) -> std::vector
 }*/
 
 namespace arangodb {
+
+template <bool isUnique>
 class RocksDBZkdIndexIterator final : public IndexIterator {
  public:
-  RocksDBZkdIndexIterator(LogicalCollection* collection, RocksDBZkdIndex* index,
+  RocksDBZkdIndexIterator(LogicalCollection* collection, RocksDBZkdIndexBase* index,
                           transaction::Methods* trx, zkd::byte_string min,
                           zkd::byte_string max, std::size_t dim)
       : IndexIterator(collection, trx),
@@ -109,7 +112,13 @@ class RocksDBZkdIndexIterator final : public IndexIterator {
           _iterState = IterState::SEEK_ITER_TO_CUR;
         }
       } else {
-        auto const documentId = RocksDBKey::indexDocumentId(rocksKey);
+        auto const documentId = std::invoke([&] {
+          if constexpr (isUnique) {
+            return RocksDBValue::documentId(_iter->value());
+          } else {
+            return RocksDBKey::indexDocumentId(rocksKey);
+          }
+        });
         std::ignore = callback(documentId);
         ++i;
         _iter->Next();
@@ -146,7 +155,7 @@ class RocksDBZkdIndexIterator final : public IndexIterator {
   IterState _iterState = IterState::SEEK_ITER_TO_CUR;
 
   std::unique_ptr<rocksdb::Iterator> _iter;
-  RocksDBZkdIndex* _index = nullptr;
+  RocksDBZkdIndexBase* _index = nullptr;
 };
 }  // namespace arangodb
 
@@ -193,7 +202,7 @@ auto readDocumentKey(VPackSlice doc,
     }
     auto dv = value.getNumericValue();
     if (std::isnan(dv)) {
-      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, "NaN is not allowed");
+      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_INVALID_ARITHMETIC_VALUE, "NaN is not allowed");
     }
     v.emplace_back(convertDouble(dv));
   }
@@ -201,6 +210,43 @@
   return zkd::interleave(v);
 }
 
+auto boundsForIterator(arangodb::Index const* index, const arangodb::aql::AstNode* node,
+                       const arangodb::aql::Variable* reference,
+                       const arangodb::IndexIteratorOptions& opts)
+    -> std::pair<zkd::byte_string, zkd::byte_string> {
+  TRI_ASSERT(node->type == arangodb::aql::NODE_TYPE_OPERATOR_NARY_AND);
+
+  std::unordered_map<size_t, zkd::ExpressionBounds> extractedBounds;
+  std::unordered_set<arangodb::aql::AstNode const*> unusedExpressions;
+  extractBoundsFromCondition(index, node, reference, extractedBounds, unusedExpressions);
+
+  TRI_ASSERT(unusedExpressions.empty());
+
+  const size_t dim = index->fields().size();
+  std::vector<zkd::byte_string> min;
+  min.resize(dim);
+  std::vector<zkd::byte_string> max;
+  max.resize(dim);
+
+  static const auto ByteStringPosInfinity = zkd::byte_string{std::byte{0x80}};
+  static const auto ByteStringNegInfinity = zkd::byte_string{std::byte{0}};
+
+  for (auto&& [idx, field] : enumerate(index->fields())) {
+    if (auto it = extractedBounds.find(idx); it != extractedBounds.end()) {
+      auto const& bounds = it->second;
+      min[idx] = nodeExtractDouble(bounds.lower.bound_value).value_or(ByteStringNegInfinity);
+      max[idx] = nodeExtractDouble(bounds.upper.bound_value).value_or(ByteStringPosInfinity);
+    } else {
+      min[idx] = ByteStringNegInfinity;
+      max[idx] = ByteStringPosInfinity;
+    }
+  }
+
+  TRI_ASSERT(min.size() == dim);
+  TRI_ASSERT(max.size() == dim);
+
+  return std::make_pair(zkd::interleave(min), zkd::interleave(max));
+}
 }  // namespace
 
@@ -368,7 +414,7 @@ auto zkd::specializeCondition(arangodb::Index const* index, arangodb::aql::AstNo
 }
 
-arangodb::Result arangodb::RocksDBZkdIndex::insert(
+arangodb::Result arangodb::RocksDBZkdIndexBase::insert(
     arangodb::transaction::Methods& trx, arangodb::RocksDBMethods* methods,
     const arangodb::LocalDocumentId& documentId, arangodb::velocypack::Slice doc,
     const arangodb::OperationOptions& options) {
@@ -389,7 +435,7 @@ arangodb::Result arangodb::RocksDBZkdIndex::insert(
   return Result();
 }
 
-arangodb::Result arangodb::RocksDBZkdIndex::remove(arangodb::transaction::Methods& trx,
+arangodb::Result arangodb::RocksDBZkdIndexBase::remove(arangodb::transaction::Methods& trx,
                                                    arangodb::RocksDBMethods* methods,
                                                    const arangodb::LocalDocumentId& documentId,
                                                    arangodb::velocypack::Slice doc) {
@@ -410,7 +456,7 @@ arangodb::Result arangodb::RocksDBZkdIndex::remove(arangodb::transaction::Method
   return Result();
 }
 
-arangodb::RocksDBZkdIndex::RocksDBZkdIndex(arangodb::IndexId iid,
+arangodb::RocksDBZkdIndexBase::RocksDBZkdIndexBase(arangodb::IndexId iid,
                                            arangodb::LogicalCollection& coll,
                                            const arangodb::velocypack::Slice& info)
     : RocksDBIndex(iid, coll, info,
@@ -420,15 +466,14 @@ arangodb::RocksDBZkdIndex::RocksDBZkdIndex(arangodb::IndexId iid,
                    RocksDBColumnFamilyManager::get(RocksDBColumnFamilyManager::Family::GeoIndex),
                    false) {}
 
-void arangodb::RocksDBZkdIndex::toVelocyPack(
+void arangodb::RocksDBZkdIndexBase::toVelocyPack(
     arangodb::velocypack::Builder& builder,
     std::underlying_type<Index::Serialize>::type type) const {
   VPackObjectBuilder ob(&builder);
   RocksDBIndex::toVelocyPack(builder, type);
-  builder.add("dimension", VPackValue(_fields.size()));
 }
 
-arangodb::Index::FilterCosts arangodb::RocksDBZkdIndex::supportsFilterCondition(
+arangodb::Index::FilterCosts arangodb::RocksDBZkdIndexBase::supportsFilterCondition(
     const std::vector<std::shared_ptr<arangodb::Index>>& allIndexes,
     const arangodb::aql::AstNode* node,
     const arangodb::aql::Variable* reference, size_t itemsInIndex) const {
@@ -436,46 +481,83 @@ arangodb::Index::FilterCosts arangodb::RocksDBZkdIndex::supportsFilterCondition(
   return zkd::supportsFilterCondition(this, allIndexes, node, reference, itemsInIndex);
 }
 
-arangodb::aql::AstNode* arangodb::RocksDBZkdIndex::specializeCondition(
+arangodb::aql::AstNode* arangodb::RocksDBZkdIndexBase::specializeCondition(
    arangodb::aql::AstNode* condition, const arangodb::aql::Variable* reference) const {
   return zkd::specializeCondition(this, condition, reference);
 }
 
-std::unique_ptr<IndexIterator> arangodb::RocksDBZkdIndex::iteratorForCondition(
+std::unique_ptr<IndexIterator> arangodb::RocksDBZkdIndexBase::iteratorForCondition(
     arangodb::transaction::Methods* trx, const arangodb::aql::AstNode* node,
     const arangodb::aql::Variable* reference, const arangodb::IndexIteratorOptions& opts) {
-  TRI_ASSERT(node->type == arangodb::aql::NODE_TYPE_OPERATOR_NARY_AND);
+  auto&& [min, max] = boundsForIterator(this, node, reference, opts);
 
-  std::unordered_map<size_t, zkd::ExpressionBounds> extractedBounds;
-  std::unordered_set<aql::AstNode const*> unusedExpressions;
-  extractBoundsFromCondition(this, node, reference, extractedBounds, unusedExpressions);
+  return std::make_unique<RocksDBZkdIndexIterator<false>>(&_collection, this, trx,
+                                                          std::move(min), std::move(max),
+                                                          fields().size());
+}
 
-  TRI_ASSERT(unusedExpressions.empty());
 
-  const size_t dim = _fields.size();
-  std::vector<zkd::byte_string> min;
-  min.resize(dim);
-  std::vector<zkd::byte_string> max;
-  max.resize(dim);
 
+std::unique_ptr<IndexIterator> arangodb::RocksDBUniqueZkdIndex::iteratorForCondition(
+    arangodb::transaction::Methods* trx, const arangodb::aql::AstNode* node,
+    const arangodb::aql::Variable* reference, const arangodb::IndexIteratorOptions& opts) {
-  static const auto ByteStringPosInfinity = zkd::byte_string {std::byte{0x80}};
-  static const auto ByteStringNegInfinity = zkd::byte_string {std::byte{0}};
+  auto&& [min, max] = boundsForIterator(this, node, reference, opts);
 
-  for (auto&& [idx, field] : enumerate(fields())) {
-    if (auto it = extractedBounds.find(idx); it != extractedBounds.end()) {
-      auto const& bounds = it->second;
-      min[idx] = nodeExtractDouble(bounds.lower.bound_value).value_or(ByteStringNegInfinity);
-      max[idx] = nodeExtractDouble(bounds.upper.bound_value).value_or(ByteStringPosInfinity);
-    } else {
-      min[idx] = ByteStringNegInfinity;
-      max[idx] = ByteStringPosInfinity;
+  return std::make_unique<RocksDBZkdIndexIterator<true>>(&_collection, this, trx,
+                                                         std::move(min), std::move(max),
+                                                         fields().size());
+}
+
+
+arangodb::Result arangodb::RocksDBUniqueZkdIndex::insert(
+    arangodb::transaction::Methods& trx, arangodb::RocksDBMethods* methods,
+    const arangodb::LocalDocumentId& documentId,
+    arangodb::velocypack::Slice doc, const arangodb::OperationOptions& options) {
+  TRI_ASSERT(_unique == true);
+  TRI_ASSERT(_sparse == false);
+
+  auto key_value = readDocumentKey(doc, _fields);
+
+  RocksDBKey rocks_key;
+  rocks_key.constructZkdIndexValue(objectId(), key_value);
+
+  if (!options.checkUniqueConstraintsInPreflight) {
+    transaction::StringLeaser leased(&trx);
+    rocksdb::PinnableSlice existing(leased.get());
+    if (auto s = methods->GetForUpdate(_cf, rocks_key.string(), &existing); s.ok()) {  // detected conflicting index entry
+      return Result(TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED);
+    } else if (!s.IsNotFound()) {
+      return Result(rocksutils::convertStatus(s));
     }
   }
 
-  TRI_ASSERT(min.size() == dim);
-  TRI_ASSERT(max.size() == dim);
-  return std::make_unique<RocksDBZkdIndexIterator>(&_collection, this, trx,
-                                                   zkd::interleave(min),
-                                                   zkd::interleave(max), dim);
+  auto value = RocksDBValue::UniqueZkdIndexValue(documentId);
+
+  if (auto s = methods->PutUntracked(_cf, rocks_key, value.string()); !s.ok()) {
+    return rocksutils::convertStatus(s);
+  }
+
+  return Result();
+}
+
+arangodb::Result arangodb::RocksDBUniqueZkdIndex::remove(arangodb::transaction::Methods& trx,
+                                                         arangodb::RocksDBMethods* methods,
+                                                         const arangodb::LocalDocumentId& documentId,
+                                                         arangodb::velocypack::Slice doc) {
+  TRI_ASSERT(_unique == true);
+  TRI_ASSERT(_sparse == false);
+
+  auto key_value = readDocumentKey(doc, _fields);
+
+  RocksDBKey rocks_key;
+  rocks_key.constructZkdIndexValue(objectId(), key_value);
+
+  auto value = RocksDBValue::ZkdIndexValue();
+  auto s = methods->SingleDelete(_cf, rocks_key);
+  if (!s.ok()) {
+    return rocksutils::convertStatus(s);
+  }
+
+  return Result();
 }
diff --git a/arangod/RocksDBEngine/RocksDBZkdIndex.h b/arangod/RocksDBEngine/RocksDBZkdIndex.h
index 9d6a6e401118..16f2d68051ab 100644
--- a/arangod/RocksDBEngine/RocksDBZkdIndex.h
+++ b/arangod/RocksDBEngine/RocksDBZkdIndex.h
@@ -29,10 +29,10 @@ namespace arangodb {
 
-class RocksDBZkdIndex final : public RocksDBIndex {
+class RocksDBZkdIndexBase : public RocksDBIndex {
  public:
-  RocksDBZkdIndex(IndexId iid, LogicalCollection& coll,
+  RocksDBZkdIndexBase(IndexId iid, LogicalCollection& coll,
                   arangodb::velocypack::Slice const& info);
   void toVelocyPack(velocypack::Builder& builder,
                     std::underlying_type<Index::Serialize>::type type) const override;
@@ -61,6 +61,25 @@ class RocksDBZkdIndex final : public RocksDBIndex {
                                               const IndexIteratorOptions& opts) override;
 };
 
+class RocksDBZkdIndex final : public RocksDBZkdIndexBase {
+  using RocksDBZkdIndexBase::RocksDBZkdIndexBase;
+};
+
+class RocksDBUniqueZkdIndex final : public RocksDBZkdIndexBase {
+  using RocksDBZkdIndexBase::RocksDBZkdIndexBase;
+
+  Result insert(transaction::Methods& trx, RocksDBMethods* methods,
+                const LocalDocumentId& documentId, arangodb::velocypack::Slice doc,
+                const OperationOptions& options) override;
+  Result remove(transaction::Methods& trx, RocksDBMethods* methods,
+                const LocalDocumentId& documentId, arangodb::velocypack::Slice doc) override;
+
+  std::unique_ptr<IndexIterator> iteratorForCondition(transaction::Methods* trx,
+                                                      const aql::AstNode* node,
+                                                      const aql::Variable* reference,
+                                                      const IndexIteratorOptions& opts) override;
+};
+
 namespace zkd {
 
 struct ExpressionBounds {
diff --git a/tests/js/server/aql/aql-optimizer-zkdindex-multi.js b/tests/js/server/aql/aql-optimizer-zkdindex-multi.js
index 1e67f6619e71..97bc0802d5e5 100644
--- a/tests/js/server/aql/aql-optimizer-zkdindex-multi.js
+++ b/tests/js/server/aql/aql-optimizer-zkdindex-multi.js
@@ -113,8 +113,6 @@ function optimizerRuleZkd2dIndexTestSuite() {
         tearDownAll: function () {
             col.drop();
         },
-
-
     };
 
     for (let x of ["none", "eq"]) {

From 2372f61e80070c211397ff7f5b85b4d9bed7759c Mon Sep 17 00:00:00 2001
From: maierlars
Date: Fri, 12 Mar 2021 10:01:35 +0100
Subject: [PATCH 2/2] Added tests for unique constraints.

---
 .../aql/aql-optimizer-zkdindex-unique.js      | 110 ++++++++++++++++++
 1 file changed, 110 insertions(+)
 create mode 100644 tests/js/server/aql/aql-optimizer-zkdindex-unique.js

diff --git a/tests/js/server/aql/aql-optimizer-zkdindex-unique.js b/tests/js/server/aql/aql-optimizer-zkdindex-unique.js
new file mode 100644
index 000000000000..5976e11dbe8f
--- /dev/null
+++ b/tests/js/server/aql/aql-optimizer-zkdindex-unique.js
@@ -0,0 +1,110 @@
+/* global AQL_EXPLAIN, AQL_EXECUTE, fail */
+////////////////////////////////////////////////////////////////////////////////
+/// DISCLAIMER
+///
+/// Copyright 2021-2021 ArangoDB GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Tobias Gödderz
+////////////////////////////////////////////////////////////////////////////////
+
+'use strict';
+
+const jsunity = require("jsunity");
+const arangodb = require("@arangodb");
+const internal = require("internal");
+const db = arangodb.db;
+const aql = arangodb.aql;
+const {assertTrue, assertFalse, assertEqual} = jsunity.jsUnity.assertions;
+const _ = require("lodash");
+
+const useIndexes = 'use-indexes';
+const removeFilterCoveredByIndex = "remove-filter-covered-by-index";
+const moveFiltersIntoEnumerate = "move-filters-into-enumerate";
+
+function optimizerRuleZkd2dIndexTestSuite() {
+  const colName = 'UnitTestZkdIndexCollection';
+  let col;
+
+  return {
+    setUpAll: function () {
+      col = db._create(colName);
+      col.ensureIndex({type: 'zkd', name: 'zkdIndex', fields: ['x', 'y'], unique: true});
+      // Insert 1001 points
+      // (-50, -49.5), (-49.9, -49.4), ..., (0, 0.5), ..., (49.9, 50.4), (50, 50.5)
+      db._query(aql`
+        FOR i IN 0..1000
+          LET x = (i - 500) / 10
+          LET y = x + 0.5
+          INSERT {x, y, i} INTO ${col}
+      `);
+    },
+
+    tearDownAll: function () {
+      col.drop();
+    },
+
+    testIndexAccess: function () {
+      const query = aql`
+        FOR d IN ${col}
+          FILTER 0 <= d.x && d.x <= 1
+          RETURN d.x
+      `;
+      const explainRes = AQL_EXPLAIN(query.query, query.bindVars);
+      const appliedRules = explainRes.plan.rules;
+      const nodeTypes = explainRes.plan.nodes.map(n => n.type).filter(n => !["GatherNode", "RemoteNode"].includes(n));
+      assertEqual(["SingletonNode", "IndexNode", "CalculationNode", "ReturnNode"], nodeTypes);
+      assertTrue(appliedRules.includes(useIndexes));
+      assertTrue(appliedRules.includes(removeFilterCoveredByIndex));
+      const executeRes = AQL_EXECUTE(query.query, query.bindVars);
+      const res = executeRes.json;
+      res.sort();
+      assertEqual([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1], res);
+    },
+
+    testIndexAccess2: function () {
+      const query = aql`
+        FOR d IN ${col}
+          FILTER 0 <= d.x && d.y <= 1
+          RETURN d.x
+      `;
+      const explainRes = AQL_EXPLAIN(query.query, query.bindVars);
+      const appliedRules = explainRes.plan.rules;
+      const nodeTypes = explainRes.plan.nodes.map(n => n.type).filter(n => !["GatherNode", "RemoteNode"].includes(n));
+      assertEqual(["SingletonNode", "IndexNode", "CalculationNode", "ReturnNode"], nodeTypes);
+      assertTrue(appliedRules.includes(useIndexes));
+      assertTrue(appliedRules.includes(removeFilterCoveredByIndex));
+      const executeRes = AQL_EXECUTE(query.query, query.bindVars);
+      const res = executeRes.json;
+      res.sort();
+      assertEqual([0, 0.1, 0.2, 0.3, 0.4, 0.5], res);
+    },
+
+    testUniqueConstraint: function () {
+      col.save({x: 0, y: 0.50001});
+      try {
+        col.save({x: 0, y: 0.5});
+        fail();
+      } catch (e) {
+        assertEqual(e.errorNum, internal.errors.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code);
+      }
+    }
+  };
+}
+
+jsunity.run(optimizerRuleZkd2dIndexTestSuite);
+
+return jsunity.done();
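
Editor's note (not part of the patch): the following arangosh snippet is a minimal usage sketch of the unique zkd index added above. The collection name "points" and the print call are made up for illustration; the index definition, the rejection of sparse, and the 1210 error code are taken directly from the patch and its new test.

// arangosh sketch, assumes a server built with this patch; "points" is a hypothetical collection
const db = require("@arangodb").db;
const internal = require("internal");

const col = db._create("points");
// unique: true makes ZkdIndexFactory instantiate RocksDBUniqueZkdIndex;
// sparse: true would be rejected with TRI_ERROR_BAD_PARAMETER by its normalize()
col.ensureIndex({type: "zkd", name: "zkdUnique", fields: ["x", "y"], unique: true});

col.save({x: 1.0, y: 2.0});     // first document for the point (1.0, 2.0) succeeds
try {
  col.save({x: 1.0, y: 2.0});   // same coordinates again: RocksDBUniqueZkdIndex::insert finds the existing key via GetForUpdate
} catch (e) {
  // expected: unique constraint violation, error 1210
  print(e.errorNum === internal.errors.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code);
}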