From 532d00b43f1fae2faca192e9c453a4b56071b437 Mon Sep 17 00:00:00 2001 From: Vadim Kondratev Date: Thu, 23 Jan 2025 12:54:34 +0100 Subject: [PATCH 01/19] arangodb release 3.12.4 --- ARANGO-VERSION | 2 +- CHANGELOG | 299 +- CMakeLists.txt | 2 +- .../system/_admin/aardvark/APP/api-docs.json | 33766 ++++++++++------ 4 files changed, 20674 insertions(+), 13395 deletions(-) diff --git a/ARANGO-VERSION b/ARANGO-VERSION index f27bf615c653..455808f8e199 100644 --- a/ARANGO-VERSION +++ b/ARANGO-VERSION @@ -1 +1 @@ -3.12.4-devel +3.12.4 diff --git a/CHANGELOG b/CHANGELOG index ec0afda410f3..a06c85419db2 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,17 +1,19 @@ -devel ------ +3.12.4 (2025-01-23) +------------------- * Update tzdata as of 23.01.2025. -* FE-488: fix stale variables in execution hook (Keyboard Shortcut > useEffect deps) +* FE-488: fix stale variables in execution hook (Keyboard Shortcut > useEffect + deps). -* FE-489: fix undefined check at Query Editor UI > Options tab > disabled rules and improve type safety. +* FE-489: fix undefined check at Query Editor UI > Options tab > disabled rules + and improve type safety. -* update iresearch 3rd party for clang-19 build +* Update iresearch 3rd party for clang-19 build. -* Let `arangorestore` restore vector indexes **after** data import. This - is necessary, since a vector index can only be created if enough data - for learning is available. +* Let `arangorestore` restore vector indexes **after** data import. This is + necessary, since a vector index can only be created if enough data for + learning is available. * Make `RestImportHandler` asynchronous. This is to avoid some blockages observed in the context of BTS-2047. @@ -19,11 +21,11 @@ devel * Make `arangoimport` more stable. The default batch size is lowered to 4MB (from 8MB) and the default maximal streaming transaction size is increased from 128MB to 512MB. 
In examples we have seen that the overhead from input - data size to the memory usage in the actual RocksDB transaction can be up - to 80x, therefore `arangoimport` could run into resource limits with the - normal batch size. This is particularly acute for smart edge collections, - where `arangoimport` has to use streaming transactions and where small - edges have to be indexed multiple times. Fixes BTS-2047. + data size to the memory usage in the actual RocksDB transaction can be up to + 80x, therefore `arangoimport` could run into resource limits with the normal + batch size. This is particularly acute for smart edge collections, where + `arangoimport` has to use streaming transactions and where small edges have to + be indexed multiple times. Fixes BTS-2047. * Add optional `factory` property to `params` in vector index definition. The index factory enables to create composite vector indexes supported by FAISS. @@ -44,10 +46,11 @@ devel "factory": "IVF3800_HNSW32,PQ480x8" } }); + ``` -* Improve geo index performance in the cluster with multiple shards. This - fixes BTS-2046. An unnecessary and bad SORT node is removed from the - query plan in the case that a geo index is used. +* Improve geo index performance in the cluster with multiple shards. This fixes + BTS-2046. An unnecessary and bad SORT node is removed from the query plan in + the case that a geo index is used. * Fail queries that use `APPROX_NEAR` functions but do not apply the vector index. @@ -59,10 +62,10 @@ devel COLLECT x = doc.x, y = doc.y RETURN [x, y] ``` - can now employ a persistent index on `["x", "y"]` to drastically speed up - the query. The new optimizer rule is called `use-index-for-collect`. Whether the - rule is applied depends on the selectivity of the index. If the selectivity - is too high, i.e. there are a lot of distinct values, the rule is not applied + can now employ a persistent index on `["x", "y"]` to drastically speed up the + query. 
The new optimizer rule is called `use-index-for-collect`. Whether the + rule is applied depends on the selectivity of the index. If the selectivity is + too high, i.e. there are a lot of distinct values, the rule is not applied since scanning for distinct values in such a case would not be beneficial. This optimization can be disabled for specific collect statements, using the @@ -71,8 +74,8 @@ devel Currently this only works if there is no `INTO` or `AGGREGATE` clause. Filtering is also not yet supported. -* For collect a new PUSH aggregator was added. It aggregates all values into - an array. For example +* For collect a new PUSH aggregator was added. It aggregates all values into an + array. For example: ``` FOR doc IN col COLLECT x = doc.x AGGREGATE y = PUSH(doc.y) @@ -95,38 +98,39 @@ devel * Introduce new level of index serialization `Maintenance` that excludes trainedData in vector index. -* Enable passing `nProbe` as additional search parameter for APPROX_NEAR - vector functions. `nProbe` defines how many neighboring lists to look into, - it is a trade off between more correct results and time it takes to fetch - results. The syntax looks like this: - ```aql +* Enable passing `nProbe` as additional search parameter for APPROX_NEAR vector + functions. `nProbe` defines how many neighboring lists to look into, it is a + trade off between more correct results and time it takes to fetch results. The + syntax looks like this: + ``` FOR doc IN col LET distance = APPROX_NEAR_L2(doc.vector, @qp, {nProbe: 20}) SORT distance LIMIT 5 RETURN {key: doc._key, distance} - ```` + ``` -* Test a collection before it is leased out from the ConnectionCache. - This helps to detect stale TCP/IP or TLS connections residing in the - ConnectionCache and thus prevents network failures due to delays. +* Test a collection before it is leased out from the ConnectionCache. 
This helps + to detect stale TCP/IP or TLS connections residing in the ConnectionCache and + thus prevents network failures due to delays. -* [ES-2294] Fix computation of the arangodb_vocbase_shards_read_only_by_write_concern metric. +* [ES-2294] Fix computation of the + arangodb_vocbase_shards_read_only_by_write_concern metric. -* Enable passing `nProbe` as additional search parameter for APPROX_NEAR - vector functions. `nProbe` defines how many neighboring lists to look into, - it is a trade off between more correct results and time it takes to fetch - results. The syntax looks like this: - ```aql +* Enable passing `nProbe` as additional search parameter for APPROX_NEAR vector + functions. `nProbe` defines how many neighboring lists to look into, it is a + trade off between more correct results and time it takes to fetch results. The + syntax looks like this: + ``` FOR doc IN col LET distance = APPROX_NEAR_L2(doc.vector, @qp, {nProbe: 20}) SORT distance LIMIT 5 RETURN {key: doc._key, distance} - ```` + ``` -* Add a vector index based on faiss implementation. The current vector index +* Add a vector index based on faiss implementation. The current vector index implementation supports the L2 and cosine metrics. To create a vector index, - the vector field(s) in the collection must already be populated with data. Here is an - example of a vector index creation: + the vector field(s) in the collection must already be populated with data. 
+ Here is an example of a vector index creation: ``` db..ensureIndex({ type: "vector", @@ -137,65 +141,65 @@ devel "nLists": 100 } }); - ```` + ``` Where the parameters are: - - `metric`: defines how similarity/distance is calculated, can be - l2 or cosine, + - `metric`: defines how similarity/distance is calculated, can be l2 or + cosine, - `nLists`: number of lists to create, - - `dimension`: vector dimensions, ArangoDB will also check that all the vectors - have the correct number of components, - - `trainingIterations`: how many training iterations will be used, - The current limitation is that a vector index can be created only on a single field. - The vector index creation has two phases, training in which the centroids are being determined - from the subset of data, and insertion phase in which the lists are being populated. - To trigger the vector index an appropriate `APPROX_NEAR` function must be used - along with `SORT` and `LIMIT` clauses e.g.: + - `dimension`: vector dimensions, ArangoDB will also check that all the + vectors have the correct number of components, + - `trainingIterations`: how many training iterations will be used, + + The current limitation is that a vector index can be created only on a single + field. The vector index creation has two phases, training in which the + centroids are being determined from the subset of data, and insertion phase in + which the lists are being populated. To trigger the vector index an + appropriate `APPROX_NEAR` function must be used along with `SORT` and `LIMIT` + clauses e.g.: ``` FOR d IN docs LET distance = APPROX_NEAR_L2(d.vector, @queryVector) SORT distance LIMIT 10 RETURN {d, distance} - ```` + ``` This returns the top 10 nearest neighbors to `@queryVector`. -* Introduce expiry time for idle TCP/IP connections in the ConnectionCache - (for `SimpleHttpClient`) with a default of 120s. This is to prevent - errors in replication caused by cloud environments terminating - connections. 
Also add retries in a few places. Also increase the - timeout in initial sync to transfer up to 5000 documents from 25s to - 900s. This addresses BTS-2011, BTS-2035 and BTS-2042. +* Introduce expiry time for idle TCP/IP connections in the ConnectionCache (for + `SimpleHttpClient`) with a default of 120s. This is to prevent errors in + replication caused by cloud environments terminating connections. Also add + retries in a few places. Also increase the timeout in initial sync to transfer + up to 5000 documents from 25s to 900s. This addresses BTS-2011, BTS-2035 and + BTS-2042. -* Improve the use-index-for-sort optimizer rule: If there is a persistent - index which starts with the same fields as the sort fields, the index - can be used for sorting these first fields that are covered by the index. - Subsequent rows with the same value in these fields are then sorted by - the rest of the sorting fields using a new grouped sort executor. +* Improve the use-index-for-sort optimizer rule: If there is a persistent index + which starts with the same fields as the sort fields, the index can be used + for sorting these first fields that are covered by the index. + Subsequent rows with the same value in these fields are then sorted by the + rest of the sorting fields using a new grouped sort executor. * If a coordinator cannot send a heartbeat to the agency, it will shut down itself after 30 mins (configurable by the startup option `--cluster.no-heartbeat-delay-before-shutdown` whose default is 1800. - This is to ensure that servers which are automatically cleaned up, - because they are away for too long cannot disturb hotbackups by running - transactions. + This is to ensure that servers which are automatically cleaned up, because + they are away for too long cannot disturb hotbackups by running transactions. -* Reduce the time until an expired server is removed from the agency to 1h - (from 24h). 
This helps with BTS-2039 and thus helps hotbackups to go - through in more cases when coordinators have crashed. The time is - configurable with the `--agency.supervision-expired-servers-grace-time` - command line option. +* Reduce the time until an expired server is removed from the agency to 1h (from + 24h). This helps with BTS-2039 and thus helps hotbackups to go through in more + cases when coordinators have crashed. The time is configurable with the + `--agency.supervision-expired-servers-grace-time` command line option. -* Changed the priority of WebUI request to prevent endless loading if the - server is under load +* Changed the priority of WebUI request to prevent endless loading if the server + is under load -* Truncate error messages by rclone for TransferJobs to avoid excessive - memory use in the agency. Keep only 10 finished TransferJobs in agency. - This fixes BTS-2041. +* Truncate error messages by rclone for TransferJobs to avoid excessive memory + use in the agency. Keep only 10 finished TransferJobs in agency. This fixes + BTS-2041. * Don't cleanup failed agency jobs if they are subjobs of pending jobs. This avoids a bug in CleanOutServer jobs, where such a job could complete - seemingly successfully despite the fact that some MoveShard jobs had - actually failed. This fixes BTS-2022. + seemingly successfully despite the fact that some MoveShard jobs had actually + failed. This fixes BTS-2022. * Disallow the KEEP keyword in conjunction with INTO var = expr when doing COLLECT in an AQL query. The KEEP clause had no effect. @@ -210,11 +214,11 @@ devel * Fix view usage in cached query plans. * Optimized COLLECT queries by pushing collection of INTO values onto database - servers and aggregate on the coordinator. This can be disabled by setting - the option `aggregateIntoExpressionOnDBServers` to false. + servers and aggregate on the coordinator. This can be disabled by setting the + option `aggregateIntoExpressionOnDBServers` to false. 
-* Make it an error if usePlanCache is set to true but the query is not - eligible for query plan caching. This is better for debugging. +* Make it an error if usePlanCache is set to true but the query is not eligible + for query plan caching. This is better for debugging. * Upgraded OpenSSL to 3.4.0. @@ -222,18 +226,18 @@ devel * BTS-2017: fix CleanOutServer for satellite collections. -* Add metric that counts the total number of lost transaction subordinate - states on the database servers. +* Add metric that counts the total number of lost transaction subordinate states + on the database servers. * FE-483: add support for usePlanCache in query editor UI. * ArangoDB now has an optional AQL query execution plan cache that can be used - to skip query planning and optimization when running the same queries + to skip query planning and optimization when running the same queries repeatedly. This feature is currently experimental. The plan cache is fully optional and is only considered for AQL queries that - have the option `usePlanCache` set to `true`. If the option is not - set to `true`, the plan cache is bypassed. This is also the default. + have the option `usePlanCache` set to `true`. If the option is not set to + `true`, the plan cache is bypassed. This is also the default. If the option is set to `true`, the query's eligibility for being cached is checked first. Any queries that have any of the following query options set @@ -244,33 +248,32 @@ devel - `optimizer.rules` - `shardIds` - Additionally, a query is not eligible for plan caching if it uses attribute + Additionally, a query is not eligible for plan caching if it uses attribute name bind parameters, e.g. `FILTER doc.@attributeName == ...`. Furthermore, queries will not be eligible for plan caching when using value bind parameters in any of the following places: - - specifying the depths for traversals or path queries (e.g. + - specifying the depths for traversals or path queries (e.g. 
`FOR v, e IN @min..@max OUTBOUND ...`) - referring to a named graph (e.g. `GRAPH @graphName`) - - referring to edge collections used in traversals or path queries - (e.g. `FOR v, e IN 1..2 OUTBOUND 'v/0' @ec...`) + - referring to edge collections used in traversals or path queries (e.g. + `FOR v, e IN 1..2 OUTBOUND 'v/0' @ec...`) - specifying the lookup value for UPSERT operations, e.g. `UPSERT @value...`) - If a query produces any warnings during parsing or query plan optimization, - it is also not eligible for plan caching. + If a query produces any warnings during parsing or query plan optimization, it + is also not eligible for plan caching. Query plans are also not eligible for caching if they contain one of the following execution node types: - SingleRemoteOperationNode (cluster only) - MultipleRemoteModificationNode (cluster only) - - UpsertNode, i.e. the AQL UPSERT functionality. + - UpsertNode, i.e. the AQL UPSERT functionality If a query is eligible for plan caching, the plan cache will be checked using the exact same query string and set of collection bind parameter values. A cached plan entry is only considered identical to the current query if the - query strings are bytewise identical and the set of collection bind - parameters is exactly the same (bind parameter names and bind parameter - values). - If no plan entry can be found in the plan cache, the query is planned and + query strings are bytewise identical and the set of collection bind parameters + is exactly the same (bind parameter names and bind parameter values). + If no plan entry can be found in the plan cache, the query is planned and optimized as usual, and the cached plan will be inserted into the plan cache. 
- Repeated executions of the same query (same query string and using the same + Repeated executions of the same query (same query string and using the same set of collection bind parameters) will then make use of the cached plan entry, potentially with different value bind parameters. @@ -279,11 +282,11 @@ devel - `maxDNFConditionMembers` - `maxNodesPerCallstack` - Whenever a query uses a cached plan from the plan cache, the query - result will include an attribute `planCacheKey` on the top level when - executing, explaining or profiling a query. The explain and profiling - output will also display a "plan cache key: ..." indicator to show - that a cached query plan was used. + Whenever a query uses a cached plan from the plan cache, the query result will + include an attribute `planCacheKey` on the top level when executing, + explaining or profiling a query. The explain and profiling output will also + display a "plan cache key: ..." indicator to show that a cached query plan was + used. The query plan cache is organized per database. It gets invalidated at the following events: @@ -300,79 +303,79 @@ devel cache in each database. The default value is 128. - `--query.plan-cache-max-memory-usage`: maximum total memory usage for the query plan cache in each database. The default value is 8MB. - - `--query.plan-cache-max-entry-size`: maximum size of an individual entry - in the query plan cache in each database. The default value is 2MB. + - `--query.plan-cache-max-entry-size`: maximum size of an individual entry in + the query plan cache in each database. The default value is 2MB. Note that each database has its own query plan cache, and that these options are used for each individual plan cache. Also note that in a cluster, each coordinator will have its own query plan cache. There are also new APIs to clear the contents of the query plan cache and to retrieve the current plan cache entries. 
The following HTTP REST APIs exist: - - HTTP DELETE `/_api/query-plan-cache` to delete all entries in the query - plan cache for the current database. This requires write privileges for the + - HTTP DELETE `/_api/query-plan-cache` to delete all entries in the query plan + cache for the current database. This requires write privileges for the current database. - - HTTP GET `/_api/query-plan-cache` to retrieve all entries in the query - plan cache for the current database. This requires read privileges for the + - HTTP GET `/_api/query-plan-cache` to retrieve all entries in the query plan + cache for the current database. This requires read privileges for the current database. In addition, only those query plans will be returned for - which the current user has at least read permissions on all collections - and views included in the query. - In a cluster, these APIs are specific to the coordinator that they are run - on. + which the current user has at least read permissions on all collections and + views included in the query. + In a cluster, these APIs are specific to the coordinator that they are run on. There is also a JavaScript module `@arangodb/query/plan-cache` that exposes the same functionality: - - `require("@arangodb/aql/plan-cache`).clear()` to delete all entries in - the plan cache for the current database. This requires write privileges for - the current database. - - `require("@arangodb/aql/plan-cache`).toArray()` to retrieve all entries - in the plan cache for the current database. This requires read privileges - for the current database. In addition, only those query plans will be - returned for which the current user has at least read permissions on all - collections and views included in the query. - In a cluster, these APIs are specific to the coordinator that they are run - on. + - `require("@arangodb/aql/plan-cache`).clear()` to delete all entries in the + plan cache for the current database. 
This requires write privileges for the + current database. + - `require("@arangodb/aql/plan-cache`).toArray()` to retrieve all entries in + the plan cache for the current database. This requires read privileges for + the current database. In addition, only those query plans will be returned + for which the current user has at least read permissions on all collections + and views included in the query. + In a cluster, these APIs are specific to the coordinator that they are run on. The following metrics are exposed on a single server and each coordinator to provide insights into how many query plans were served from the query plan cache: - - `arangodb_aql_query_plan_cache_hits_total`: total number of plans looked - up and found in the query plan cache, across all database-specific plan - caches on the instance. + - `arangodb_aql_query_plan_cache_hits_total`: total number of plans looked up + and found in the query plan cache, across all database-specific plan caches + on the instance. - `arangodb_aql_query_plan_cache_misses_total`: total number of plans looked up and not found in the query plan cache, across all database-specific plan caches on the instance. - - `arangodb_aql_query_plan_cache_memory_usage`: total current memory usage - of all query plan caches across all databases on this instance in bytes. + - `arangodb_aql_query_plan_cache_memory_usage`: total current memory usage of + all query plan caches across all databases on this instance in bytes. -* In replication use vpack as transport format throughout, so that even - large and strangely encoded numbers are transported faithfully. +* In replication use vpack as transport format throughout, so that even large + and strangely encoded numbers are transported faithfully. * BTS-2014: Fix delay if write hits early after a hotbackup restore. -* Fix leader resignation race in coordinator, which lead to forgotten - collection read locks on dbservers, which in turn could lead to deadlocks - in the cluster. 
+* Fix leader resignation race in coordinator, which lead to forgotten collection + read locks on dbservers, which in turn could lead to deadlocks in the cluster. -* Fix shard synchronisation race where after a shard move the new - leader informs followers before it updates Current in the Agency. In - some cases the old leader fell subsequently out of sync. +* Fix shard synchronisation race where after a shard move the new leader informs + followers before it updates Current in the Agency. In some cases the old + leader fell subsequently out of sync. -* Fix a crash with async prefetch and user defined functions in AQL. This - fixes BTS-2003. +* Fix a crash with async prefetch and user defined functions in AQL. This fixes + BTS-2003. * FE-466: use @arangodb/ui library. -* Bring unicode collation back to the exact 3.11 behaviour by shipping the - original 3.11 tables in a icudtl_legacy.dat file. This fixes corruption - bugs which could happen on upgrade to 3.12 because funny international - strings are indexed in VPack indexes. +* Bring unicode collation back to the exact 3.11 behaviour by shipping the + original 3.11 tables in a icudtl_legacy.dat file. This fixes corruption bugs + which could happen on upgrade to 3.12 because funny international strings are + indexed in VPack indexes. -* Implements a faster encoding of doubles to a memcmp format. Makes it a cmake option. - Makes sure that it does not produce different values as the old/slower implementation. - Removes unneeded db._explain in a test +* Implements a faster encoding of doubles to a memcmp format. Makes it a cmake + option. + Makes sure that it does not produce different values as the old/slower + implementation. + Removes unneeded db._explain in a test. * Improve the observability of asynchronous operations by saving all ongoing - asynchronous operations (associated with async and Future) in a registry. + asynchronous operations (associated with async and Future) in a + registry. 
A REST call gives information about all these operations via a list of all ongoing stacktraces. diff --git a/CMakeLists.txt b/CMakeLists.txt index 2e4e0f4c97ad..76d6b2f0fb9a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -74,7 +74,7 @@ set(ARANGODB_VERSION_MINOR "12") # when building the nightly ARANGODB_VERSION_PATCH will be set if (NOT DEFINED ARANGODB_VERSION_PATCH) set(ARANGODB_VERSION_PATCH "4") - set(ARANGODB_VERSION_RELEASE_TYPE "devel") + set(ARANGODB_VERSION_RELEASE_TYPE "") set(ARANGODB_VERSION_RELEASE_NUMBER "") else() unset (ARANGODB_VERSION_RELEASE_TYPE) # do not remove space diff --git a/js/apps/system/_admin/aardvark/APP/api-docs.json b/js/apps/system/_admin/aardvark/APP/api-docs.json index 694507fddd4e..ad809daa3d91 100644 --- a/js/apps/system/_admin/aardvark/APP/api-docs.json +++ b/js/apps/system/_admin/aardvark/APP/api-docs.json @@ -2,7 +2,7 @@ "info": { "description": "ArangoDB REST API Interface", "title": "ArangoDB", - "version": "3.12.4-devel" + "version": "3.12.4" }, "openapi": "3.1.0", "paths": { @@ -218,7 +218,7 @@ }, "responses": { "200": { - "description": "Is returned if the backup could be restored. Note that there is an\ninevitable discrepancy between the single server and the cluster. In a\nsingle server, the request returns successfully, but the restore is\nonly executed afterwards. In the cluster, the request only returns when\nthe restore operation has been completed successfully. The cluster\nbehaviour is obviously the desired one, but in a single instance, one\ncannot keep a connection open across a restart.\n" + "description": "Is returned if the backup could be restored. Note that there is an\ninevitable discrepancy between the single server and the cluster. In a\nsingle server, the request returns successfully, but the restore is\nonly executed afterwards. In the cluster, the request only returns when\nthe restore operation has been completed successfully. 
The cluster\nbehavior is obviously the desired one, but in a single instance, one\ncannot keep a connection open across a restart.\n" }, "400": { "description": "If the restore command is invoked with bad parameters or any HTTP\nmethod other than `POST`, then an *HTTP 400* is returned. The specifics\nare detailed in the returned error document.\n" @@ -420,7 +420,11 @@ "schema": { "properties": { "mode": { - "description": "The mode to put the DB-Server in. Possible values:\n- `\"maintenance\"`\n- `\"normal\"`\n", + "description": "The mode to put the DB-Server in.\n", + "enum": [ + "maintenance", + "normal" + ], "type": "string" }, "timeout": { @@ -1517,442 +1521,152 @@ ] } }, - "/_admin/echo": { - "post": { - "description": "The call returns an object with the servers request information\n", - "operationId": "echoRequest", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "body": { - "description": "The request body can be of any type and is simply forwarded.\n", - "type": "string" - } - }, - "required": [ - "body" - ], - "type": "object" - } + "/_admin/log": { + "get": { + "description": "\u003e **WARNING:**\nThis endpoint should no longer be used. It is deprecated from version 3.8.0 on.\nUse `/_admin/log/entries` instead, which provides the same data in a more\nintuitive and easier to process format.\n\n\nReturns fatal, error, warning or info log messages from the server's global log.\nThe result is a JSON object with the attributes described below.\n\nThis API can be turned off via the startup option `--log.api-enabled`. In case\nthe API is disabled, all requests will be responded to with HTTP 403. If the\nAPI is enabled, accessing it requires admin privileges, or even superuser\nprivileges, depending on the value of the `--log.api-enabled` startup option.\n", + "operationId": "getLog", + "parameters": [ + { + "description": "Returns all log entries up to log level `upto`. 
Note that `upto` must be:\n- `fatal` or `0`\n- `error` or `1`\n- `warning` or `2`\n- `info` or `3`\n- `debug` or `4`\nThe default value is `info`.\n", + "in": "query", + "name": "upto", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "Returns all log entries of log level `level`. Note that the query parameters\n`upto` and `level` are mutually exclusive.\n", + "in": "query", + "name": "level", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "Returns all log entries such that their log entry identifier (`lid` value)\nis greater or equal to `start`.\n", + "in": "query", + "name": "start", + "required": false, + "schema": { + "type": "number" + } + }, + { + "description": "Restricts the result to at most `size` log entries.\n", + "in": "query", + "name": "size", + "required": false, + "schema": { + "type": "number" + } + }, + { + "description": "Starts to return log entries skipping the first `offset` log entries. `offset`\nand `size` can be used for pagination.\n", + "in": "query", + "name": "offset", + "required": false, + "schema": { + "type": "number" + } + }, + { + "description": "Only return the log entries containing the text specified in `search`.\n", + "in": "query", + "name": "search", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "Sort the log entries either ascending (if `sort` is `asc`) or descending\n(if `sort` is `desc`) according to their `lid` values. Note that the `lid`\nimposes a chronological order. The default value is `asc`.\n", + "in": "query", + "name": "sort", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "Returns all log entries of the specified server. All other query parameters\nremain valid. If no serverId is given, the asked server\nwill reply. 
This parameter is only meaningful on Coordinators.\n", + "in": "query", + "name": "serverId", + "required": false, + "schema": { + "type": "string" } } - }, + ], "responses": { "200": { "content": { "application/json": { "schema": { "properties": { - "authorized": { - "description": "Whether the session is authorized\n", - "type": "boolean" - }, - "client": { - "description": "Attributes of the client connection\n", - "properties": { - "address": { - "description": "The IP address of the client\n", - "type": "integer" - }, - "id": { - "description": "A server generated ID\n", - "type": "string" - }, - "port": { - "description": "The port of the TCP connection on the client-side\n", - "type": "integer" - } - }, - "required": [ - "address", - "port", - "id" - ], - "type": "object" - }, - "cookies": { - "description": "A list of the cookies you sent\n", - "type": "object" - }, - "database": { - "description": "The name of the database this request was executed on\n", - "type": "string" - }, - "headers": { - "description": "The list of the HTTP headers you sent\n", - "type": "object" - }, - "internals": { - "description": "Contents of the server internals struct\n", - "type": "object" - }, - "isAdminUser": { - "description": "Whether the current user is an administrator\n", - "type": "boolean" - }, - "parameters": { - "description": "An object containing the query parameters\n", - "type": "object" - }, - "path": { - "description": "The relative path of this request (decoded, excluding `/_admin/echo`)\n", - "type": "string" - }, - "portType": { - "description": "The type of the socket, one of `\"tcp/ip\"`, `\"unix\"`, `\"unknown\"`\n", - "type": "string" - }, - "prefix": { - "description": "The prefix of the database\n", - "type": "object" - }, - "protocol": { - "description": "The transport protocol, one of `\"http\"`, `\"https\"`, `\"velocystream\"`\n", + "level": { + "description": "A list of the log levels for all log entries.\n", "type": "string" }, - 
"rawRequestBody": { - "description": "The sent payload as a JSON-encoded Buffer object\n", - "type": "object" - }, - "rawSuffix": { - "description": "A list of the percent-encoded URL path suffixes\n", + "lid": { + "description": "a list of log entry identifiers. Each log message is uniquely\nidentified by its @LIT{lid} and the identifiers are in ascending\norder.\n", "items": { "type": "string" }, "type": "array" }, - "requestBody": { - "description": "Stringified version of the request body you sent\n", - "type": "string" - }, - "requestType": { - "description": "The HTTP method that was used for the request (`\"POST\"`). The endpoint can be\nqueried using other verbs, too (`\"GET\"`, `\"PUT\"`, `\"PATCH\"`, `\"DELETE\"`).\n", + "text": { + "description": "a list of the texts of all log entries\n", "type": "string" }, - "server": { - "description": "Attributes of the server connection\n", - "properties": { - "address": { - "description": "The bind address of the endpoint this request was sent to\n", - "type": "string" - }, - "endpoint": { - "description": "The endpoint this request was sent to\n", - "type": "string" - }, - "port": { - "description": "The port this request was sent to\n", - "type": "integer" - } - }, - "required": [ - "address", - "port", - "endpoint" - ], - "type": "object" - }, - "suffix": { - "description": "A list of the decoded URL path suffixes. You can query the endpoint with\narbitrary suffixes, e.g. 
`/_admin/echo/foo/123`\n", + "timestamp": { + "description": "a list of the timestamps as seconds since 1970-01-01 for all log\nentries.\n", "items": { "type": "string" }, "type": "array" }, - "url": { - "description": "The raw request URL\n", + "topic": { + "description": "a list of the topics of all log entries\n", "type": "string" }, - "user": { - "description": "The name of the current user that sent this request\n", - "type": "string" + "totalAmount": { + "description": "the total amount of log entries before pagination.\n", + "type": "integer" } }, "required": [ - "authorized", - "user", - "isAdminUser", - "database", - "url", - "protocol", - "portType", - "server", - "client", - "internals", - "prefix", - "headers", - "requestType", - "requestBody", - "rawRequestBody", - "parameters", - "cookies", - "suffix", - "rawSuffix", - "path" + "lid", + "level", + "timestamp", + "text", + "topic", + "totalAmount" ], "type": "object" } } }, - "description": "Echo was returned successfully.\n" + "description": "" + }, + "400": { + "description": "is returned if invalid values are specified for `upto` or `level`.\n" + }, + "403": { + "description": "is returned if there are insufficient privileges to access the logs.\n" } }, - "summary": "Echo a request", + "summary": "Get the global server logs (deprecated)", "tags": [ - "Administration" + "Monitoring" ] } }, - "/_admin/execute": { - "post": { - "description": "Executes the JavaScript code in the body on the server as the body\nof a function with no arguments. If you have a `return` statement\nthen the return value you produce will be returned as content type\n`application/json`. 
If the parameter `returnAsJSON` is set to\n`true`, the result will be a JSON object describing the return value\ndirectly, otherwise a string produced by JSON.stringify will be\nreturned.\n\nNote that this API endpoint will only be present if the server was\nstarted with the option `--javascript.allow-admin-execute true`.\n\nThe default value of this option is `false`, which disables the execution of\nuser-defined code and disables this API endpoint entirely.\nThis is also the recommended setting for production.\n", - "operationId": "executeCode", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "body": { - "description": "The request body is the JavaScript code to be executed.\n", - "type": "string" - } - }, - "required": [ - "body" - ], - "type": "object" - } - } - } - }, - "responses": { - "200": { - "description": "is returned when everything went well, or if a timeout occurred. In the\nlatter case a body of type application/json indicating the timeout\nis returned. 
depending on `returnAsJSON` this is a json object or a plain string.\n" - }, - "403": { - "description": "is returned if ArangoDB is not running in cluster mode.\n" - }, - "404": { - "description": "is returned if ArangoDB was not compiled for cluster operation.\n" - } - }, - "summary": "Execute a script", - "tags": [ - "Administration" - ] - } - }, - "/_admin/license": { - "get": { - "description": "View the license information and status of an Enterprise Edition instance.\nCan be called on single servers, Coordinators, and DB-Servers.\n", - "operationId": "getLicense", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "features": { - "description": "The properties of the license.\n", - "properties": { - "expires": { - "description": "The `expires` key lists the expiry date as Unix timestamp (seconds since\nJanuary 1st, 1970 UTC).\n", - "example": 1683173040, - "type": "number" - } - }, - "required": [ - "expires" - ], - "type": "object" - }, - "hash": { - "description": "The hash value of the license.\n", - "example": "982db5...44f3", - "type": "string" - }, - "license": { - "description": "The encrypted license key in Base64 encoding, or `\"none\"`\nin the Community Edition.\n", - "example": "V0h/W...wEDw==", - "type": "string" - }, - "status": { - "description": "The `status` key allows you to confirm the state of the installed license on a\nglance. The possible values are as follows:\n\n- `good`: The license is valid for more than 2 weeks.\n- `expiring`: The license is valid for less than 2 weeks.\n- `expired`: The license has expired. In this situation, no new\n Enterprise Edition features can be utilized.\n- `read-only`: The license is expired over 2 weeks. 
The instance is now\n restricted to read-only mode.\n", - "example": "good", - "type": "string" - }, - "upgrading": { - "description": "Whether the server is performing a database upgrade.\n", - "example": false, - "type": "boolean" - }, - "version": { - "description": "The license version number.\n", - "example": 1, - "type": "number" - } - }, - "required": [ - "license" - ], - "type": "object" - } - } - }, - "description": "Returns the license information.\n" - } - }, - "summary": "Get information about the current license", - "tags": [ - "Administration" - ] - }, - "put": { - "description": "Set a new license for an Enterprise Edition instance.\nCan be called on single servers, Coordinators, and DB-Servers.\n", - "operationId": "setLicense", - "parameters": [ - { - "description": "Set to `true` to change the license even if it expires sooner than the current one.\n", - "in": "query", - "name": "force", - "required": false, - "schema": { - "default": false, - "type": "boolean" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "description": "The request body has to contain the Base64-encoded string wrapped in double quotes.\n", - "example": "eyJncmFudCI6...(Base64-encoded license string)...", - "type": "string" - } - } - } - }, - "responses": { - "201": { - "content": { - "application/json": { - "schema": { - "properties": { - "result": { - "properties": { - "code": { - "description": "The HTTP status code.\n", - "example": 201, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - } - }, - "required": [ - "error", - "code" - ], - "type": "object" - } - }, - "required": [ - "result" - ], - "type": "object" - } - } - }, - "description": "License successfully deployed.\n" - }, - "400": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP status code.\n", - "example": 400, - "type": 
"integer" - }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, - "type": "boolean" - }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" - }, - "errorNum": { - "description": "The ArangoDB error number.\n", - "type": "integer" - } - }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "If the license expires earlier than the previously installed one,\nor if the supplied license string is invalid.\n" - }, - "501": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP status code.\n", - "example": 501, - "type": "integer" - }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, - "type": "boolean" - }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" - }, - "errorNum": { - "description": "The ArangoDB error number.\n", - "type": "integer" - } - }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "If you try to apply a license in the Community Edition.\n" - } - }, - "summary": "Set a new license", - "tags": [ - "Administration" - ] - } - }, - "/_admin/log": { + "/_admin/log/entries": { "get": { - "description": "\u003e **WARNING:**\nThis endpoint should no longer be used. It is deprecated from version 3.8.0 on.\nUse `/_admin/log/entries` instead, which provides the same data in a more\nintuitive and easier to process format.\n\n\nReturns fatal, error, warning or info log messages from the server's global log.\nThe result is a JSON object with the attributes described below.\n\nThis API can be turned off via the startup option `--log.api-enabled`. In case\nthe API is disabled, all requests will be responded to with HTTP 403. 
If the\nAPI is enabled, accessing it requires admin privileges, or even superuser\nprivileges, depending on the value of the `--log.api-enabled` startup option.\n",
-        "operationId": "getLog",
+        "description": "Returns fatal, error, warning or info log messages from the server's global log.\nThe result is a JSON object with the following properties:\n\n- **total**: the total amount of log entries before pagination\n- **messages**: an array with log messages that matched the criteria\n\nThis API can be turned off via the startup option `--log.api-enabled`. In case\nthe API is disabled, all requests will be responded to with HTTP 403. If the\nAPI is enabled, accessing it requires admin privileges, or even superuser\nprivileges, depending on the value of the `--log.api-enabled` startup option.\n",
+        "operationId": "getLogEntries",
         "parameters": [
           {
             "description": "Returns all log entries up to log level `upto`. Note that `upto` must be:\n- `fatal` or `0`\n- `error` or `1`\n- `warning` or `2`\n- `info` or `3`\n- `debug` or `4`\nThe default value is `info`.\n",
@@ -1973,7 +1687,7 @@
           }
         },
         {
-          "description": "Returns all log entries such that their log entry identifier (`lid` value)\nis greater or equal to `start`.\n",
+          "description": "Returns all log entries such that their log entry identifier (`id` value)\nis greater or equal to `start`.\n",
           "in": "query",
           "name": "start",
           "required": false,
           "schema": {
             "type": "number"
           }
         },
@@ -2009,7 +1723,7 @@
           }
         },
         {
-          "description": "Sort the log entries either ascending (if `sort` is `asc`) or descending\n(if `sort` is `desc`) according to their `lid` values. Note that the `lid`\nimposes a chronological order. The default value is `asc`.\n",
+          "description": "Sort the log entries either ascending (if `sort` is `asc`) or descending\n(if `sort` is `desc`) according to their `id` values. Note that the `id`\nimposes a chronological order. 
The default value is `asc`.\n", "in": "query", "name": "sort", "required": false, @@ -2029,54 +1743,7 @@ ], "responses": { "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "level": { - "description": "A list of the log levels for all log entries.\n", - "type": "string" - }, - "lid": { - "description": "a list of log entry identifiers. Each log message is uniquely\nidentified by its @LIT{lid} and the identifiers are in ascending\norder.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "text": { - "description": "a list of the texts of all log entries\n", - "type": "string" - }, - "timestamp": { - "description": "a list of the timestamps as seconds since 1970-01-01 for all log\nentries.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "topic": { - "description": "a list of the topics of all log entries\n", - "type": "string" - }, - "totalAmount": { - "description": "the total amount of log entries before pagination.\n", - "type": "integer" - } - }, - "required": [ - "lid", - "level", - "timestamp", - "text", - "topic", - "totalAmount" - ], - "type": "object" - } - } - }, - "description": "" + "description": "is returned if the request is valid.\n" }, "400": { "description": "is returned if invalid values are specified for `upto` or `level`.\n" @@ -2085,82 +1752,19 @@ "description": "is returned if there are insufficient privileges to access the logs.\n" } }, - "summary": "Get the global server logs (deprecated)", + "summary": "Get the global server logs", "tags": [ "Monitoring" ] } }, - "/_admin/log/entries": { - "get": { - "description": "Returns fatal, error, warning or info log messages from the server's global log.\nThe result is a JSON object with the following properties:\n\n- **total**: the total amount of log entries before pagination\n- **messages**: an array with log messages that matched the criteria\n\nThis API can be turned off via the startup option `--log.api-enabled`. 
In case\nthe API is disabled, all requests will be responded to with HTTP 403. If the\nAPI is enabled, accessing it requires admin privileges, or even superuser\nprivileges, depending on the value of the `--log.api-enabled` startup option.\n", - "operationId": "getLogEntries", + "/_admin/log/level": { + "delete": { + "description": "Revert the server's log level settings to the values they had at startup,\nas determined by the startup options specified on the command-line, a\nconfiguration file, and the factory defaults.\n\nThe result is a JSON object with the log topics being the object keys, and\nthe log levels being the object values.\n\nThis API can be turned off via the startup option `--log.api-enabled`. In case\nthe API is disabled, all requests will be responded to with HTTP 403. If the\nAPI is enabled, accessing it requires admin privileges, or even superuser\nprivileges, depending on the value of the `--log.api-enabled` startup option.\n", + "operationId": "resetLogLevel", "parameters": [ { - "description": "Returns all log entries up to log level `upto`. Note that `upto` must be:\n- `fatal` or `0`\n- `error` or `1`\n- `warning` or `2`\n- `info` or `3`\n- `debug` or `4`\nThe default value is `info`.\n", - "in": "query", - "name": "upto", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "Returns all log entries of log level `level`. 
Note that the query parameters\n`upto` and `level` are mutually exclusive.\n", - "in": "query", - "name": "level", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "Returns all log entries such that their log entry identifier (`lid` .)\nis greater or equal to `start`.\n", - "in": "query", - "name": "start", - "required": false, - "schema": { - "type": "number" - } - }, - { - "description": "Restricts the result to at most `size` log entries.\n", - "in": "query", - "name": "size", - "required": false, - "schema": { - "type": "number" - } - }, - { - "description": "Starts to return log entries skipping the first `offset` log entries. `offset`\nand `size` can be used for pagination.\n", - "in": "query", - "name": "offset", - "required": false, - "schema": { - "type": "number" - } - }, - { - "description": "Only return the log entries containing the text specified in `search`.\n", - "in": "query", - "name": "search", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "Sort the log entries either ascending (if `sort` is `asc`) or descending\n(if `sort` is `desc`) according to their `id` values. Note that the `id`\nimposes a chronological order. The default value is `asc`.\n", - "in": "query", - "name": "sort", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "Returns all log entries of the specified server. All other query parameters\nremain valid. If no serverId is given, the asked server\nwill reply. 
This parameter is only meaningful on Coordinators.\n", + "description": "Forwards the request to the specified server.\n", "in": "query", "name": "serverId", "required": false, @@ -2171,22 +1775,17 @@ ], "responses": { "200": { - "description": "is returned if the request is valid.\n" - }, - "400": { - "description": "is returned if invalid values are specified for `upto` or `level`.\n" + "description": "The log levels have been reset successfully.\n" }, "403": { - "description": "is returned if there are insufficient privileges to access the logs.\n" + "description": "You have insufficient privileges to reset the log levels.\n" } }, - "summary": "Get the global server logs", + "summary": "Reset the server log levels", "tags": [ "Monitoring" ] - } - }, - "/_admin/log/level": { + }, "get": { "description": "Returns the server's current log level settings.\nThe result is a JSON object with the log topics being the object keys, and\nthe log levels being the object values.\n\nThis API can be turned off via the startup option `--log.api-enabled`. In case\nthe API is disabled, all requests will be responded to with HTTP 403. 
If the\nAPI is enabled, accessing it requires admin privileges, or even superuser\nprivileges, depending on the value of the `--log.api-enabled` startup option.\n", "operationId": "getLogLevel", @@ -2199,6 +1798,16 @@ "schema": { "type": "string" } + }, + { + "description": "Set this option to `true` to return the individual log level settings\nof all log outputs (`appenders`) as well as the `global` settings.\n\nThe response structure is as follows:\n\n```json\n{\n \"global\": {\n \"agency\": \"INFO\",\n \"agencycomm\": \"INFO\",\n \"agencystore\": \"WARNING\",\n ...\n },\n \"appenders\": {\n \"-\": {\n \"agency\": \"INFO\",\n \"agencycomm\": \"INFO\",\n \"agencystore\": \"WARNING\",\n ...\n },\n \"file:///path/to/file\": {\n \"agency\": \"INFO\",\n \"agencycomm\": \"INFO\",\n \"agencystore\": \"WARNING\",\n ...\n },\n ...\n }\n}\n```\n", + "in": "query", + "name": "withAppenders", + "required": false, + "schema": { + "default": false, + "type": "boolean" + } } ], "responses": { @@ -2215,7 +1824,7 @@ ] }, "put": { - "description": "Modifies and returns the server's current log level settings.\nThe request body must be a JSON string with a log level or a JSON object with the\nlog topics being the object keys and the log levels being the object values.\n\nIf only a JSON string is specified as input, the log level is adjusted for the\n\"general\" log topic only. If a JSON object is specified as input, the log levels will\nbe set only for the log topic mentioned in the input object, but preserved for every\nother log topic.\nTo set the log level for all log levels to a specific value, it is possible to hand\nin the special pseudo log topic \"all\".\n\nThe result is a JSON object with all available log topics being the object keys, and\nthe adjusted log levels being the object values.\n\nPossible log levels are:\n- FATAL - There will be no way out of this. ArangoDB will go down after this message.\n- ERROR - This is an error. you should investigate and fix it. 
It may harm your production.\n- WARNING - This may be serious application-wise, but we don't know.\n- INFO - Something has happened, take notice, but no drama attached.\n- DEBUG - output debug messages\n- TRACE - trace - prepare your log to be flooded - don't use in production.\n\nThis API can be turned off via the startup option `--log.api-enabled`. In case\nthe API is disabled, all requests will be responded to with HTTP 403. If the\nAPI is enabled, accessing it requires admin privileges, or even superuser\nprivileges, depending on the value of the `--log.api-enabled` startup option.\n", + "description": "Modifies and returns the server's current log level settings.\nThe request body must be a JSON string with a log level or a JSON object with the\nlog topics being the object keys and the log levels being the object values.\n\nIf only a JSON string is specified as input, the log level is adjusted for the\n\"general\" log topic only. If a JSON object is specified as input, the log levels will\nbe set only for the log topic mentioned in the input object, but preserved for every\nother log topic.\nTo set the log level for all log levels to a specific value, it is possible to hand\nin the special pseudo log topic \"all\".\n\nThe result is a JSON object with all available log topics being the object keys, and\nthe adjusted log levels being the object values.\n\nPossible log levels are:\n- `FATAL` - Only critical errors are logged after which the _arangod_\n process terminates.\n- `ERROR` - Only errors are logged. You should investigate and fix errors\n as they may harm your production.\n- `WARNING` - Errors and warnings are logged. 
Warnings may be serious\n application-wise and can indicate issues that might lead to errors\n later on.\n- `INFO` - Errors, warnings, and general information is logged.\n- `DEBUG` - Outputs debug messages used in the development of ArangoDB\n in addition to the above.\n- `TRACE` - Logs detailed tracing of operations in addition to the above.\n This can flood the log. Don't use this log level in production.\n\nThis API can be turned off via the startup option `--log.api-enabled`. In case\nthe API is disabled, all requests will be responded to with HTTP 403. If the\nAPI is enabled, accessing it requires admin privileges, or even superuser\nprivileges, depending on the value of the `--log.api-enabled` startup option.\n", "operationId": "setLogLevel", "parameters": [ { @@ -2226,6 +1835,16 @@ "schema": { "type": "string" } + }, + { + "description": "Set this option to `true` to set individual log level settings\nfor log outputs (`appenders`). The request and response structure is\nas follows:\n\n```json\n{\n \"global\": {\n \"agency\": \"INFO\",\n \"agencycomm\": \"INFO\",\n \"agencystore\": \"WARNING\",\n ...\n },\n \"appenders\": {\n \"-\": {\n \"agency\": \"INFO\",\n \"agencycomm\": \"INFO\",\n \"agencystore\": \"WARNING\",\n ...\n },\n \"file:///path/to/file\": {\n \"agency\": \"INFO\",\n \"agencycomm\": \"INFO\",\n \"agencystore\": \"WARNING\",\n ...\n },\n ...\n }\n}\n```\n\nChanging the `global` settings affects all outputs and is the same\nas setting a log level with this option turned off.\n", + "in": "query", + "name": "withAppenders", + "required": false, + "schema": { + "default": false, + "type": "boolean" + } } ], "requestBody": { @@ -2234,15 +1853,15 @@ "schema": { "properties": { "agency": { - "description": "One of the possible log topics.\n", + "description": "Agents use this log topic to inform about any activity\nincluding the RAFT consensus gossip.\n", "type": "string" }, "agencycomm": { - "description": "One of the possible log topics.\n", + 
"description": "DB-Servers and Coordinators log the requests they send to the\nAgency.\n", "type": "string" }, "agencystore": { - "description": "One of the possible log topics.\n", + "description": "Optional verbose logging of Agency write operations.\n", "type": "string" }, "all": { @@ -2250,219 +1869,199 @@ "type": "string" }, "aql": { - "description": "One of the possible log topics.\n", + "description": "Logs information about the AQL query optimization and\nexecution. DB-Servers and Coordinators log the cluster-internal\ncommunication around AQL queries. It also reports the AQL\nmemory limit on startup.\n", "type": "string" }, "arangosearch": { - "description": "One of the possible log topics.\n", + "description": "Logs information related to ArangoSearch including Analyzers,\nthe column cache, and the commit and consolidation threads.\n", "type": "string" }, "audit-authentication": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Controls whether events such as successful logins and\nmissing or wrong credentials are written to the audit log\n(_Enterprise Edition only_).\n", "type": "string" }, "audit-authorization": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Controls whether events such as users trying to access databases\nwithout the necessary permissions are written to the audit log\n(_Enterprise Edition only_).\n", "type": "string" }, "audit-collection": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Controls whether events about collections creation, truncation,\nand deletion are written to the audit log (_Enterprise Edition only_).\n", "type": "string" }, "audit-database": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Controls whether events about database creation and deletion\nare written to the audit log (_Enterprise Edition 
only_).\n", "type": "string" }, "audit-document": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Controls whether document read and write events are written\nto the audit log (_Enterprise Edition only_).\n", "type": "string" }, "audit-hotbackup": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Controls whether the Hot Backup creation, restore, and delete\nevents are written to the audit log (_Enterprise Edition only_).\n", "type": "string" }, "audit-service": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Controls whether the start and stop events of the audit\nservice are written to the audit log (_Enterprise Edition only_).\n", "type": "string" }, "audit-view": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Controls whether events about View creation and deletion\nare written to the audit log (_Enterprise Edition only_).\n", "type": "string" }, "authentication": { - "description": "One of the possible log topics.\n", + "description": "Logs events related to authentication, for example, when a\nJWT secret is generated or a token is validated against a secret.\n", "type": "string" }, "authorization": { - "description": "One of the possible log topics.\n", + "description": "Logs when a user has insufficient permissions for a request.\n", "type": "string" }, "backup": { - "description": "One of the possible log topics.\n", + "description": "Logs events related to Hot Backup (_Enterprise Edition only_).\n", "type": "string" }, "bench": { - "description": "One of the possible log topics.\n", + "description": "Logs events related to benchmarking with _arangobench_.\n", "type": "string" }, "cache": { - "description": "One of the possible log topics.\n", + "description": "Logs events related to caching documents and index entries\nas well as the cache 
configuration on startup.\n", "type": "string" }, "cluster": { - "description": "One of the possible log topics.\n", - "type": "string" - }, - "clustercomm": { - "description": "One of the possible log topics.\n", - "type": "string" - }, - "collector": { - "description": "One of the possible log topics.\n", + "description": "Logs information related to the cluster-internal communication\nas well as cluster operations. This includes changes to the\nstate and readiness of DB-Servers and connectivity checks\non Coordinators.\n", "type": "string" }, "communication": { - "description": "One of the possible log topics.\n", + "description": "Logs lower-level network connection and communication events.\n", "type": "string" }, "config": { - "description": "One of the possible log topics.\n", + "description": "Logs information related to the startup options and server\nconfiguration.\n", "type": "string" }, "crash": { - "description": "One of the possible log topics.\n", + "description": "Logs information about a fatal error including a backtrace\nbefore the process terminates.\n", + "type": "string" + }, + "deprecation": { + "description": "Warns about deprecated features and the usage of options that\nwill not be allowed or have no effect in a future version.\n", "type": "string" }, "development": { - "description": "One of the possible log topics.\n", + "description": "This log topic is reserved for the development of ArangoDB.\n", "type": "string" }, "dump": { - "description": "One of the possible log topics.\n", + "description": "Logs events related to dumping data with _arangodump_.\n", "type": "string" }, "engines": { - "description": "One of the possible log topics.\n", + "description": "Logs various information related to ArangoDB's use of the\nRocksDB storage engine, like the initialization and\nfile operations.\n\nRocksDB's internal log messages are passed through using the\n`rocksdb` log topic.\n", "type": "string" }, "flush": { - "description": "One of the 
possible log topics.\n", + "description": "Logs events related to flushing data from memory to disk.\n", "type": "string" }, "general": { - "description": "One of the possible log topics.\n", + "description": "Logs all messages of general interest and that don't fit\nunder any of the other log topics. For example, it reports\nthe ArangoDB version and the detected operating system and\nmemory on startup.\n", "type": "string" }, "graphs": { - "description": "One of the possible log topics.\n", + "description": "Logs information related to graph operations including\ngraph traversal and path search tracing.\n", "type": "string" }, "heartbeat": { - "description": "One of the possible log topics.\n", + "description": "Logs everything related to the cluster heartbeat for\nmonitoring the intra-connectivity.\n", "type": "string" }, "httpclient": { - "description": "One of the possible log topics.\n", - "type": "string" - }, - "ldap": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Logs the activity of the HTTP request subsystem that is used\nin replication, client tools, and V8.\n", "type": "string" }, "libiresearch": { - "description": "One of the possible log topics.\n", + "description": "Logs the internal log messages of IResearch, the underlying\nlibrary of ArangoSearch.\n", "type": "string" }, "license": { - "description": "One of the possible log topics (_Enterprise Edition only_).\n", + "description": "Logs events related to the license management like the\nexpiration of a license (_Enterprise Edition only_).\n", "type": "string" }, "maintenance": { - "description": "One of the possible log topics.\n", + "description": "Logs the operations of the cluster maintenance including\nshard locking and collection creation.\n", "type": "string" }, "memory": { - "description": "One of the possible log topics.\n", - "type": "string" - }, - "mmap": { - "description": "One of the possible log topics.\n", - "type": "string" - 
}, - "performance": { - "description": "One of the possible log topics.\n", - "type": "string" - }, - "pregel": { - "description": "One of the possible log topics.\n", + "description": "Logs the memory configuration on startup and reports\nproblems with memory alignment and operating system settings.\n", "type": "string" }, "queries": { - "description": "One of the possible log topics.\n", + "description": "Logs slow queries as well as internal details about the\nexecution of AQL queries at low log levels.\n", "type": "string" }, "replication": { - "description": "One of the possible log topics.\n", + "description": "Logs information related to the data replication within a cluster.\n", "type": "string" }, "requests": { - "description": "One of the possible log topics.\n", + "description": "Logs the handling of internal and external requests and\ncan include IP addresses, endpoints, and HTTP headers and\nbodies when using low log levels.\n\nIt overlaps with the network `communication` log topic.\n", "type": "string" }, "restore": { - "description": "One of the possible log topics.\n", + "description": "This log topic is only used by _arangorestore_.\n", "type": "string" }, "rocksdb": { - "description": "One of the possible log topics.\n", + "description": "Logs RocksDB's internal log messages as well RocksDB\nbackground errors.\n\nInformation related to ArangoDB's use of the\nRocksDB storage engine uses the `engines` log topic.\n", "type": "string" }, "security": { - "description": "One of the possible log topics.\n", + "description": "Logs the security configuration for V8.\n", "type": "string" }, "ssl": { - "description": "One of the possible log topics.\n", + "description": "Logs information related to the in-transit encryption of\nnetwork communication using SSL/TLS.\n", "type": "string" }, "startup": { - "description": "One of the possible log topics.\n", + "description": "Logs information related to the startup and shutdown of a\nserver process as well as 
anything related to upgrading the\ndatabase directory.\n", "type": "string" }, "statistics": { - "description": "One of the possible log topics.\n", + "description": "Logs events related to processing server statistics.\nThis is independent of server metrics.\n", "type": "string" }, "supervision": { - "description": "One of the possible log topics.\n", + "description": "Logs information related to the Agency's cluster supervision.\n", "type": "string" }, "syscall": { - "description": "One of the possible log topics.\n", + "description": "Logs events related to calling operating system functions.\nIt reports problems related to file descriptors and the\nserver process monitoring.\n", "type": "string" }, "threads": { - "description": "One of the possible log topics.\n", + "description": "Logs information related to the use of operating system\nthreads and the threading configuration of ArangoDB.\n", "type": "string" }, "trx": { - "description": "One of the possible log topics.\n", + "description": "Logs information about transaction management.\n", "type": "string" }, "ttl": { - "description": "One of the possible log topics.\n", + "description": "Logs the activity of the background thread for\ntime-to-live (TTL) indexes.\n", "type": "string" }, "v8": { - "description": "One of the possible log topics.\n", + "description": "Logs various information related to ArangoDB's use of the\nV8 JavaScript engine, like the initialization as well as\nentering and exiting contexts.\n", "type": "string" }, "validation": { - "description": "One of the possible log topics.\n", + "description": "Logs when the schema validation fails for a document.\n", "type": "string" }, "views": { - "description": "One of the possible log topics.\n", + "description": "Logs certain events related to ArangoSearch Views.\n", "type": "string" } }, @@ -2554,79 +2153,6 @@ ] } }, - "/_admin/metrics": { - "get": { - "description": "\u003e **WARNING:**\nThis endpoint should no longer be used. 
It is deprecated from version 3.8.0 on.\nUse `/_admin/metrics/v2` instead. From version 3.10.0 onward, `/_admin/metrics`\nreturns the same metrics as `/_admin/metrics/v2`.\n\n\nReturns the instance's current metrics in Prometheus format. The\nreturned document collects all instance metrics, which are measured\nat any given time and exposes them for collection by Prometheus.\n\nThe document contains different metrics and metrics groups dependent\non the role of the queried instance. All exported metrics are\npublished with the `arangodb_` or `rocksdb_` string to distinguish\nthem from other collected data.\n\nThe API then needs to be added to the Prometheus configuration file\nfor collection.\n", - "operationId": "getMetrics", - "parameters": [ - { - "description": "Returns metrics of the specified server. If no serverId is given, the asked\nserver will reply. This parameter is only meaningful on Coordinators.\n", - "in": "query", - "name": "serverId", - "required": false, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Metrics were returned successfully.\n" - }, - "404": { - "description": "The metrics API may be disabled using `--server.export-metrics-api false`\nsetting in the server. In this case, the result of the call indicates the API\nto be not found.\n" - } - }, - "summary": "Get the metrics (deprecated)", - "tags": [ - "Monitoring" - ] - } - }, - "/_admin/metrics/v2": { - "get": { - "description": "Returns the instance's current metrics in Prometheus format. The\nreturned document collects all instance metrics, which are measured\nat any given time and exposes them for collection by Prometheus.\n\nThe document contains different metrics and metrics groups dependent\non the role of the queried instance. 
All exported metrics are\npublished with the prefix `arangodb_` or `rocksdb_` to distinguish them from\nother collected data.\n\nThe API then needs to be added to the Prometheus configuration file\nfor collection.\n", - "operationId": "getMetricsV2", - "parameters": [ - { - "description": "Returns metrics of the specified server. If no serverId is given, the asked\nserver will reply. This parameter is only meaningful on Coordinators.\n", - "in": "query", - "name": "serverId", - "required": false, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Metrics were returned successfully.\n" - }, - "404": { - "description": "The metrics API may be disabled using `--server.export-metrics-api false`\nsetting in the server. In this case, the result of the call indicates the API\nto be not found.\n" - } - }, - "summary": "Get the metrics", - "tags": [ - "Monitoring" - ] - } - }, - "/_admin/routing/reload": { - "post": { - "description": "Reloads the routing information from the `_routing` system collection if it\nexists, and makes Foxx rebuild its local routing table on the next request.\n", - "operationId": "reloadRouting", - "responses": { - "200": { - "description": "The routing information has been reloaded successfully.\n" - } - }, - "summary": "Reload the routing table", - "tags": [ - "Administration" - ] - } - }, "/_admin/server/availability": { "get": { "description": "Return availability information about a server.\n\nThe response is a JSON object with an attribute \"mode\". The \"mode\" can either\nbe \"readonly\", if the server is in read-only mode, or \"default\", if it is not.\nPlease note that the JSON object with \"mode\" is only returned in case the server\ndoes not respond with HTTP response code 503.\n\nThis is a public API so it does *not* require authentication. 
It is meant to be\nused only in the context of server monitoring.\n", @@ -2647,7 +2173,7 @@ }, "/_admin/server/encryption": { "post": { - "description": "Change the user-supplied encryption at rest key by sending a request without\npayload to this endpoint. The file supplied via `--rocksdb.encryption-keyfolder`\nwill be reloaded and the internal encryption key will be re-encrypted with the\nnew user key.\n\nThis is a protected API and can only be executed with superuser rights.\nThis API is not available on coordinator nodes.\n", + "description": "Change the user-supplied encryption at rest key by sending a request without\npayload to this endpoint. The file supplied via `--rocksdb.encryption-keyfolder`\nwill be reloaded and the internal encryption key will be re-encrypted with the\nnew user key.\n\nThis is a protected API and can only be executed with superuser rights.\nThis API is not available on Coordinator nodes.\n", "operationId": "rotateEncryptionAtRestKey", "responses": { "200": { @@ -2723,9 +2249,9 @@ } }, "/_admin/server/jwt": { - "get": { - "description": "Get information about the currently loaded secrets.\n\nTo utilize the API a superuser JWT token is necessary, otherwise the response\nwill be _HTTP 403 Forbidden_.\n", - "operationId": "getServerJwtSecrets", + "post": { + "description": "Sending a request without payload to this endpoint reloads the JWT secret(s)\nfrom disk. 
Only the files specified via the arangod startup option\n`--server.jwt-secret-keyfile` or `--server.jwt-secret-folder` are used.\nIt is not possible to change the locations where files are loaded from\nwithout restarting the process.\n\nTo utilize the API a superuser JWT token is necessary, otherwise the response\nwill be _HTTP 403 Forbidden_.\n", + "operationId": "reloadServerJwtSecrets", "responses": { "200": { "content": { @@ -2778,125 +2304,88 @@ "description": "if the request was not authenticated as a user with sufficient rights\n" } }, - "summary": "Get information about the loaded JWT secrets", + "summary": "Hot-reload the JWT secret(s) from disk", "tags": [ "Authentication" ] - }, - "post": { - "description": "Sending a request without payload to this endpoint reloads the JWT secret(s)\nfrom disk. Only the files specified via the arangod startup option\n`--server.jwt-secret-keyfile` or `--server.jwt-secret-folder` are used.\nIt is not possible to change the locations where files are loaded from\nwithout restarting the process.\n\nTo utilize the API a superuser JWT token is necessary, otherwise the response\nwill be _HTTP 403 Forbidden_.\n", - "operationId": "reloadServerJwtSecrets", + } + }, + "/_admin/server/role": { + "get": { + "description": "Returns the role of a server in a cluster.\nThe server role is returned in the `role` attribute of the result.\n", + "operationId": "getServerRole", "responses": { "200": { "content": { "application/json": { "schema": { - "description": "The reply with the JWT secrets information.\n", "properties": { "code": { - "description": "the HTTP status code - 200 in this case\n", + "description": "the HTTP status code, always 200\n", "type": "integer" }, "error": { - "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", + "description": "always `false`\n", "type": "boolean" }, - "result": { - "description": "The result object.\n", - "properties": { - "active": { - "description": "An 
object with the SHA-256 hash of the active secret.\n", - "type": "object" - }, - "passive": { - "description": "An array of objects with the SHA-256 hashes of the passive secrets.\n\nCan be empty.\n", - "items": { - "type": "object" - }, - "type": "array" - } - }, - "required": [ - "active", - "passive" + "errorNum": { + "description": "the server error number\n", + "type": "integer" + }, + "role": { + "description": "The server role.\n- `SINGLE`: the server is a standalone server without clustering\n- `COORDINATOR`: the server is a Coordinator in a cluster\n- `PRIMARY`: the server is a DB-Server in a cluster\n- `SECONDARY`: this role is not used anymore\n- `AGENT`: the server is an Agency node in a cluster\n- `UNDEFINED`: in a cluster, this is returned if the server role cannot be\n determined.\n", + "enum": [ + "SINGLE", + "COORDINATOR", + "PRIMARY", + "SECONDARY", + "AGENT", + "UNDEFINED" ], - "type": "object" + "type": "string" } }, "required": [ "error", "code", - "result" + "errorNum", + "role" ], "type": "object" } } }, - "description": "" - }, - "403": { - "description": "if the request was not authenticated as a user with sufficient rights\n" + "description": "Is returned in all cases.\n" } }, - "summary": "Hot-reload the JWT secret(s) from disk", + "summary": "Get the server role", "tags": [ - "Authentication" + "Cluster" ] } }, - "/_admin/server/mode": { - "get": { - "description": "Return mode information about a server. The json response will contain\na field `mode` with the value `readonly` or `default`. 
In a read-only server\nall write operations will fail with an error code of `1004` (_ERROR_READ_ONLY_).\nCreating or dropping of databases and collections will also fail with error code `11` (_ERROR_FORBIDDEN_).\n\nThis API requires authentication.\n", - "operationId": "getServerMode", - "responses": { - "200": { - "description": "This API will return HTTP 200 if everything is ok\n" - } - }, - "summary": "Return whether or not a server is in read-only mode", - "tags": [ - "Administration" - ] - }, - "put": { - "description": "Update mode information about a server. The JSON response will contain\na field `mode` with the value `readonly` or `default`. In a read-only server\nall write operations will fail with an error code of `1004` (_ERROR_READ_ONLY_).\nCreating or dropping of databases and collections will also fail with error\ncode `11` (_ERROR_FORBIDDEN_).\n\nThis is a protected API. It requires authentication and administrative\nserver rights.\n", - "operationId": "setServerMode", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "mode": { - "description": "The mode of the server `readonly` or `default`.\n", - "type": "string" - } - }, - "required": [ - "mode" - ], - "type": "object" - } - } - } - }, + "/_admin/server/tls": { + "post": { + "description": "This API call triggers a reload of all the TLS data (server key, client-auth CA)\nand then returns a summary. 
The JSON response is exactly as in the corresponding\nGET request.\n\nThis is a protected API and can only be executed with superuser rights.\n", + "operationId": "reloadServerTls", "responses": { "200": { "description": "This API will return HTTP 200 if everything is ok\n" }, - "401": { - "description": "if the request was not authenticated as a user with sufficient rights\n" + "403": { + "description": "This API will return HTTP 403 Forbidden if it is not called with\nsuperuser rights.\n" } }, - "summary": "Set the server mode to read-only or default", + "summary": "Reload the TLS data", "tags": [ - "Administration" + "Security" ] } }, - "/_admin/server/role": { + "/_api/cluster/endpoints": { "get": { - "description": "Returns the role of a server in a cluster.\nThe server role is returned in the `role` attribute of the result.\n", - "operationId": "getServerRole", + "description": "Returns an object with an attribute `endpoints`, which contains an\narray of objects, which each have the attribute `endpoint`, whose value\nis a string with the endpoint description. There is an entry for each\nCoordinator in the cluster. This method only works on Coordinators in\ncluster mode. 
In case of an error the `error` attribute is set to\n`true`.\n", + "operationId": "listClusterEndpoints", "responses": { "200": { "content": { @@ -2904,1207 +2393,873 @@ "schema": { "properties": { "code": { - "description": "the HTTP status code, always 200\n", + "description": "the HTTP status code - 200\n", "type": "integer" }, + "endpoints": { + "description": "A list of active cluster endpoints.\n", + "items": { + "properties": { + "endpoint": { + "description": "The bind of the Coordinator, like `tcp://[::1]:8530`\n", + "type": "string" + } + }, + "required": [ + "endpoint" + ], + "type": "object" + }, + "type": "array" + }, "error": { - "description": "always `false`\n", + "description": "boolean flag to indicate whether an error occurred (`true` in this case)\n", "type": "boolean" - }, - "errorNum": { - "description": "the server error number\n", - "type": "integer" - }, - "role": { - "description": "The server role. Possible values:\n- `SINGLE`: the server is a standalone server without clustering\n- `COORDINATOR`: the server is a Coordinator in a cluster\n- `PRIMARY`: the server is a DB-Server in a cluster\n- `SECONDARY`: this role is not used anymore\n- `AGENT`: the server is an Agency node in a cluster\n- `UNDEFINED`: in a cluster, this is returned if the server role cannot be\n determined.\n", - "type": "string" } }, "required": [ "error", "code", - "errorNum", - "role" + "endpoints" ], "type": "object" } } }, - "description": "Is returned in all cases.\n" + "description": "is returned when everything went well.\n" + }, + "501": { + "description": "server is not a Coordinator or method was not GET.\n" } }, - "summary": "Get the server role", + "summary": "List all Coordinator endpoints", "tags": [ "Cluster" ] } }, - "/_admin/server/tls": { + "/_db/_system/_admin/options": { "get": { - "description": "Return a summary of the TLS data. 
The JSON response will contain a field\n`result` with the following components:\n\n - `keyfile`: Information about the key file.\n - `clientCA`: Information about the Certificate Authority (CA) for\n client certificate verification.\n\nIf server name indication (SNI) is used and multiple key files are\nconfigured for different server names, then there is an additional\nattribute `SNI`, which contains for each configured server name\nthe corresponding information about the key file for that server name.\n\nIn all cases the value of the attribute will be a JSON object, which\nhas a subset of the following attributes (whatever is appropriate):\n\n - `sha256`: The value is a string with the SHA256 of the whole input\n file.\n - `certificates`: The value is a JSON array with the public\n certificates in the chain in the file.\n - `privateKeySha256`: In cases where there is a private key (`keyfile`\n but not `clientCA`), this field is present and contains a\n JSON string with the SHA256 of the private key.\n\nThis API requires authentication.\n", - "operationId": "getServerTls", - "responses": { - "200": { - "description": "This API will return HTTP 200 if everything is ok\n" - } - }, - "summary": "Get the TLS data", - "tags": [ - "Security" - ] - }, - "post": { - "description": "This API call triggers a reload of all the TLS data (server key, client-auth CA)\nand then returns a summary. 
The JSON response is exactly as in the corresponding\nGET request.\n\nThis is a protected API and can only be executed with superuser rights.\n", - "operationId": "reloadServerTls", + "description": "Return the effective configuration of the queried _arangod_ instance as\nset by startup options on the command-line and via a configuration file.\n\n{{\u003c security \u003e}}\nThis endpoint may reveal sensitive information about the deployment!\n{{\u003c /security \u003e}}\n\nThe endpoint can only be accessed via the `_system` database.\nIn addition, the `--server.options-api` startup option controls the\nrequired privileges to access the option endpoints and allows you to\ndisable them entirely. The option can have the following values:\n- `disabled`: the option endpoints are disabled\n- `jwt`: the option endpoints can only be accessed using a superuser JWT (default)\n- `admin`: the option endpoints can only be accessed by users with\n *Administrate* access level for the `_system` database\n- `public`: every user with access to the `_system` database can access\n the option endpoints\n", + "operationId": "getEffectiveStartupOptions", "responses": { "200": { - "description": "This API will return HTTP 200 if everything is ok\n" - }, - "403": { - "description": "This API will return HTTP 403 Forbidden if it is not called with\nsuperuser rights.\n" + "content": { + "application/json": { + "schema": { + "type": "object" + } + } + }, + "description": "An object with startup option names as keys and their effective\nvalue as values. The values can be of different data types, typically\nnumbers, strings, or arrays thereof.\n" } }, - "summary": "Reload the TLS data", + "summary": "Get the startup option configuration", "tags": [ - "Security" + "Administration" ] } }, - "/_admin/shutdown": { - "delete": { - "description": "This call initiates a clean shutdown sequence. 
Requires administrative privileges.\n", - "operationId": "startShutdown", - "parameters": [ - { - "description": "\u003csmall\u003eIntroduced in: v3.7.12, v3.8.1, v3.9.0\u003c/small\u003e\n\nIf set to `true`, this initiates a soft shutdown. This is only available\non Coordinators. When issued, the Coordinator tracks a number of ongoing\noperations, waits until all have finished, and then shuts itself down\nnormally. It will still accept new operations.\n\nThis feature can be used to make restart operations of Coordinators less\nintrusive for clients. It is designed for setups with a load balancer in front\nof Coordinators. Remove the designated Coordinator from the load balancer before\nissuing the soft-shutdown. The remaining Coordinators will internally forward\nrequests that need to be handled by the designated Coordinator. All other\nrequests will be handled by the remaining Coordinators, reducing the designated\nCoordinator's load.\n\nThe following types of operations are tracked:\n\n - AQL cursors (in particular streaming cursors)\n - Transactions (in particular stream transactions)\n - Pregel runs (conducted by this Coordinator)\n - Ongoing asynchronous requests (using the `x-arango-async: store` HTTP header)\n - Finished asynchronous requests, whose result has not yet been\n collected\n - Queued low priority requests (most normal requests)\n - Ongoing low priority requests\n", - "in": "query", - "name": "soft", - "required": false, - "schema": { - "type": "boolean" - } - } - ], + "/_db/_system/_admin/options-description": { + "get": { + "description": "Return the startup options available to configure the queried _arangod_\ninstance, similar to the `--dump-options` startup option.\n\nThe endpoint can only be accessed via the `_system` database.\nIn addition, the `--server.options-api` startup option controls the\nrequired privileges to access the option endpoints and allows you to\ndisable them entirely. 
The option can have the following values:\n- `disabled`: the option endpoints are disabled\n- `jwt`: the option endpoints can only be accessed using a superuser JWT (default)\n- `admin`: the option endpoints can be accessed by admin users in the `_system`\n database only.\n- `public`: every user with access to the `_system` database can access\n the option endpoints.\n", + "operationId": "getAvailableStartupOptions", "responses": { "200": { - "description": "is returned in all cases, `OK` will be returned in the result buffer on success.\n" + "content": { + "application/json": { + "schema": { + "type": "object" + } + } + }, + "description": "An object with startup option names as keys and sub-objects as values.\nThe structure of each sub-object is as follows:\n- `section` (string): The part before the dot of a startup option\n (`--section.param`), or `\"\"` if it is a general option that doesn't\n belong to a section\n- `description` (string): A succinct explanation of the startup option\n- `longDescription` (string, *optional*): Additional details about the\n startup option if available\n- `category` (string): Either `\"option\"` for regular options or `\"command\"`\n if using the option performs an action and then terminates the process\n- `hidden` (boolean): Whether the option is uncommon. If yes, then\n the `--help` command does not list it, but `--help-all` lists every\n startup option\n- `type` (string): the data type of the option, typically one of\n `\"uint64\"`, `\"uint32\"`, `\"int64\"`, `\"int32\"`, `\"double\"`, `\"boolean\"`,\n `\"string\"`, `\"string...\"`\n- `experimental` (boolean): Whether the option relates to a feature\n that is not ready for production yet\n- `obsolete` (boolean): Whether the option has been deprecated and\n no effect anymore\n- `enterpriseOnly` (boolean): Whether the option is only available in\n the Enterprise Edition. 
The Community Edition does have most of the\n Enterprise Edition startup options and they are thus not reported\n- `requiresValue` (boolean): Whether the option can be specified\n without a value to enable it\n- `os` (array of strings): The operating systems the startup option\n is supported on, always `[\"linux\"]`\n- `component` (array of strings): A list of server roles the startup\n option is available on. If it is supported by all cluster node types\n as well as the single server deployment mode, then the value is\n `[\"coordinator\", \"dbserver\", \"agent\", \"single\"]`\n- `introducedIn` (array of strings\\|null): A list of versions the\n startup option has been added in. Does not include later minor and\n major versions then the current version, and the information may\n get removed once all listed versions reach their end of life\n- `deprecatedIn` (array of strings\\|null): A list of versions the\n startup option has been marked for deprecation in. It can still\n be used until fully removed. Does not include later minor and\n major versions then the current version, and the information may\n get removed once all listed versions reach their end of life\n- `values` (string, *optional*):\n A description of the possible values you can set\n- `default` (any, *optional*): The standard value if the option is not set\n- `dynamic` (boolean): Whether the default value is calculated based\n on the target host configuration, e.g. 
available memory\n- `required` (boolean): Whether the option must be specified\n- `base` (number, *optional*): the unit for a numeric option\n- `minValue` (number, *optional*): The minimum value for a numeric option\n- `maxValue` (number, *optional*): The maximum value for a numeric option\n- `minInclusive` (boolean, *optional*): Whether the minimum value is\n included in the allowed value range\n- `maxInclusive` (boolean, *optional*): Whether the maximum value is\n included in the allowed value range\n" } }, - "summary": "Start the shutdown sequence", + "summary": "Get the available startup options", "tags": [ "Administration" ] - }, + } + }, + "/_db/_system/_admin/support-info": { "get": { - "description": "\u003csmall\u003eIntroduced in: v3.7.12, v3.8.1, v3.9.0\u003c/small\u003e\n\nThis call reports progress about a soft Coordinator shutdown (see\ndocumentation of `DELETE /_admin/shutdown?soft=true`).\nIn this case, the following types of operations are tracked:\n\n - AQL cursors (in particular streaming cursors)\n - Transactions (in particular stream transactions)\n - Pregel runs (conducted by this Coordinator)\n - Ongoing asynchronous requests (using the `x-arango-async: store` HTTP header)\n - Finished asynchronous requests, whose result has not yet been\n collected\n - Queued low priority requests (most normal requests)\n - Ongoing low priority requests\n\nThis API is only available on Coordinators.\n", - "operationId": "getShutdownProgress", + "description": "Retrieves deployment information for support purposes. The endpoint returns data\nabout the ArangoDB version used, the host (operating system, server ID, CPU and\nstorage capacity, current utilization, a few metrics) and the other servers in\nthe deployment (in case of cluster deployments).\n\nAs this API may reveal sensitive data about the deployment, it can only be\naccessed from inside the `_system` database. 
In addition, there is a policy\ncontrol startup option `--server.support-info-api` that controls if and to whom\nthe API is made available.\n", + "operationId": "getSupportInfo", "responses": { "200": { "content": { "application/json": { "schema": { "properties": { - "AQLcursors": { - "description": "Number of AQL cursors that are still active.\n", - "type": "number" - }, - "allClear": { - "description": "Whether all active operations finished.\n", - "type": "boolean" - }, - "doneJobs": { - "description": "Number of finished asynchronous requests, whose result has not yet been collected.\n", - "type": "number" - }, - "lowPrioOngoingRequests": { - "description": "Number of queued low priority requests.\n", - "type": "number" - }, - "lowPrioQueuedRequests": { - "description": "Number of ongoing low priority requests.\n", - "type": "number" - }, - "pendingJobs": { - "description": "Number of ongoing asynchronous requests.\n", - "type": "number" - }, - "pregelConductors": { - "description": "Number of ongoing Pregel jobs.\n", - "type": "number" + "date": { + "description": "ISO 8601 datetime string of when the information was requested.\n", + "type": "string" }, - "softShutdownOngoing": { - "description": "Whether a soft shutdown of the Coordinator is in progress.\n", - "type": "boolean" + "deployment": { + "description": "An object with at least a `type` attribute, indicating the deployment mode.\n\nIn case of a `\"single\"` server, additional information is provided in the\ntop-level `host` attribute.\n\nIn case of a `\"cluster\"`, there is a `servers` object that contains a nested\nobject for each Coordinator and DB-Server, using the server ID as key. 
Each\nobject holds information about the ArangoDB instance as well as the host machine.\nThere are additional attributes for the number of `agents`, `coordinators`,\n`dbServers`, and `shards`.\n", + "type": "object" }, - "transactions": { - "description": "Number of ongoing transactions.\n", - "type": "number" + "host": { + "description": "An object that holds information about the ArangoDB instance as well as the\nhost machine. Only set in case of single servers.\n", + "type": "object" } }, "required": [ - "softShutdownOngoing", - "AQLcursors", - "transactions", - "pendingJobs", - "doneJobs", - "pregelConductors", - "lowPrioOngoingRequests", - "lowPrioQueuedRequests", - "allClear" + "date", + "deployment" ], "type": "object" } } }, - "description": "The response indicates the fact that a soft shutdown is ongoing and the\nnumber of active operations of the various types. Once all numbers have gone\nto 0, the flag `allClear` is set and the Coordinator shuts down automatically.\n" + "description": "" + }, + "404": { + "description": "The support info API is turned off.\n" } }, - "summary": "Query the soft shutdown progress", + "summary": "Get information about the deployment", "tags": [ "Administration" ] } }, - "/_admin/statistics": { + "/_db/_system/_api/database": { "get": { - "description": "\u003e **WARNING:**\nThis endpoint should no longer be used. It is deprecated from version 3.8.0 on.\nUse `/_admin/metrics/v2` instead, which provides the data exposed by this API\nand a lot more.\n\n\nReturns the statistics information. The returned object contains the\nstatistics figures grouped together according to the description returned by\n`/_admin/statistics-description`. 
For instance, to access a figure `userTime`\nfrom the group `system`, you first select the sub-object describing the\ngroup stored in `system` and in that sub-object the value for `userTime` is\nstored in the attribute of the same name.\n\nIn case of a distribution, the returned object contains the total count in\n`count` and the distribution list in `counts`. The sum (or total) of the\nindividual values is returned in `sum`.\n\nThe transaction statistics show the local started, committed and aborted\ntransactions as well as intermediate commits done for the server queried. The\nintermediate commit count will only take non zero values for the RocksDB\nstorage engine. Coordinators do almost no local transactions themselves in\ntheir local databases, therefor cluster transactions (transactions started on a\nCoordinator that require DB-Servers to finish before the transactions is\ncommitted cluster wide) are just added to their local statistics. This means\nthat the statistics you would see for a single server is roughly what you can\nexpect in a cluster setup using a single Coordinator querying this Coordinator.\nJust with the difference that cluster transactions have no notion of\nintermediate commits and will not increase the value.\n", - "operationId": "getStatistics", + "description": "Retrieves the list of all existing databases\n\n\u003e **INFO:**\nRetrieving the list of databases is only possible from within the `_system` database.\n", + "operationId": "listDatabases", "responses": { "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "client": { - "description": "information about the connected clients and their resource usage\n", - "properties": { - "bytesReceived": { - "description": "number of bytes received from the clients\n", - "properties": { - "count": { - "description": "number of values summarized\n", - "type": "integer" - }, - "counts": { - "description": "array containing the values\n", - "items": { - "type": 
"integer" - }, - "type": "array" - }, - "sum": { - "description": "summarized value of all counts\n", - "type": "number" - } - }, - "required": [ - "sum", - "count", - "counts" - ], - "type": "object" - }, - "bytesSent": { - "description": "number of bytes sent to the clients\n", - "properties": { - "count": { - "description": "number of values summarized\n", - "type": "integer" - }, - "counts": { - "description": "array containing the values\n", - "items": { - "type": "integer" - }, - "type": "array" - }, - "sum": { - "description": "summarized value of all counts\n", - "type": "number" - } - }, - "required": [ - "sum", - "count", - "counts" - ], - "type": "object" - }, - "connectionTime": { - "description": "total connection times\n", - "properties": { - "count": { - "description": "number of values summarized\n", - "type": "integer" - }, - "counts": { - "description": "array containing the values\n", - "items": { - "type": "integer" - }, - "type": "array" - }, - "sum": { - "description": "summarized value of all counts\n", - "type": "number" - } - }, - "required": [ - "sum", - "count", - "counts" - ], - "type": "object" - }, - "httpConnections": { - "description": "the number of open http connections\n", - "type": "integer" - }, - "ioTime": { - "description": "IO Time\n", - "properties": { - "count": { - "description": "number of values summarized\n", - "type": "integer" - }, - "counts": { - "description": "array containing the values\n", - "items": { - "type": "integer" - }, - "type": "array" - }, - "sum": { - "description": "summarized value of all counts\n", - "type": "number" - } - }, - "required": [ - "sum", - "count", - "counts" - ], - "type": "object" + "description": "is returned if the list of database was compiled successfully.\n" + }, + "400": { + "description": "is returned if the request is invalid.\n" + }, + "403": { + "description": "is returned if the request was not executed in the `_system` database.\n" + } + }, + "summary": "List all 
databases", + "tags": [ + "Databases" + ] + }, + "post": { + "description": "Creates a new database.\n\nThe response is a JSON object with the attribute `result` set to `true`.\n\n\u003e **INFO:**\nCreating a new database is only possible from within the `_system` database.\n", + "operationId": "createDatabase", + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "name": { + "description": "Has to contain a valid database name. The name must conform to the selected\nnaming convention for databases. If the name contains Unicode characters, the\nname must be [NFC-normalized](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms).\nNon-normalized names will be rejected by arangod.\n", + "type": "string" + }, + "options": { + "description": "Optional object which can contain the following attributes:\n", + "properties": { + "replicationFactor": { + "description": "Default replication factor for new collections created in this database.\nSpecial values include \"satellite\", which will replicate the collection to\nevery DB-Server (Enterprise Edition only), and 1, which disables replication.\n_(cluster only)_\n", + "type": "integer" + }, + "sharding": { + "description": "The sharding method to use for new collections in this database. Valid values\nare: \"\", \"flexible\", or \"single\". The first two are equivalent. _(cluster only)_\n", + "type": "string" + }, + "writeConcern": { + "description": "Default write concern for new collections created in this database.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. 
The value of\n`writeConcern` cannot be greater than `replicationFactor`.\n\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`.\nOtherwise, the default value is controlled by the `--cluster.write-concern`\nstartup option, which defaults to `1`. _(cluster only)_\n", + "type": "number" + } + }, + "type": "object" + }, + "users": { + "description": "An array of user objects. The users will be granted *Administrate* permissions\nfor the new database. Users that do not exist yet will be created.\nIf `users` is not specified or does not contain any users, the default user\n`root` will be used to ensure that the new database will be accessible after it\nis created. The `root` user is created with an empty password should it not\nexist. Each user object can contain the following attributes:\n", + "items": { + "properties": { + "active": { + "description": "A flag indicating whether the user account should be activated or not.\nThe default value is `true`. If set to `false`, then the user won't be able to\nlog into the database. The default is `true`. The attribute is ignored for users\nthat already exist.\n", + "type": "boolean" }, - "queueTime": { - "description": "the time requests were queued waiting for processing\n", - "properties": { - "count": { - "description": "number of values summarized\n", - "type": "integer" - }, - "counts": { - "description": "array containing the values\n", - "items": { - "type": "integer" - }, - "type": "array" - }, - "sum": { - "description": "summarized value of all counts\n", - "type": "number" - } - }, - "required": [ - "sum", - "count", - "counts" - ], + "extra": { + "description": "A JSON object with extra user information. It is used by the web interface\nto store graph viewer settings and saved queries. 
Should not be set or\nmodified by end users, as custom attributes will not be preserved.\n", "type": "object" }, - "requestTime": { - "description": "the request times\n", - "properties": { - "count": { - "description": "number of values summarized\n", - "type": "integer" - }, - "counts": { - "description": "array containing the values\n", - "items": { - "type": "integer" - }, - "type": "array" - }, - "sum": { - "description": "summarized value of all counts\n", - "type": "number" - } - }, - "required": [ - "sum", - "count", - "counts" - ], - "type": "object" + "passwd": { + "description": "The user password as a string. If not specified, it will default to an empty\nstring. The attribute is ignored for users that already exist.\n", + "type": "string" }, - "totalTime": { - "description": "the system time\n", - "properties": { - "count": { - "description": "number of values summarized\n", - "type": "integer" - }, - "counts": { - "description": "array containing the values\n", - "items": { - "type": "integer" - }, - "type": "array" - }, - "sum": { - "description": "summarized value of all counts\n", - "type": "number" - } - }, - "required": [ - "sum", - "count", - "counts" - ], - "type": "object" + "username": { + "description": "Login name of an existing user or one to be created.\n", + "type": "string" } }, "required": [ - "connectionTime", - "totalTime", - "requestTime", - "queueTime", - "ioTime", - "bytesSent", - "bytesReceived", - "httpConnections" + "username" ], "type": "object" }, - "code": { - "description": "the HTTP status code - 200 in this case\n", - "type": "integer" - }, - "enabled": { - "description": "`true` if the server has the statistics module enabled. 
If not, don't expect any values.\n", - "type": "boolean" - }, - "error": { - "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", - "type": "boolean" - }, - "errorMessage": { - "description": "a descriptive error message\n", - "type": "string" - }, - "http": { - "description": "the numbers of requests by Verb\n", - "properties": { - "requestsAsync": { - "description": "total number of asynchronous http requests\n", - "type": "integer" - }, - "requestsDelete": { - "description": "No of requests using the DELETE-verb\n", - "type": "integer" - }, - "requestsGet": { - "description": "No of requests using the GET-verb\n", - "type": "integer" - }, - "requestsHead": { - "description": "No of requests using the HEAD-verb\n", - "type": "integer" - }, - "requestsOptions": { - "description": "No of requests using the OPTIONS-verb\n", - "type": "integer" - }, - "requestsOther": { - "description": "No of requests using the none of the above identified verbs\n", - "type": "integer" - }, - "requestsPatch": { - "description": "No of requests using the PATCH-verb\n", - "type": "integer" - }, - "requestsPost": { - "description": "No of requests using the POST-verb\n", + "type": "array" + } + }, + "required": [ + "name" + ], + "type": "object" + } + } + } + }, + "responses": { + "201": { + "description": "is returned if the database was created successfully.\n" + }, + "400": { + "description": "is returned if the request parameters are invalid, if a database with the\nspecified name already exists, or if the configured limit to the number\nof databases has been reached.\n" + }, + "403": { + "description": "is returned if the request was not executed in the `_system` database.\n" + }, + "409": { + "description": "is returned if a database with the specified name already exists.\n" + } + }, + "summary": "Create a database", + "tags": [ + "Databases" + ] + } + }, + "/_db/_system/_api/database/{database-name}": { + "delete": { + "description": 
"Drops the database along with all data stored in it.\n\n\u003e **INFO:**\nDropping a database is only possible from within the `_system` database.\nThe `_system` database itself cannot be dropped.\n", + "operationId": "deleteDatabase", + "parameters": [ + { + "description": "The name of the database\n", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "is returned if the database was dropped successfully.\n" + }, + "400": { + "description": "is returned if the request is malformed.\n" + }, + "403": { + "description": "is returned if the request was not executed in the `_system` database.\n" + }, + "404": { + "description": "is returned if the database could not be found.\n" + } + }, + "summary": "Drop a database", + "tags": [ + "Databases" + ] + } + }, + "/_db/_system/_api/endpoint": { + "get": { + "description": "\u003e **WARNING:**\nThis route should no longer be used.\nIt is considered as deprecated from version 3.4.0 on.\n\n\nReturns an array of all configured endpoints the server is listening on.\n\nThe result is a JSON array of JSON objects, each with `\"entrypoint\"` as\nthe only attribute, and with the value being a string describing the\nendpoint.\n\n\u003e **INFO:**\nRetrieving the array of all endpoints is allowed in the system database\nonly. 
Calling this action in any other database will make the server return\nan error.\n", + "operationId": "listEndpoints", + "responses": { + "200": { + "description": "is returned when the array of endpoints can be determined successfully.\n" + }, + "400": { + "description": "is returned if the action is not carried out in the system database.\n" + }, + "405": { + "description": "The server will respond with *HTTP 405* if an unsupported HTTP method is used.\n" + } + }, + "summary": "List the endpoints of a single server (deprecated)", + "tags": [ + "Administration" + ] + } + }, + "/_db/{database-name}/_admin/echo": { + "post": { + "description": "The call returns an object with the servers request information\n", + "operationId": "echoRequest", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "body": { + "description": "The request body can be of any type and is simply forwarded.\n", + "type": "string" + } + }, + "required": [ + "body" + ], + "type": "object" + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "authorized": { + "description": "Whether the session is authorized\n", + "type": "boolean" + }, + "client": { + "description": "Attributes of the client connection\n", + "properties": { + "address": { + "description": "The IP address of the client\n", "type": "integer" }, - "requestsPut": { - "description": "No of requests using the PUT-verb\n", - "type": "integer" + "id": { + "description": "A server generated ID\n", + "type": "string" }, - "requestsTotal": { - "description": "total number of http requests\n", + "port": { + 
"description": "The port of the TCP connection on the client-side\n", "type": "integer" } }, "required": [ - "requestsTotal", - "requestsAsync", - "requestsGet", - "requestsHead", - "requestsPost", - "requestsPut", - "requestsPatch", - "requestsDelete", - "requestsOptions", - "requestsOther" + "address", + "port", + "id" ], "type": "object" }, + "cookies": { + "description": "A list of the cookies you sent\n", + "type": "object" + }, + "database": { + "description": "The name of the database this request was executed on\n", + "type": "string" + }, + "headers": { + "description": "The list of the HTTP headers you sent\n", + "type": "object" + }, + "internals": { + "description": "Contents of the server internals struct\n", + "type": "object" + }, + "isAdminUser": { + "description": "Whether the current user is an administrator\n", + "type": "boolean" + }, + "parameters": { + "description": "An object containing the query parameters\n", + "type": "object" + }, + "path": { + "description": "The relative path of this request (decoded, excluding `/_admin/echo`)\n", + "type": "string" + }, + "portType": { + "description": "The type of the socket, one of `\"tcp/ip\"`, `\"unix\"`, `\"unknown\"`\n", + "type": "string" + }, + "prefix": { + "description": "The prefix of the database\n", + "type": "object" + }, + "protocol": { + "description": "The transport protocol, one of `\"http\"`, `\"https\"`\n", + "type": "string" + }, + "rawRequestBody": { + "description": "The sent payload as a JSON-encoded Buffer object\n", + "type": "object" + }, + "rawSuffix": { + "description": "A list of the percent-encoded URL path suffixes\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "requestBody": { + "description": "Stringified version of the request body you sent\n", + "type": "string" + }, + "requestType": { + "description": "The HTTP method that was used for the request (`\"POST\"`). 
The endpoint can be\nqueried using other verbs, too (`\"GET\"`, `\"PUT\"`, `\"PATCH\"`, `\"DELETE\"`).\n", + "type": "string" + }, "server": { - "description": "statistics of the server\n", + "description": "Attributes of the server connection\n", "properties": { - "physicalMemory": { - "description": "available physical memory on the server\n", - "type": "integer" + "address": { + "description": "The bind address of the endpoint this request was sent to\n", + "type": "string" }, - "threads": { - "description": "Statistics about the server worker threads (excluding V8 specific or jemalloc specific threads and system threads)\n", - "properties": { - "in-progress": { - "description": "The number of currently busy worker threads\n", - "type": "integer" - }, - "queued": { - "description": "The number of jobs queued up waiting for worker threads becoming available\n", - "type": "integer" - }, - "scheduler-threads": { - "description": "The number of spawned worker threads\n", - "type": "integer" - } - }, - "required": [ - "scheduler-threads", - "in-progress", - "queued" - ], - "type": "object" - }, - "transactions": { - "description": "Statistics about transactions\n", - "properties": { - "aborted": { - "description": "the number of aborted transactions\n", - "type": "integer" - }, - "committed": { - "description": "the number of committed transactions\n", - "type": "integer" - }, - "intermediateCommits": { - "description": "the number of intermediate commits done\n", - "type": "integer" - }, - "started": { - "description": "the number of started transactions\n", - "type": "integer" - } - }, - "required": [ - "started", - "committed", - "aborted", - "intermediateCommits" - ], - "type": "object" + "endpoint": { + "description": "The endpoint this request was sent to\n", + "type": "string" }, - "uptime": { - "description": "time the server is up and running\n", + "port": { + "description": "The port this request was sent to\n", "type": "integer" - }, - "v8Context": { - 
"description": "Statistics about the V8 javascript contexts\n", - "properties": { - "available": { - "description": "the number of currently spawned V8 contexts\n", - "type": "integer" - }, - "busy": { - "description": "the number of currently active V8 contexts\n", - "type": "integer" - }, - "dirty": { - "description": "the number of contexts that were previously used, and should now be garbage collected before being re-used\n", - "type": "integer" - }, - "free": { - "description": "the number of V8 contexts that are free to use\n", - "type": "integer" - }, - "max": { - "description": "the maximum number of V8 concurrent contexts we may spawn as configured by --javascript.v8-contexts\n", - "type": "integer" - }, - "memory": { - "description": "a list of V8 memory / garbage collection watermarks; Refreshed on every garbage collection run;\nPreserves min/max memory used at that time for 10 seconds\n", - "items": { - "properties": { - "contextId": { - "description": "ID of the context this set of memory statistics is from\n", - "type": "integer" - }, - "countOfTimes": { - "description": "how many times was the garbage collection run in these 10 seconds\n", - "type": "integer" - }, - "heapMax": { - "description": "High watermark of all garbage collection runs in 10 seconds\n", - "type": "integer" - }, - "heapMin": { - "description": "Low watermark of all garbage collection runs in these 10 seconds\n", - "type": "integer" - }, - "tMax": { - "description": "the timestamp where the 10 seconds interval started\n", - "type": "number" - } - }, - "required": [ - "contextId", - "tMax", - "countOfTimes", - "heapMax", - "heapMin" - ], - "type": "object" - }, - "type": "array" - }, - "min": { - "description": "the minimum number of V8 contexts that are spawned as configured by --javascript.v8-contexts-minimum\n", - "type": "integer" - } - }, - "required": [ - "available", - "busy", - "dirty", - "free", - "max", - "min", - "memory" - ], - "type": "object" } }, "required": [ - 
"uptime", - "physicalMemory", - "transactions", - "v8Context", - "threads" + "address", + "port", + "endpoint" ], "type": "object" }, - "system": { - "description": "metrics gathered from the system about this process; may depend on the host OS\n", - "properties": { - "majorPageFaults": { - "description": "pagefaults\n", - "type": "integer" - }, - "minorPageFaults": { - "description": "pagefaults\n", - "type": "integer" - }, - "numberOfThreads": { - "description": "the number of threads in the server\n", - "type": "integer" - }, - "residentSize": { - "description": "RSS of process\n", - "type": "integer" - }, - "residentSizePercent": { - "description": "RSS of process in %\n", - "type": "number" - }, - "systemTime": { - "description": "the system CPU time used by the server process\n", - "type": "number" - }, - "userTime": { - "description": "the user CPU time used by the server process\n", - "type": "number" - }, - "virtualSize": { - "description": "VSS of the process\n", - "type": "integer" - } + "suffix": { + "description": "A list of the decoded URL path suffixes. You can query the endpoint with\narbitrary suffixes, e.g. 
`/_admin/echo/foo/123`\n", + "items": { + "type": "string" }, - "required": [ - "minorPageFaults", - "majorPageFaults", - "userTime", - "systemTime", - "numberOfThreads", - "residentSize", - "residentSizePercent", - "virtualSize" - ], - "type": "object" + "type": "array" }, - "time": { - "description": "the current server timestamp\n", - "type": "integer" + "url": { + "description": "The raw request URL\n", + "type": "string" + }, + "user": { + "description": "The name of the current user that sent this request\n", + "type": "string" } }, "required": [ - "error", - "code", - "time", - "errorMessage", - "enabled", - "system", + "authorized", + "user", + "isAdminUser", + "database", + "url", + "protocol", + "portType", + "server", "client", - "http", - "server" + "internals", + "prefix", + "headers", + "requestType", + "requestBody", + "rawRequestBody", + "parameters", + "cookies", + "suffix", + "rawSuffix", + "path" ], "type": "object" } } }, - "description": "Statistics were returned successfully.\n" - }, - "404": { - "description": "Statistics are disabled on the instance.\n" + "description": "Echo was returned successfully.\n" } }, - "summary": "Get the statistics", + "summary": "Echo a request", "tags": [ - "Monitoring" + "Administration" ] } }, - "/_admin/statistics-description": { - "get": { - "description": "\u003e **WARNING:**\nThis endpoint should no longer be used. 
It is deprecated from version 3.8.0 on.\nUse `/_admin/metrics/v2` instead, which provides the data exposed by the\nstatistics API and a lot more.\n\n\nReturns a description of the statistics returned by `/_admin/statistics`.\nThe returned objects contains an array of statistics groups in the attribute\n`groups` and an array of statistics figures in the attribute `figures`.\n\nA statistics group is described by\n\n- `group`: The identifier of the group.\n- `name`: The name of the group.\n- `description`: A description of the group.\n\nA statistics figure is described by\n\n- `group`: The identifier of the group to which this figure belongs.\n- `identifier`: The identifier of the figure. It is unique within the group.\n- `name`: The name of the figure.\n- `description`: A description of the figure.\n- `type`: Either `current`, `accumulated`, or `distribution`.\n- `cuts`: The distribution vector.\n- `units`: Units in which the figure is measured.\n", - "operationId": "getStatisticsDescription", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "the HTTP status code\n", - "type": "integer" - }, - "error": { - "description": "the error, `false` in this case\n", - "type": "boolean" - }, - "figures": { - "description": "A statistics figure\n", - "items": { - "properties": { - "cuts": { - "description": "The distribution vector.\n", - "type": "string" - }, - "description": { - "description": "A description of the figure.\n", - "type": "string" - }, - "group": { - "description": "The identifier of the group to which this figure belongs.\n", - "type": "string" - }, - "identifier": { - "description": "The identifier of the figure. 
It is unique within the group.\n", - "type": "string" - }, - "name": { - "description": "The name of the figure.\n", - "type": "string" - }, - "type": { - "description": "Either `current`, `accumulated`, or `distribution`.\n", - "type": "string" - }, - "units": { - "description": "Units in which the figure is measured.\n", - "type": "string" - } - }, - "required": [ - "group", - "identifier", - "name", - "description", - "type", - "cuts", - "units" - ], - "type": "object" - }, - "type": "array" - }, - "groups": { - "description": "A statistics group\n", - "items": { - "properties": { - "description": { - "description": "A description of the group.\n", - "type": "string" - }, - "group": { - "description": "The identifier of the group.\n", - "type": "string" - }, - "name": { - "description": "The name of the group.\n", - "type": "string" - } - }, - "required": [ - "group", - "name", - "description" - ], - "type": "object" - }, - "type": "array" - } - }, - "required": [ - "groups", - "figures", - "code", - "error" - ], - "type": "object" - } + "/_db/{database-name}/_admin/execute": { + "post": { + "description": "Executes the JavaScript code in the body on the server as the body\nof a function with no arguments. If you have a `return` statement\nthen the return value you produce will be returned as content type\n`application/json`. 
If the parameter `returnAsJSON` is set to\n`true`, the result will be a JSON object describing the return value\ndirectly, otherwise a string produced by JSON.stringify will be\nreturned.\n\nNote that this API endpoint will only be present if the server was\nstarted with the option `--javascript.allow-admin-execute true`.\n\nThe default value of this option is `false`, which disables the execution of\nuser-defined code and disables this API endpoint entirely.\nThis is also the recommended setting for production.\n", + "operationId": "executeCode", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "body": { + "description": "The request body is the JavaScript code to be executed.\n", + "type": "string" + } + }, + "required": [ + "body" + ], + "type": "object" } - }, - "description": "Description was returned successfully.\n" + } } }, - "summary": "Get the statistics description", + "responses": { + "200": { + "description": "is returned when everything went well, or if a timeout occurred. In the\nlatter case a body of type application/json indicating the timeout\nis returned. 
depending on `returnAsJSON` this is a json object or a plain string.\n" + }, + "403": { + "description": "is returned if ArangoDB is not running in cluster mode.\n" + }, + "404": { + "description": "is returned if ArangoDB was not compiled for cluster operation.\n" + } + }, + "summary": "Execute a script", "tags": [ - "Monitoring" + "Administration" ] } }, - "/_admin/status": { + "/_db/{database-name}/_admin/license": { "get": { - "description": "Returns status information about the server.\n", - "operationId": "getStatus", + "description": "View the license information and status of an Enterprise Edition instance.\nCan be called on single servers, Coordinators, and DB-Servers.\n", + "operationId": "getLicense", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], "responses": { "200": { "content": { "application/json": { "schema": { "properties": { - "agency": { - "description": "Information about the Agency.\n*Cluster only* (Coordinators and DB-Servers).\n", - "properties": { - "agencyComm": { - "description": "Information about the communication with the Agency.\n*Cluster only* (Coordinators and DB-Servers).\n", - "properties": { - "endpoints": { - "description": "A list of possible Agency endpoints.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - } - }, - "type": "object" - }, - "agent": { - "description": "Information about the Agents.\n*Cluster only* (Agents)\n", + "features": { + "description": "The properties of the license.\n", "properties": { - "endpoint": { - "description": "The endpoint of the queried Agent.\n", - "type": 
"string" - }, - "id": { - "description": "Server ID of the queried Agent.\n", - "type": "string" - }, - "leaderId": { - "description": "Server ID of the leading Agent.\n", - "type": "string" - }, - "leading": { - "description": "Whether the queried Agent is the leader.\n", - "type": "boolean" - }, - "term": { - "description": "The current term number.\n", + "expires": { + "description": "The `expires` key lists the expiry date as Unix timestamp (seconds since\nJanuary 1st, 1970 UTC).\n", + "example": 1683173040, "type": "number" } }, + "required": [ + "expires" + ], "type": "object" }, - "coordinator": { - "description": "Information about the Coordinators.\n*Cluster only* (Coordinators)\n", - "properties": { - "foxxmaster": { - "description": "The server ID of the Coordinator that is the Foxx master.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "isFoxxmaster": { - "description": "Whether the queried Coordinator is the Foxx master.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "foxxApi": { - "description": "Whether the Foxx API is enabled.\n", - "type": "boolean" - }, - "host": { - "description": "A host identifier defined by the `HOST` or `NODE_NAME` environment variable,\nor a fallback value using a machine identifier or the cluster/Agency address.\n", - "type": "string" - }, - "hostname": { - "description": "A hostname defined by the `HOSTNAME` environment variable.\n", + "hash": { + "description": "The hash value of the license.\n", + "example": "982db5...44f3", "type": "string" }, "license": { - "description": "ArangoDB Edition, either `\"community\"` or `\"enterprise\"`.\n", + "description": "The encrypted license key in Base64 encoding, or `\"none\"`\nin the Community Edition.\n", + "example": "V0h/W...wEDw==", "type": "string" }, - "mode": { - "description": "Either `\"server\"` or `\"console\"`. 
**Deprecated**, use `operationMode` instead.\n", + "status": { + "description": "The `status` key allows you to confirm the state of the installed license on a\nglance.\n\n- `good`: The license is valid for more than 2 weeks.\n- `expiring`: The license is valid for less than 2 weeks.\n- `expired`: The license has expired. In this situation, no new\n Enterprise Edition features can be utilized.\n- `read-only`: The license is expired over 2 weeks. The instance is now\n restricted to read-only mode.\n", + "enum": [ + "good", + "expiring", + "expired", + "read-only" + ], + "example": "good", "type": "string" }, - "operationMode": { - "description": "Either `\"server\"` or `\"console\"`.\n", - "type": "string" + "upgrading": { + "description": "Whether the server is performing a database upgrade.\n", + "example": false, + "type": "boolean" }, - "pid": { - "description": "The process ID of _arangod_.\n", + "version": { + "description": "The license version number.\n", + "example": 1, "type": "number" - }, - "server": { - "description": "Always `\"arango\"`.\n", - "type": "string" - }, - "serverInfo": { - "description": "Information about the server status.\n", - "properties": { - "address": { - "description": "The address of the server, e.g. `tcp://[::1]:8530`.\n*Cluster only* (Coordinators and DB-Servers).\n", - "type": "string" - }, - "maintenance": { - "description": "Whether the maintenance mode is enabled.\n", - "type": "boolean" - }, - "persistedId": { - "description": "The persisted ID, e. g. `\"CRDN-e427b441-5087-4a9a-9983-2fb1682f3e2a\"`.\n*Cluster only* (Agents, Coordinators, and DB-Servers).\n", - "type": "string" - }, - "progress": { - "description": "Startup and recovery information.\n\nYou can check for changes to determine whether progress was made between two\ncalls, but you should not rely on specific values as they may change between\nArangoDB versions. The values are only expected to change during the startup and\nshutdown, i.e. 
while `maintenance` is `true`.\n\nYou need to start _arangod_ with the `--server.early-connections` startup option\nenabled to be able to query the endpoint during the startup process.\nIf authentication is enabled, then you need to use the super-user JWT for the\nrequest because the user management is not available during the startup.\n", - "properties": { - "feature": { - "description": "Internal name of the feature that is currently being prepared, started,\nstopped or unprepared.\n", - "type": "string" - }, - "phase": { - "description": "Name of the lifecycle phase the instance is currently in. Normally one of\n`\"in prepare\"`, `\"in start\"`, `\"in wait\"`, `\"in shutdown\"`, `\"in stop\"`,\nor `\"in unprepare\"`.\n", - "type": "string" - }, - "recoveryTick": { - "description": "Current recovery sequence number value, if the instance is currently recovering.\nIf the instance is already past the recovery, this attribute will contain the\nlast handled recovery sequence number.\n", - "type": "number" - } - }, - "required": [ - "phase", - "feature", - "recoveryTick" - ], - "type": "object" - }, - "readOnly": { - "description": "Whether writes are disabled.\n", - "type": "boolean" - }, - "rebootId": { - "description": "The reboot ID. Changes on every restart.\n*Cluster only* (Agents, Coordinators, and DB-Servers).\n", - "type": "number" - }, - "role": { - "description": "Either `\"SINGLE\"`, `\"COORDINATOR\"`, `\"PRIMARY\"` (DB-Server), or `\"AGENT\"`.\n", - "type": "string" - }, - "serverId": { - "description": "The server ID, e.g. 
`\"CRDN-e427b441-5087-4a9a-9983-2fb1682f3e2a\"`.\n*Cluster only* (Coordinators and DB-Servers).\n", - "type": "string" - }, - "state": { - "description": "Either `\"STARTUP\"`, `\"SERVING\"`, or `\"SHUTDOWN\"`.\n*Cluster only* (Coordinators and DB-Servers).\n", - "type": "string" + } + }, + "required": [ + "license" + ], + "type": "object" + } + } + }, + "description": "Returns the license information.\n" + } + }, + "summary": "Get information about the current license", + "tags": [ + "Administration" + ] + }, + "put": { + "description": "Set a new license for an Enterprise Edition instance.\nCan be called on single servers, Coordinators, and DB-Servers.\n", + "operationId": "setLicense", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Set to `true` to change the license even if it expires sooner than the current one.\n", + "in": "query", + "name": "force", + "required": false, + "schema": { + "default": false, + "type": "boolean" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "The request body has to contain the Base64-encoded string wrapped in double quotes.\n", + "example": "eyJncmFudCI6...(Base64-encoded license string)...", + "type": "string" + } + } + } + }, + "responses": { + "201": { + "content": { + "application/json": { + "schema": { + "properties": { + "result": { + "properties": { + "code": { + "description": "The HTTP status code.\n", + "example": 201, + "type": "integer" }, - "writeOpsEnabled": { - "description": "Whether writes are enabled. 
**Deprecated**, use `readOnly` instead.\n", + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, "type": "boolean" } }, "required": [ - "progress", - "role", - "writeOpsEnabled", - "readOnly", - "maintenance" + "error", + "code" ], "type": "object" - }, - "version": { - "description": "The server version as a string.\n", - "type": "string" } }, "required": [ - "server", - "license", - "version", - "mode", - "operationMode", - "foxxApi", - "host", - "pid", - "serverInfo" + "result" ], "type": "object" } } }, - "description": "Status information was returned successfully.\n" - } - }, - "summary": "Get server status information", - "tags": [ - "Administration" - ] - } - }, - "/_admin/support-info": { - "get": { - "description": "Retrieves deployment information for support purposes. The endpoint returns data\nabout the ArangoDB version used, the host (operating system, server ID, CPU and\nstorage capacity, current utilization, a few metrics) and the other servers in\nthe deployment (in case of cluster deployments).\n\nAs this API may reveal sensitive data about the deployment, it can only be\naccessed from inside the `_system` database. 
In addition, there is a policy\ncontrol startup option `--server.support-info-api` that controls if and to whom\nthe API is made available.\n", - "operationId": "getSupportInfo", - "responses": { - "200": { + "description": "License successfully deployed.\n" + }, + "400": { "content": { "application/json": { "schema": { "properties": { - "date": { - "description": "ISO 8601 datetime string of when the information was requested.\n", - "type": "string" + "code": { + "description": "The HTTP status code.\n", + "example": 400, + "type": "integer" }, - "deployment": { - "description": "An object with at least a `type` attribute, indicating the deployment mode.\n\nIn case of a `\"single\"` server, additional information is provided in the\ntop-level `host` attribute.\n\nIn case of a `\"cluster\"`, there is a `servers` object that contains a nested\nobject for each Coordinator and DB-Server, using the server ID as key. Each\nobject holds information about the ArangoDB instance as well as the host machine.\nThere are additional attributes for the number of `agents`, `coordinators`,\n`dbServers`, and `shards`.\n", - "type": "object" + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" }, - "host": { - "description": "An object that holds information about the ArangoDB instance as well as the\nhost machine. 
Only set in case of single servers.\n", - "type": "object" + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number.\n", + "type": "integer" } }, "required": [ - "date", - "deployment" + "error", + "code", + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "" + "description": "If the license expires earlier than the previously installed one,\nor if the supplied license string is invalid.\n" }, - "404": { - "description": "The support info API is turned off.\n" - } - }, - "summary": "Get information about the deployment", - "tags": [ - "Administration" - ] - } - }, - "/_admin/time": { - "get": { - "description": "The call returns an object with the `time` attribute. This contains the\ncurrent system time as a Unix timestamp with microsecond precision.\n", - "operationId": "getTime", - "responses": { - "200": { + "501": { "content": { "application/json": { "schema": { "properties": { "code": { - "description": "the HTTP status code\n", + "description": "The HTTP status code.\n", + "example": 501, "type": "integer" }, "error": { - "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", + "description": "A flag indicating that an error occurred.\n", + "example": true, "type": "boolean" }, - "time": { - "description": "The current system time as a Unix timestamp with microsecond precision of the server\n", - "type": "number" + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number.\n", + "type": "integer" } }, "required": [ "error", "code", - "time" + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "Time was returned successfully.\n" + "description": "If you try to apply a license in the Community Edition.\n" } }, - "summary": "Get the system time", + "summary": "Set a new license", "tags": [ 
"Administration" ] } }, - "/_api/analyzer": { + "/_db/{database-name}/_admin/metrics": { "get": { - "description": "Retrieves a an array of all Analyzer definitions.\nThe resulting array contains objects with the following attributes:\n- `name`: the Analyzer name\n- `type`: the Analyzer type\n- `properties`: the properties used to configure the specified type\n- `features`: the set of features to set on the Analyzer generated fields\n", - "operationId": "listAnalyzers", + "description": "\u003e **WARNING:**\nThis endpoint should no longer be used. It is deprecated from version 3.8.0 on.\nUse `/_admin/metrics/v2` instead. From version 3.10.0 onward, `/_admin/metrics`\nreturns the same metrics as `/_admin/metrics/v2`.\n\n\nReturns the instance's current metrics in Prometheus format. The\nreturned document collects all instance metrics, which are measured\nat any given time and exposes them for collection by Prometheus.\n\nThe document contains different metrics and metrics groups dependent\non the role of the queried instance. All exported metrics are\npublished with the `arangodb_` or `rocksdb_` string to distinguish\nthem from other collected data.\n\nThe API then needs to be added to the Prometheus configuration file\nfor collection.\n", + "operationId": "getMetrics", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Returns metrics of the specified server. If no serverId is given, the asked\nserver will reply. 
This parameter is only meaningful on Coordinators.\n", + "in": "query", + "name": "serverId", + "required": false, + "schema": { + "type": "string" + } + } + ], "responses": { "200": { - "description": "The Analyzer definitions was retrieved successfully.\n" + "description": "Metrics were returned successfully.\n" + }, + "404": { + "description": "The metrics API may be disabled using `--server.export-metrics-api false`\nsetting in the server. In this case, the result of the call indicates the API\nto be not found.\n" } }, - "summary": "List all Analyzers", + "summary": "Get the metrics (deprecated)", "tags": [ - "Analyzers" - ] - }, - "post": { - "description": "Creates a new Analyzer based on the provided configuration.\n", - "operationId": "createAnalyzer", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "features": { - "description": "The set of features to set on the Analyzer generated fields.\nThe default value is an empty array.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "name": { - "description": "The Analyzer name.\n", - "type": "string" - }, - "properties": { - "description": "The properties used to configure the specified Analyzer type.\n", - "type": "object" - }, - "type": { - "description": "The Analyzer type.\n", - "type": "string" - } - }, - "required": [ - "name", - "type" - ], - "type": "object" - } - } - } - }, - "responses": { - "200": { - "description": "An Analyzer with a matching name and definition already exists.\n" - }, - "201": { - "description": "A new Analyzer definition was successfully created.\n" - }, - "400": { - "description": "One or more of the required parameters is missing or one or more of the parameters\nis not valid.\n" - }, - "403": { - "description": "The user does not have permission to create and Analyzer with this configuration.\n" - } - }, - "summary": "Create an Analyzer", - "tags": [ - "Analyzers" + "Monitoring" ] } }, - 
"/_api/analyzer/{analyzer-name}": { - "delete": { - "description": "Removes an Analyzer configuration identified by `analyzer-name`.\n\nIf the Analyzer definition was successfully dropped, an object is returned with\nthe following attributes:\n- `error`: `false`\n- `name`: The name of the removed Analyzer\n", - "operationId": "deleteAnalyzer", + "/_db/{database-name}/_admin/metrics/v2": { + "get": { + "description": "Returns the instance's current metrics in Prometheus format. The\nreturned document collects all instance metrics, which are measured\nat any given time and exposes them for collection by Prometheus.\n\nThe document contains different metrics and metrics groups dependent\non the role of the queried instance. All exported metrics are\npublished with the prefix `arangodb_` or `rocksdb_` to distinguish them from\nother collected data.\n\nThe API then needs to be added to the Prometheus configuration file\nfor collection.\n", + "operationId": "getMetricsV2", "parameters": [ { - "description": "The name of the Analyzer to remove.\n", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", "in": "path", - "name": "analyzer-name", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The Analyzer configuration should be removed even if it is in-use.\nThe default value is `false`.\n", + "description": "Returns metrics of the specified server. If no serverId is given, the asked\nserver will reply. 
This parameter is only meaningful on Coordinators.\n", "in": "query", - "name": "force", + "name": "serverId", "required": false, "schema": { - "type": "boolean" + "type": "string" } } ], "responses": { "200": { - "description": "The Analyzer configuration was removed successfully.\n" - }, - "400": { - "description": "The `analyzer-name` was not supplied or another request parameter was not\nvalid.\n" - }, - "403": { - "description": "The user does not have permission to remove this Analyzer configuration.\n" + "description": "Metrics were returned successfully.\n" }, "404": { - "description": "Such an Analyzer configuration does not exist.\n" - }, - "409": { - "description": "The specified Analyzer configuration is still in use and `force` was omitted or\n`false` specified.\n" + "description": "The metrics API may be disabled using `--server.export-metrics-api false`\nsetting in the server. In this case, the result of the call indicates the API\nto be not found.\n" } }, - "summary": "Remove an Analyzer", + "summary": "Get the metrics", "tags": [ - "Analyzers" + "Monitoring" ] - }, - "get": { - "description": "Retrieves the full definition for the specified Analyzer name.\nThe resulting object contains the following attributes:\n- `name`: the Analyzer name\n- `type`: the Analyzer type\n- `properties`: the properties used to configure the specified type\n- `features`: the set of features to set on the Analyzer generated fields\n", - "operationId": "getAnalyzer", + } + }, + "/_db/{database-name}/_admin/routing/reload": { + "post": { + "description": "Reloads the routing information from the `_routing` system collection if it\nexists, and makes Foxx rebuild its local routing table on the next request.\n", + "operationId": "reloadRouting", "parameters": [ { - "description": "The name of the Analyzer to retrieve.\n", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", "in": "path", - "name": "analyzer-name", + "name": "database-name", "required": true, "schema": { "type": "string" @@ -4113,28 +3268,26 @@ ], "responses": { "200": { - "description": "The Analyzer definition was retrieved successfully.\n" - }, - "404": { - "description": "Such an Analyzer configuration does not exist.\n" + "description": "The routing information has been reloaded successfully.\n" } }, - "summary": "Get an Analyzer definition", + "summary": "Reload the routing table", "tags": [ - "Analyzers" + "Administration" ] } }, - "/_api/aqlfunction": { + "/_db/{database-name}/_admin/server/jwt": { "get": { - "description": "Returns all registered user-defined functions (UDFs) for the use in AQL of the\ncurrent database.\n\nThe call returns a JSON array with status codes and all user functions found under `result`.\n", - "operationId": "listAqlUserFunctions", + "description": "Get information about the currently loaded secrets.\n\nTo utilize the API a superuser JWT token is necessary, otherwise the response\nwill be _HTTP 403 Forbidden_.\n", + "operationId": "getServerJwtSecrets", "parameters": [ { - "description": "Returns all registered AQL user functions from the specified namespace.\n", - "in": "query", - "name": "namespace", - "required": false, + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. 
If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { "type": "string" } @@ -4145,9 +3298,10 @@ "content": { "application/json": { "schema": { + "description": "The reply with the JWT secrets information.\n", "properties": { "code": { - "description": "the HTTP status code\n", + "description": "the HTTP status code - 200 in this case\n", "type": "integer" }, "error": { @@ -4155,30 +3309,25 @@ "type": "boolean" }, "result": { - "description": "All functions, or the ones matching the `namespace` parameter\n", - "items": { - "properties": { - "code": { - "description": "A string representation of the function body\n", - "type": "string" - }, - "isDeterministic": { - "description": "an optional boolean value to indicate whether the function\nresults are fully deterministic (function return value solely depends on\nthe input value and return value is the same for repeated calls with same\ninput). 
The `isDeterministic` attribute is currently not used but may be\nused later for optimizations.\n", - "type": "boolean" - }, - "name": { - "description": "The fully qualified name of the user function\n", - "type": "string" - } + "description": "The result object.\n", + "properties": { + "active": { + "description": "An object with the SHA-256 hash of the active secret.\n", + "type": "object" }, - "required": [ - "name", - "code", - "isDeterministic" - ], - "type": "object" + "passive": { + "description": "An array of objects with the SHA-256 hashes of the passive secrets.\n\nCan be empty.\n", + "items": { + "type": "object" + }, + "type": "array" + } }, - "type": "array" + "required": [ + "active", + "passive" + ], + "type": "object" } }, "required": [ @@ -4190,72 +3339,71 @@ } } }, - "description": "on success *HTTP 200* is returned.\n" + "description": "" }, - "400": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "the HTTP status code\n", - "type": "integer" - }, - "error": { - "description": "boolean flag to indicate whether an error occurred (`true` in this case)\n", - "type": "boolean" - }, - "errorMessage": { - "description": "a descriptive error message\n", - "type": "string" - }, - "errorNum": { - "description": "the server error number\n", - "type": "integer" - } - }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "If the user function name is malformed, the server will respond with *HTTP 400*.\n" + "403": { + "description": "if the request was not authenticated as a user with sufficient rights\n" } }, - "summary": "List the registered user-defined AQL functions", + "summary": "Get information about the loaded JWT secrets", "tags": [ - "Queries" + "Authentication" + ] + } + }, + "/_db/{database-name}/_admin/server/mode": { + "get": { + "description": "Return mode information about a server. 
The json response will contain\na field `mode` with the value `readonly` or `default`. In a read-only server\nall write operations will fail with an error code of `1004` (_ERROR_READ_ONLY_).\nCreating or dropping of databases and collections will also fail with error code `11` (_ERROR_FORBIDDEN_).\n\nThis API requires authentication.\n", + "operationId": "getServerMode", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "This API will return HTTP 200 if everything is ok\n" + } + }, + "summary": "Return whether or not a server is in read-only mode", + "tags": [ + "Administration" ] }, - "post": { - "description": "Registers a user-defined function (UDF) written in JavaScript for the use in\nAQL queries in the current database.\n\nIn case of success, HTTP 200 is returned.\nIf the function isn't valid etc. HTTP 400 including a detailed error message will be returned.\n", - "operationId": "createAqlUserFunction", + "put": { + "description": "Update mode information about a server. The JSON response will contain\na field `mode` with the value `readonly` or `default`. In a read-only server\nall write operations will fail with an error code of `1004` (_ERROR_READ_ONLY_).\nCreating or dropping of databases and collections will also fail with error\ncode `11` (_ERROR_FORBIDDEN_).\n\nThis is a protected API. It requires authentication and administrative\nserver rights.\n", + "operationId": "setServerMode", + "parameters": [ + { + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], "requestBody": { "content": { "application/json": { "schema": { "properties": { - "code": { - "description": "a string representation of the function body.\n", - "type": "string" - }, - "isDeterministic": { - "description": "an optional boolean value to indicate whether the function\nresults are fully deterministic (function return value solely depends on\nthe input value and return value is the same for repeated calls with same\ninput). The `isDeterministic` attribute is currently not used but may be\nused later for optimizations.\n", - "type": "boolean" - }, - "name": { - "description": "the fully qualified name of the user functions.\n", + "mode": { + "description": "The mode of the server `readonly` or `default`.\n", "type": "string" } }, "required": [ - "name", - "code" + "mode" ], "type": "object" } @@ -4264,123 +3412,90 @@ }, "responses": { "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "the HTTP status code\n", - "type": "integer" - }, - "error": { - "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", - "type": "boolean" - }, - "isNewlyCreated": { - "description": "boolean flag to indicate whether the function was newly created (`false` in this case)\n", - "type": "boolean" - } - }, - "required": [ - "error", - "code", - "isNewlyCreated" - ], - "type": "object" - } - } - }, - "description": "If the function already existed and was replaced by the\ncall, the server will respond with *HTTP 200*.\n" - }, - "201": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "the HTTP status code\n", - 
"type": "integer" - }, - "error": { - "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", - "type": "boolean" - }, - "isNewlyCreated": { - "description": "boolean flag to indicate whether the function was newly created (`true` in this case)\n", - "type": "boolean" - } - }, - "required": [ - "error", - "code", - "isNewlyCreated" - ], - "type": "object" - } - } - }, - "description": "If the function can be registered by the server, the server will respond with\n*HTTP 201*.\n" + "description": "This API will return HTTP 200 if everything is ok\n" }, - "400": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "the HTTP status code\n", - "type": "integer" - }, - "error": { - "description": "boolean flag to indicate whether an error occurred (`true` in this case)\n", - "type": "boolean" - }, - "errorMessage": { - "description": "a descriptive error message\n", - "type": "string" - }, - "errorNum": { - "description": "the server error number\n", - "type": "integer" - } - }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "If the JSON representation is malformed or mandatory data is missing from the\nrequest, the server will respond with *HTTP 400*.\n" + "401": { + "description": "if the request was not authenticated as a user with sufficient rights\n" } }, - "summary": "Create a user-defined AQL function", + "summary": "Set the server mode to read-only or default", "tags": [ - "Queries" + "Administration" + ] + } + }, + "/_db/{database-name}/_admin/server/tls": { + "get": { + "description": "Return a summary of the TLS data. 
The JSON response will contain a field\n`result` with the following components:\n\n - `keyfile`: Information about the key file.\n - `clientCA`: Information about the Certificate Authority (CA) for\n client certificate verification.\n\nIf server name indication (SNI) is used and multiple key files are\nconfigured for different server names, then there is an additional\nattribute `SNI`, which contains for each configured server name\nthe corresponding information about the key file for that server name.\n\nIn all cases the value of the attribute will be a JSON object, which\nhas a subset of the following attributes (whatever is appropriate):\n\n - `sha256`: The value is a string with the SHA256 of the whole input\n file.\n - `certificates`: The value is a JSON array with the public\n certificates in the chain in the file.\n - `privateKeySha256`: In cases where there is a private key (`keyfile`\n but not `clientCA`), this field is present and contains a\n JSON string with the SHA256 of the private key.\n\nThis API requires authentication.\n", + "operationId": "getServerTls", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. 
If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "This API will return HTTP 200 if everything is ok\n" + } + }, + "summary": "Get the TLS data", + "tags": [ + "Security" ] } }, - "/_api/aqlfunction/{name}": { + "/_db/{database-name}/_admin/shutdown": { "delete": { - "description": "Deletes an existing user-defined function (UDF) or function group identified by\n`name` from the current database.\n", - "operationId": "deleteAqlUserFunction", + "description": "This call initiates a clean shutdown sequence. Requires administrative privileges.\n", + "operationId": "startShutdown", "parameters": [ { - "description": "the name of the AQL user function.\n", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", "in": "path", - "name": "name", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "- `true`: The function name provided in `name` is treated as\n a namespace prefix, and all functions in the specified namespace will be deleted.\n The returned number of deleted functions may become 0 if none matches the string.\n- `false`: The function name provided in `name` must be fully\n qualified, including any namespaces. If none matches the `name`, HTTP 404 is returned.\n", + "description": "\u003csmall\u003eIntroduced in: v3.7.12, v3.8.1, v3.9.0\u003c/small\u003e\n\nIf set to `true`, this initiates a soft shutdown. This is only available\non Coordinators. When issued, the Coordinator tracks a number of ongoing\noperations, waits until all have finished, and then shuts itself down\nnormally. 
It will still accept new operations.\n\nThis feature can be used to make restart operations of Coordinators less\nintrusive for clients. It is designed for setups with a load balancer in front\nof Coordinators. Remove the designated Coordinator from the load balancer before\nissuing the soft-shutdown. The remaining Coordinators will internally forward\nrequests that need to be handled by the designated Coordinator. All other\nrequests will be handled by the remaining Coordinators, reducing the designated\nCoordinator's load.\n\nThe following types of operations are tracked:\n\n - AQL cursors (in particular streaming cursors)\n - Transactions (in particular stream transactions)\n - Ongoing asynchronous requests (using the `x-arango-async: store` HTTP header)\n - Finished asynchronous requests, whose result has not yet been\n collected\n - Queued low priority requests (most normal requests)\n - Ongoing low priority requests\n", "in": "query", - "name": "group", + "name": "soft", "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "is returned in all cases, `OK` will be returned in the result buffer on success.\n" + } + }, + "summary": "Start the shutdown sequence", + "tags": [ + "Administration" + ] + }, + "get": { + "description": "\u003csmall\u003eIntroduced in: v3.7.12, v3.8.1, v3.9.0\u003c/small\u003e\n\nThis call reports progress about a soft Coordinator shutdown (see\ndocumentation of `DELETE /_admin/shutdown?soft=true`).\nIn this case, the following types of operations are tracked:\n\n - AQL cursors (in particular streaming cursors)\n - Transactions (in particular stream transactions)\n - Ongoing asynchronous requests (using the `x-arango-async: store` HTTP header)\n - Finished asynchronous requests, whose result has not yet been\n collected\n - Queued low priority requests (most normal requests)\n - Ongoing low priority requests\n\nThis API is only available on Coordinators.\n", + "operationId": 
"getShutdownProgress", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { "type": "string" } @@ -4392,624 +3507,597 @@ "application/json": { "schema": { "properties": { - "code": { - "description": "the HTTP status code\n", - "type": "integer" - }, - "deletedCount": { - "description": "The number of deleted user functions, always `1` when `group` is set to `false`.\nAny number `\u003e= 0` when `group` is set to `true`.\n", - "type": "integer" + "AQLcursors": { + "description": "Number of AQL cursors that are still active.\n", + "type": "number" }, - "error": { - "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", + "allClear": { + "description": "Whether all active operations finished.\n", "type": "boolean" - } - }, - "required": [ - "error", - "code", - "deletedCount" - ], - "type": "object" - } - } - }, - "description": "If the function can be removed by the server, the server will respond with\n*HTTP 200*.\n" - }, - "400": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "the HTTP status code\n", - "type": "integer" }, - "error": { - "description": "boolean flag to indicate whether an error occurred (`true` in this case)\n", - "type": "boolean" + "doneJobs": { + "description": "Number of finished asynchronous requests, whose result has not yet been collected.\n", + "type": "number" }, - "errorMessage": { - "description": "a descriptive error message\n", - "type": "string" + "lowPrioOngoingRequests": { + "description": "Number of queued low priority requests.\n", + "type": "number" }, - "errorNum": { - "description": "the server error number\n", - "type": "integer" - } - }, - 
"required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "If the user function name is malformed, the server will respond with *HTTP 400*.\n" - }, - "404": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "the HTTP status code\n", - "type": "integer" + "lowPrioQueuedRequests": { + "description": "Number of ongoing low priority requests.\n", + "type": "number" }, - "error": { - "description": "boolean flag to indicate whether an error occurred (`true` in this case)\n", - "type": "boolean" + "pendingJobs": { + "description": "Number of ongoing asynchronous requests.\n", + "type": "number" }, - "errorMessage": { - "description": "a descriptive error message\n", - "type": "string" + "softShutdownOngoing": { + "description": "Whether a soft shutdown of the Coordinator is in progress.\n", + "type": "boolean" }, - "errorNum": { - "description": "the server error number\n", - "type": "integer" + "transactions": { + "description": "Number of ongoing transactions.\n", + "type": "number" } }, "required": [ - "error", - "code", - "errorNum", - "errorMessage" + "softShutdownOngoing", + "AQLcursors", + "transactions", + "pendingJobs", + "doneJobs", + "lowPrioOngoingRequests", + "lowPrioQueuedRequests", + "allClear" ], "type": "object" } } }, - "description": "If the specified user function does not exist, the server will respond with *HTTP 404*.\n" + "description": "The response indicates the fact that a soft shutdown is ongoing and the\nnumber of active operations of the various types. Once all numbers have gone\nto 0, the flag `allClear` is set and the Coordinator shuts down automatically.\n" } }, - "summary": "Remove a user-defined AQL function", + "summary": "Query the soft shutdown progress", "tags": [ - "Queries" + "Administration" ] } }, - "/_api/batch": { - "post": { - "description": "Executes a batch request. 
A batch request can contain any number of\nother requests that can be sent to ArangoDB in isolation. The benefit of\nusing batch requests is that batching requests requires less client/server\nroundtrips than when sending isolated requests.\n\nAll parts of a batch request are executed serially on the server. The\nserver will return the results of all parts in a single response when all\nparts are finished.\n\nTechnically, a batch request is a multipart HTTP request, with\ncontent-type `multipart/form-data`. A batch request consists of an\nenvelope and the individual batch part actions. Batch part actions\nare \"regular\" HTTP requests, including full header and an optional body.\nMultiple batch parts are separated by a boundary identifier. The\nboundary identifier is declared in the batch envelope. The MIME content-type\nfor each individual batch part must be `application/x-arango-batchpart`.\n\nPlease note that when constructing the individual batch parts, you must\nuse CRLF (`\\r\\n`) as the line terminator as in regular HTTP messages.\n\nThe response sent by the server will be an `HTTP 200` response, with an\noptional error summary header `x-arango-errors`. This header contains the\nnumber of batch part operations that failed with an HTTP error code of at\nleast 400. This header is only present in the response if the number of\nerrors is greater than zero.\n\nThe response sent by the server is a multipart response, too. It contains\nthe individual HTTP responses for all batch parts, including the full HTTP\nresult header (with status code and other potential headers) and an\noptional result body. The individual batch parts in the result are\nseparated using the same boundary value as specified in the request.\n\nThe order of batch parts in the response will be the same as in the\noriginal client request. 
Clients can additionally use the `Content-Id`\nMIME header in a batch part to define an individual id for each batch part.\nThe server will return this id in the batch part responses, too.\n",
        "operationId": "executeBatchRequest",
        "requestBody": {
          "content": {
            "application/json": {
              "schema": {
                "properties": {
                  "body": {
                    "description": "The multipart batch request, consisting of the envelope and the individual\nbatch parts.\n",
                    "type": "string"
                  }
                },
                "required": [
                  "body"
                ],
                "type": "object"
              }
            }
          }
        },
        "responses": {
          "200": {
            "description": "is returned if the batch was received successfully. HTTP 200 is returned\neven if one or multiple batch part actions failed.\n"
          },
          "400": {
            "description": "is returned if the batch envelope is malformed or incorrectly formatted.\nThis code will also be returned if the content-type of the overall batch\nrequest or the individual MIME parts is not as expected.\n"
          },
          "405": {
            "description": "is returned when an invalid HTTP method is used.\n"
          }
        },
        "summary": "Execute a batch request",
        "tags": [
          "Batch Requests"
        ]
      }
    },
    "/_api/cluster/endpoints": {
      "get": {
        "description": "Returns an object with an attribute `endpoints`, which contains an\narray of objects, which each have the attribute `endpoint`, whose value\nis a string with the endpoint description. There is an entry for each\nCoordinator in the cluster. This method only works on Coordinators in\ncluster mode. 
In case of an error the `error` attribute is set to\n`true`.\n", - "operationId": "listClusterEndpoints", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "the HTTP status code - 200\n", - "type": "integer" - }, - "endpoints": { - "description": "A list of active cluster endpoints.\n", - "items": { - "properties": { - "endpoint": { - "description": "The bind of the Coordinator, like `tcp://[::1]:8530`\n", - "type": "string" - } - }, - "required": [ - "endpoint" - ], - "type": "object" - }, - "type": "array" - }, - "error": { - "description": "boolean flag to indicate whether an error occurred (`true` in this case)\n", - "type": "boolean" - } - }, - "required": [ - "error", - "code", - "endpoints" - ], - "type": "object" - } - } - }, - "description": "is returned when everything went well.\n" - }, - "501": { - "description": "server is not a Coordinator or method was not GET.\n" - } - }, - "summary": "List all Coordinator endpoints", - "tags": [ - "Cluster" - ] - } - }, - "/_api/collection": { + "/_db/{database-name}/_admin/statistics": { "get": { - "description": "Returns an object with a `result` attribute containing an array with the\ndescriptions of all collections in the current database.\n\nBy providing the optional `excludeSystem` query parameter with a value of\n`true`, all system collections are excluded from the response.\n", - "operationId": "listCollections", + "description": "\u003e **WARNING:**\nThis endpoint should no longer be used. It is deprecated from version 3.8.0 on.\nUse `/_admin/metrics/v2` instead, which provides the data exposed by this API\nand a lot more.\n\n\nReturns the statistics information. The returned object contains the\nstatistics figures grouped together according to the description returned by\n`/_admin/statistics-description`. 
For instance, to access a figure `userTime`\nfrom the group `system`, you first select the sub-object describing the\ngroup stored in `system` and in that sub-object the value for `userTime` is\nstored in the attribute of the same name.\n\nIn case of a distribution, the returned object contains the total count in\n`count` and the distribution list in `counts`. The sum (or total) of the\nindividual values is returned in `sum`.\n\nThe transaction statistics show the local started, committed and aborted\ntransactions as well as intermediate commits done for the server queried. The\nintermediate commit count will only take non zero values for the RocksDB\nstorage engine. Coordinators do almost no local transactions themselves in\ntheir local databases, therefore cluster transactions (transactions started on a\nCoordinator that require DB-Servers to finish before the transaction is\ncommitted cluster wide) are just added to their local statistics. This means\nthat the statistics you would see for a single server are roughly what you can\nexpect in a cluster setup using a single Coordinator querying this Coordinator.\nJust with the difference that cluster transactions have no notion of\nintermediate commits and will not increase the value.\n",
        "operationId": "getStatistics",
        "parameters": [
          {
            "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. 
If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], "responses": { "200": { - "description": "The list of collections\n" - } - }, - "summary": "List all collections", - "tags": [ - "Collections" - ] - }, - "post": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nCreates a new collection with a given name. The request must contain an\nobject with the following attributes.\n", - "operationId": "createCollection", - "parameters": [ - { - "description": "The default is `true`, which means the server only reports success back to the\nclient when all replicas have created the collection. Set it to `false` if you want\nfaster server responses and don't care about full replication.\n", - "in": "query", - "name": "waitForSyncReplication", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "The default is `true`, which means the server checks if there are enough replicas\navailable at creation time and bail out otherwise. Set it to `false` to disable\nthis extra check.\n", - "in": "query", - "name": "enforceReplicationFactor", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "cacheEnabled": { - "description": "Whether the in-memory hash cache for documents should be enabled for this\ncollection (default: `false`). Can be controlled globally with the `--cache.size`\nstartup option. The cache can speed up repeated reads of the same documents via\ntheir document keys. 
If the same documents are not fetched often or are\nmodified frequently, then you may disable the cache to avoid the maintenance\ncosts.\n", - "type": "boolean" - }, - "computedValues": { - "description": "An optional list of objects, each representing a computed value.\n", - "items": { + "content": { + "application/json": { + "schema": { + "properties": { + "client": { + "description": "information about the connected clients and their resource usage\n", "properties": { - "computeOn": { - "description": "An array of strings to define on which write operations the value shall be\ncomputed. The possible values are `\"insert\"`, `\"update\"`, and `\"replace\"`.\nThe default is `[\"insert\", \"update\", \"replace\"]`.\n", - "items": { - "type": "string" + "bytesReceived": { + "description": "number of bytes received from the clients\n", + "properties": { + "count": { + "description": "number of values summarized\n", + "type": "integer" + }, + "counts": { + "description": "array containing the values\n", + "items": { + "type": "integer" + }, + "type": "array" + }, + "sum": { + "description": "summarized value of all counts\n", + "type": "number" + } }, - "type": "array" - }, - "expression": { - "description": "An AQL `RETURN` operation with an expression that computes the desired value.\nSee [Computed Value Expressions](../../concepts/data-structure/documents/computed-values.md#computed-value-expressions) for details.\n", - "type": "string" + "required": [ + "sum", + "count", + "counts" + ], + "type": "object" }, - "failOnWarning": { - "description": "Whether to let the write operation fail if the expression produces a warning.\nThe default is `false`.\n", - "type": "boolean" + "bytesSent": { + "description": "number of bytes sent to the clients\n", + "properties": { + "count": { + "description": "number of values summarized\n", + "type": "integer" + }, + "counts": { + "description": "array containing the values\n", + "items": { + "type": "integer" + }, + "type": 
"array" + }, + "sum": { + "description": "summarized value of all counts\n", + "type": "number" + } + }, + "required": [ + "sum", + "count", + "counts" + ], + "type": "object" }, - "keepNull": { - "description": "Whether the target attribute shall be set if the expression evaluates to `null`.\nYou can set the option to `false` to not set (or unset) the target attribute if\nthe expression returns `null`. The default is `true`.\n", - "type": "boolean" + "connectionTime": { + "description": "total connection times\n", + "properties": { + "count": { + "description": "number of values summarized\n", + "type": "integer" + }, + "counts": { + "description": "array containing the values\n", + "items": { + "type": "integer" + }, + "type": "array" + }, + "sum": { + "description": "summarized value of all counts\n", + "type": "number" + } + }, + "required": [ + "sum", + "count", + "counts" + ], + "type": "object" }, - "name": { - "description": "The name of the target attribute. Can only be a top-level attribute, but you\nmay return a nested object. Cannot be `_key`, `_id`, `_rev`, `_from`, `_to`,\nor a shard key attribute.\n", - "type": "string" + "httpConnections": { + "description": "the number of open http connections\n", + "type": "integer" }, - "overwrite": { - "description": "Whether the computed value shall take precedence over a user-provided or\nexisting attribute.\n", - "type": "boolean" - } - }, - "required": [ - "name", - "expression", - "overwrite" - ], - "type": "object" - }, - "type": "array" - }, - "distributeShardsLike": { - "description": "The name of another collection. If this property is set in a cluster, the\ncollection copies the `replicationFactor`, `numberOfShards` and `shardingStrategy`\nproperties from the specified collection (referred to as the _prototype collection_)\nand distributes the shards of this collection in the same way as the shards of\nthe other collection. 
In an Enterprise Edition cluster, this data co-location is\nutilized to optimize queries.\n\nYou need to use the same number of `shardKeys` as the prototype collection, but\nyou can use different attributes.\n\nThe default is `\"\"`.\n\n\u003e **INFO:**\nUsing this parameter has consequences for the prototype\ncollection. It can no longer be dropped, before the sharding-imitating\ncollections are dropped. Equally, backups and restores of imitating\ncollections alone generate warnings (which can be overridden)\nabout a missing sharding prototype.\n", - "type": "string" - }, - "isDisjoint": { - "description": "Whether the collection is for a Disjoint SmartGraph\n(Enterprise Edition only). This is an internal property.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the collection is for a SmartGraph or EnterpriseGraph\n(Enterprise Edition only). This is an internal property.\n", - "type": "boolean" - }, - "isSystem": { - "description": "If `true`, create a system collection. In this case, the `collection-name`\nshould start with an underscore. End-users should normally create non-system\ncollections only. API implementors may be required to create system\ncollections in very special occasions, but normally a regular collection will do.\n(The default is `false`)\n", - "type": "boolean" - }, - "keyOptions": { - "description": "additional options for key generation. If specified, then `keyOptions`\nshould be a JSON object containing the following attributes:\n", - "properties": { - "allowUserKeys": { - "description": "If set to `true`, then you are allowed to supply own key values in the\n`_key` attribute of documents. 
If set to `false`, then the key generator\nis solely be responsible for generating keys and an error is raised if you\nsupply own key values in the `_key` attribute of documents.\n\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", - "type": "boolean" - }, - "increment": { - "description": "increment value for `autoincrement` key generator. Not used for other key\ngenerator types.\n", - "type": "integer" - }, - "offset": { - "description": "Initial offset value for `autoincrement` key generator.\nNot used for other key generator types.\n", - "type": "integer" - }, - "type": { - "description": "specifies the type of the key generator. The currently available generators are\n`traditional`, `autoincrement`, `uuid` and `padded`.\n\n- The `traditional` key generator generates numerical keys in ascending order.\n The sequence of keys is not guaranteed to be gap-free.\n\n- The `autoincrement` key generator generates numerical keys in ascending order,\n the initial offset and the spacing can be configured (**note**: `autoincrement`\n is currently only supported for non-sharded collections).\n The sequence of generated keys is not guaranteed to be gap-free, because a new key\n will be generated on every document insert attempt, not just for successful\n inserts.\n\n- The `padded` key generator generates keys of a fixed length (16 bytes) in\n ascending lexicographical sort order. This is ideal for the RocksDB storage engine,\n which will slightly benefit keys that are inserted in lexicographically\n ascending order. 
The key generator can be used in a single-server or cluster.\n The sequence of generated keys is not guaranteed to be gap-free.\n\n- The `uuid` key generator generates universally unique 128 bit keys, which\n are stored in hexadecimal human-readable format. This key generator can be used\n in a single-server or cluster to generate \"seemingly random\" keys. The keys\n produced by this key generator are not lexicographically sorted.\n\nPlease note that keys are only guaranteed to be truly ascending in single\nserver deployments and for collections that only have a single shard (that includes\ncollections in a OneShard database).\nThe reason is that for collections with more than a single shard, document keys\nare generated on Coordinator(s). For collections with a single shard, the document\nkeys are generated on the leader DB-Server, which has full control over the key\nsequence.\n", - "type": "string" - } - }, - "required": [ - "type", - "allowUserKeys", - "increment", - "offset" - ], - "type": "object" - }, - "name": { - "description": "The name of the collection.\n", - "type": "string" - }, - "numberOfShards": { - "description": "(The default is `1`): in a cluster, this value determines the\nnumber of shards to create for the collection.\n", - "type": "integer" - }, - "replicationFactor": { - "description": "(The default is `1`): in a cluster, this attribute determines how many copies\nof each shard are kept on different DB-Servers. The value 1 means that only one\ncopy (no synchronous replication) is kept. A value of k means that k-1 replicas\nare kept. For SatelliteCollections, it needs to be the string `\"satellite\"`,\nwhich matches the replication factor to the number of DB-Servers\n(Enterprise Edition only).\n\nAny two copies reside on different DB-Servers. 
Replication between them is\nsynchronous, that is, every write operation to the \"leader\" copy will be replicated\nto all \"follower\" replicas, before the write operation is reported successful.\n\nIf a server fails, this is detected automatically and one of the servers holding\ncopies take over, usually without an error being reported.\n", - "type": "integer" - }, - "schema": { - "description": "Optional object that specifies the collection level schema for\ndocuments. The attribute keys `rule`, `level` and `message` must follow the\nrules documented in [Document Schema Validation](../../concepts/data-structure/documents/schema-validation.md)\n", - "type": "object" - }, - "shardKeys": { - "description": "(The default is `[ \"_key\" ]`): in a cluster, this attribute determines\nwhich document attributes are used to determine the target shard for documents.\nDocuments are sent to shards based on the values of their shard key attributes.\nThe values of all shard key attributes in a document are hashed,\nand the hash value is used to determine the target shard.\n\n\u003e **INFO:**\nValues of shard key attributes cannot be changed once set.\n", - "type": "string" - }, - "shardingStrategy": { - "description": "This attribute specifies the name of the sharding strategy to use for\nthe collection. There are different sharding strategies\nto select from when creating a new collection. 
The selected `shardingStrategy`\nvalue remains fixed for the collection and cannot be changed afterwards.\nThis is important to make the collection keep its sharding settings and\nalways find documents already distributed to shards using the same\ninitial sharding algorithm.\n\nThe available sharding strategies are:\n- `community-compat`: default sharding used by ArangoDB\n Community Edition before version 3.4\n- `enterprise-compat`: default sharding used by ArangoDB\n Enterprise Edition before version 3.4\n- `enterprise-smart-edge-compat`: default sharding used by smart edge\n collections in ArangoDB Enterprise Edition before version 3.4\n- `hash`: default sharding used for new collections starting from version 3.4\n (excluding smart edge collections)\n- `enterprise-hash-smart-edge`: default sharding used for new\n smart edge collections starting from version 3.4\n- `enterprise-hex-smart-vertex`: sharding used for vertex collections of\n EnterpriseGraphs\n\nIf no sharding strategy is specified, the default is `hash` for\nall normal collections, `enterprise-hash-smart-edge` for all smart edge\ncollections, and `enterprise-hex-smart-vertex` for EnterpriseGraph\nvertex collections (the latter two require the *Enterprise Edition* of ArangoDB).\nManually overriding the sharding strategy does not yet provide a\nbenefit, but it may later in case other sharding strategies are added.\n", - "type": "string" - }, - "smartGraphAttribute": { - "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. 
Edges derive the\nattribute from their connected vertices.\n\nThis feature can only be used in the *Enterprise Edition*.\n", - "type": "string" - }, - "smartJoinAttribute": { - "description": "In an *Enterprise Edition* cluster, this attribute determines an attribute\nof the collection that must contain the shard key value of the referred-to\nSmartJoin collection. Additionally, the shard key for a document in this\ncollection must contain the value of this attribute, followed by a colon,\nfollowed by the actual primary key of the document.\n\nThis feature can only be used in the *Enterprise Edition* and requires the\n`distributeShardsLike` attribute of the collection to be set to the name\nof another collection. It also requires the `shardKeys` attribute of the\ncollection to be set to a single shard key attribute, with an additional ':'\nat the end.\nA further restriction is that whenever documents are stored or updated in the\ncollection, the value stored in the `smartJoinAttribute` must be a string.\n", - "type": "string" - }, - "type": { - "description": "(The default is `2`): the type of the collection to create.\nThe following values for `type` are valid:\n\n- `2`: document collection\n- `3`: edge collection\n", - "type": "integer" - }, - "waitForSync": { - "description": "If `true` then the data is synchronized to disk before returning from a\ndocument create, update, replace or removal operation. (Default: `false`)\n", - "type": "boolean" - }, - "writeConcern": { - "description": "Write concern for this collection (default: 1).\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. 
The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`. _(cluster only)_\n", - "type": "integer" - } - }, - "required": [ - "name" - ], - "type": "object" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "description": "", - "properties": { - "cacheEnabled": { - "description": "Whether the in-memory hash cache for documents is enabled for this\ncollection.\n", - "type": "boolean" - }, - "computedValues": { - "description": "A list of objects, each representing a computed value.\n", - "items": { - "properties": { - "computeOn": { - "description": "An array of strings that defines on which write operations the value is\ncomputed. The possible values are `\"insert\"`, `\"update\"`, and `\"replace\"`.\n", - "items": { - "type": "string" + "ioTime": { + "description": "IO Time\n", + "properties": { + "count": { + "description": "number of values summarized\n", + "type": "integer" }, - "type": "array" - }, - "expression": { - "description": "An AQL `RETURN` operation with an expression that computes the desired value.\n", - "type": "string" - }, - "failOnWarning": { - "description": "Whether the write operation fails if the expression produces a warning.\n", - "type": "boolean" + "counts": { + "description": "array containing the values\n", + "items": { + "type": "integer" + }, + "type": "array" + }, + "sum": { + "description": "summarized value of all counts\n", + "type": "number" + } }, - "keepNull": { - "description": "Whether the target attribute is set if the expression evaluates to `null`.\n", - "type": "boolean" + "required": [ + "sum", + "count", + "counts" + ], + "type": "object" + }, + "queueTime": { + "description": "the time requests were queued waiting for processing\n", + "properties": { + "count": { + "description": "number of values summarized\n", + "type": 
"integer" + }, + "counts": { + "description": "array containing the values\n", + "items": { + "type": "integer" + }, + "type": "array" + }, + "sum": { + "description": "summarized value of all counts\n", + "type": "number" + } }, - "name": { - "description": "The name of the target attribute.\n", - "type": "string" + "required": [ + "sum", + "count", + "counts" + ], + "type": "object" + }, + "requestTime": { + "description": "the request times\n", + "properties": { + "count": { + "description": "number of values summarized\n", + "type": "integer" + }, + "counts": { + "description": "array containing the values\n", + "items": { + "type": "integer" + }, + "type": "array" + }, + "sum": { + "description": "summarized value of all counts\n", + "type": "number" + } }, - "overwrite": { - "description": "Whether the computed value takes precedence over a user-provided or\nexisting attribute.\n", - "type": "boolean" - } + "required": [ + "sum", + "count", + "counts" + ], + "type": "object" }, - "required": [ - "name", - "expression", - "overwrite" - ], - "type": "object" + "totalTime": { + "description": "the system time\n", + "properties": { + "count": { + "description": "number of values summarized\n", + "type": "integer" + }, + "counts": { + "description": "array containing the values\n", + "items": { + "type": "integer" + }, + "type": "array" + }, + "sum": { + "description": "summarized value of all counts\n", + "type": "number" + } + }, + "required": [ + "sum", + "count", + "counts" + ], + "type": "object" + } }, - "type": "array" - }, - "distributeShardsLike": { - "description": "The name of another collection. 
This collection uses the `replicationFactor`,\n`numberOfShards` and `shardingStrategy` properties of the other collection and\nthe shards of this collection are distributed in the same way as the shards of\nthe other collection.\n", - "type": "string" + "required": [ + "connectionTime", + "totalTime", + "requestTime", + "queueTime", + "ioTime", + "bytesSent", + "bytesReceived", + "httpConnections" + ], + "type": "object" }, - "globallyUniqueId": { - "description": "A unique identifier of the collection. This is an internal property.\n", - "type": "string" + "code": { + "description": "the HTTP status code - 200 in this case\n", + "type": "integer" }, - "id": { - "description": "A unique identifier of the collection (deprecated).\n", - "type": "string" + "enabled": { + "description": "`true` if the server has the statistics module enabled. If not, don't expect any values.\n", + "type": "boolean" }, - "isDisjoint": { - "description": "Whether the SmartGraph or EnterpriseGraph this collection belongs to is disjoint\n(Enterprise Edition only). This is an internal property. _(cluster only)_\n", + "error": { + "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", "type": "boolean" }, - "isSmart": { - "description": "Whether the collection is used in a SmartGraph or EnterpriseGraph (Enterprise Edition only).\nThis is an internal property. _(cluster only)_\n", - "type": "boolean" - }, - "isSystem": { - "description": "Whether the collection is a system collection. 
Collection names that starts with\nan underscore are usually system collections.\n", - "type": "boolean" + "errorMessage": { + "description": "a descriptive error message\n", + "type": "string" }, - "keyOptions": { - "description": "An object which contains key generation options.\n", + "http": { + "description": "the numbers of requests by Verb\n", "properties": { - "allowUserKeys": { - "description": "If set to `true`, then you are allowed to supply\nown key values in the `_key` attribute of a document. If set to\n`false`, then the key generator is solely responsible for\ngenerating keys and an error is raised if you supply own key values in the\n`_key` attribute of documents.\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", - "type": "boolean" + "requestsAsync": { + "description": "total number of asynchronous http requests\n", + "type": "integer" }, - "increment": { - "description": "The increment value for the `autoincrement` key generator.\nNot used for other key generator types.\n", + "requestsDelete": { + "description": "No of requests using the DELETE-verb\n", "type": "integer" }, - "lastValue": { - "description": "The current offset value of the `autoincrement` or `padded` key generator.\nThis is an internal property for restoring dumps properly.\n", + "requestsGet": { + "description": "No of requests using the GET-verb\n", "type": "integer" }, - "offset": { - "description": "The initial offset value for the `autoincrement` key generator.\nNot used for other key generator types.\n", + "requestsHead": { + "description": "No of requests using the HEAD-verb\n", "type": "integer" }, - "type": { - "description": "Specifies the type of the key generator. 
Possible values:\n- `\"traditional\"`\n- `\"autoincrement\"`\n- `\"uuid\"`\n- `\"padded\"`\n", - "type": "string" + "requestsOptions": { + "description": "No of requests using the OPTIONS-verb\n", + "type": "integer" + }, + "requestsOther": { + "description": "No of requests using the none of the above identified verbs\n", + "type": "integer" + }, + "requestsPatch": { + "description": "No of requests using the PATCH-verb\n", + "type": "integer" + }, + "requestsPost": { + "description": "No of requests using the POST-verb\n", + "type": "integer" + }, + "requestsPut": { + "description": "No of requests using the PUT-verb\n", + "type": "integer" + }, + "requestsTotal": { + "description": "total number of http requests\n", + "type": "integer" } }, "required": [ - "type", - "allowUserKeys", - "lastValue" + "requestsTotal", + "requestsAsync", + "requestsGet", + "requestsHead", + "requestsPost", + "requestsPut", + "requestsPatch", + "requestsDelete", + "requestsOptions", + "requestsOther" ], "type": "object" }, - "name": { - "description": "The name of this collection.\n", - "type": "string" - }, - "numberOfShards": { - "description": "The number of shards of the collection. _(cluster only)_\n", - "type": "integer" - }, - "replicationFactor": { - "description": "Contains how many copies of each shard are kept on different DB-Servers.\nIt is an integer number in the range of 1-10 or the string `\"satellite\"`\nfor SatelliteCollections (Enterprise Edition only). _(cluster only)_\n", - "type": "integer" - }, - "schema": { - "description": "An object that specifies the collection-level schema for documents.\n", - "type": "object" - }, - "shardKeys": { - "description": "Contains the names of document attributes that are used to\ndetermine the target shard for documents. 
_(cluster only)_\n", - "items": { - "type": "string" + "server": { + "description": "statistics of the server\n", + "properties": { + "physicalMemory": { + "description": "available physical memory on the server\n", + "type": "integer" + }, + "threads": { + "description": "Statistics about the server worker threads (excluding V8 specific or jemalloc specific threads and system threads)\n", + "properties": { + "in-progress": { + "description": "The number of currently busy worker threads\n", + "type": "integer" + }, + "queued": { + "description": "The number of jobs queued up waiting for worker threads becoming available\n", + "type": "integer" + }, + "scheduler-threads": { + "description": "The number of spawned worker threads\n", + "type": "integer" + } + }, + "required": [ + "scheduler-threads", + "in-progress", + "queued" + ], + "type": "object" + }, + "transactions": { + "description": "Statistics about transactions\n", + "properties": { + "aborted": { + "description": "the number of aborted transactions\n", + "type": "integer" + }, + "committed": { + "description": "the number of committed transactions\n", + "type": "integer" + }, + "intermediateCommits": { + "description": "the number of intermediate commits done\n", + "type": "integer" + }, + "started": { + "description": "the number of started transactions\n", + "type": "integer" + } + }, + "required": [ + "started", + "committed", + "aborted", + "intermediateCommits" + ], + "type": "object" + }, + "uptime": { + "description": "time the server is up and running\n", + "type": "integer" + }, + "v8Context": { + "description": "Statistics about the V8 javascript contexts\n", + "properties": { + "available": { + "description": "the number of currently spawned V8 contexts\n", + "type": "integer" + }, + "busy": { + "description": "the number of currently active V8 contexts\n", + "type": "integer" + }, + "dirty": { + "description": "the number of contexts that were previously used, and should now be garbage 
collected before being re-used\n", + "type": "integer" + }, + "free": { + "description": "the number of V8 contexts that are free to use\n", + "type": "integer" + }, + "max": { + "description": "the maximum number of V8 concurrent contexts we may spawn as configured by --javascript.v8-contexts\n", + "type": "integer" + }, + "memory": { + "description": "a list of V8 memory / garbage collection watermarks; Refreshed on every garbage collection run;\nPreserves min/max memory used at that time for 10 seconds\n", + "items": { + "properties": { + "contextId": { + "description": "ID of the context this set of memory statistics is from\n", + "type": "integer" + }, + "countOfTimes": { + "description": "how many times was the garbage collection run in these 10 seconds\n", + "type": "integer" + }, + "heapMax": { + "description": "High watermark of all garbage collection runs in 10 seconds\n", + "type": "integer" + }, + "heapMin": { + "description": "Low watermark of all garbage collection runs in these 10 seconds\n", + "type": "integer" + }, + "tMax": { + "description": "the timestamp where the 10 seconds interval started\n", + "type": "number" + } + }, + "required": [ + "contextId", + "tMax", + "countOfTimes", + "heapMax", + "heapMin" + ], + "type": "object" + }, + "type": "array" + }, + "min": { + "description": "the minimum number of V8 contexts that are spawned as configured by --javascript.v8-contexts-minimum\n", + "type": "integer" + } + }, + "required": [ + "available", + "busy", + "dirty", + "free", + "max", + "min", + "memory" + ], + "type": "object" + } }, - "type": "array" - }, - "shardingStrategy": { - "description": "The sharding strategy selected for the collection. 
_(cluster only)_\n\nPossible values:\n- `\"community-compat\"`\n- `\"enterprise-compat\"`\n- `\"enterprise-smart-edge-compat\"`\n- `\"hash\"`\n- `\"enterprise-hash-smart-edge\"`\n- `\"enterprise-hex-smart-vertex\"`\n", - "type": "string" - }, - "smartGraphAttribute": { - "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. Edges derive the\nattribute from their connected vertices (Enterprise Edition only). _(cluster only)_\n", - "type": "string" - }, - "smartJoinAttribute": { - "description": "Determines an attribute of the collection that must contain the shard key value\nof the referred-to SmartJoin collection (Enterprise Edition only). _(cluster only)_\n", - "type": "string" - }, - "syncByRevision": { - "description": "Whether the newer revision-based replication protocol is\nenabled for this collection. This is an internal property.\n", - "type": "boolean" - }, - "type": { - "description": "The type of the collection:\n - `0`: \"unknown\"\n - `2`: regular document collection\n - `3`: edge collection\n", - "type": "integer" - }, - "waitForSync": { - "description": "If `true`, creating, changing, or removing\ndocuments waits until the data has been synchronized to disk.\n", - "type": "boolean" + "required": [ + "uptime", + "physicalMemory", + "transactions", + "v8Context", + "threads" + ], + "type": "object" }, - "writeConcern": { - "description": "Determines how many copies of each shard are required to be\nin-sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. 
The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`. _(cluster only)_\n", - "type": "integer" + "system": { + "description": "metrics gathered from the system about this process; may depend on the host OS\n", + "properties": { + "majorPageFaults": { + "description": "pagefaults\n", + "type": "integer" + }, + "minorPageFaults": { + "description": "pagefaults\n", + "type": "integer" + }, + "numberOfThreads": { + "description": "the number of threads in the server\n", + "type": "integer" + }, + "residentSize": { + "description": "RSS of process\n", + "type": "integer" + }, + "residentSizePercent": { + "description": "RSS of process in %\n", + "type": "number" + }, + "systemTime": { + "description": "the system CPU time used by the server process\n", + "type": "number" + }, + "userTime": { + "description": "the user CPU time used by the server process\n", + "type": "number" + }, + "virtualSize": { + "description": "VSS of the process\n", + "type": "integer" + } + }, + "required": [ + "minorPageFaults", + "majorPageFaults", + "userTime", + "systemTime", + "numberOfThreads", + "residentSize", + "residentSizePercent", + "virtualSize" + ], + "type": "object" + }, + "time": { + "description": "the current server timestamp\n", + "type": "integer" } }, "required": [ - "waitForSync", - "keyOptions", - "cacheEnabled", - "syncByRevision" + "error", + "code", + "time", + "errorMessage", + "enabled", + "system", + "client", + "http", + "server" ], "type": "object" } } }, - "description": "" - }, - "400": { - "description": "If the `collection-name` is missing, then an *HTTP 400* is\nreturned.\n" + "description": "Statistics were returned successfully.\n" }, "404": { - "description": "If the `collection-name` is unknown, then an *HTTP 404* is returned.\n" + "description": "Statistics are disabled on the instance.\n" } }, - 
"summary": "Create a collection", + "summary": "Get the statistics", "tags": [ - "Collections" + "Monitoring" ] } }, - "/_api/collection/{collection-name}": { - "delete": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nDrops the collection identified by `collection-name`.\n\nIf the collection was successfully dropped, an object is returned with\nthe following attributes:\n\n- `error`: `false`\n\n- `id`: The identifier of the dropped collection.\n", - "operationId": "deleteCollection", - "parameters": [ - { - "description": "The name of the collection to drop.\n", - "in": "path", - "name": "collection-name", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "Whether or not the collection to drop is a system collection. This parameter\nmust be set to `true` in order to drop a system collection.\n", - "in": "query", - "name": "isSystem", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" - } - }, - "summary": "Drop a collection", - "tags": [ - "Collections" - ] - }, + "/_db/{database-name}/_admin/statistics-description": { "get": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nThe result is an object describing the collection with the following\nattributes:\n\n- `id`: The identifier of the collection.\n\n- `name`: The name of the collection.\n\n- `status`: The status of the collection as number.\n - 3: loaded\n - 5: deleted\n\nEvery other status indicates a corrupted collection.\n\n- `type`: The type of the collection as number.\n - 2: document 
collection (normal case)\n - 3: edge collection\n\n- `isSystem`: If `true` then the collection is a system collection.\n", - "operationId": "getCollection", + "description": "\u003e **WARNING:**\nThis endpoint should no longer be used. It is deprecated from version 3.8.0 on.\nUse `/_admin/metrics/v2` instead, which provides the data exposed by the\nstatistics API and a lot more.\n\n\nReturns a description of the statistics returned by `/_admin/statistics`.\nThe returned objects contains an array of statistics groups in the attribute\n`groups` and an array of statistics figures in the attribute `figures`.\n\nA statistics group is described by\n\n- `group`: The identifier of the group.\n- `name`: The name of the group.\n- `description`: A description of the group.\n\nA statistics figure is described by\n\n- `group`: The identifier of the group to which this figure belongs.\n- `identifier`: The identifier of the figure. It is unique within the group.\n- `name`: The name of the figure.\n- `description`: A description of the figure.\n- `type`: Either `current`, `accumulated`, or `distribution`.\n- `cuts`: The distribution vector.\n- `units`: Units in which the figure is measured.\n", + "operationId": "getStatisticsDescription", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. 
If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", "in": "path", - "name": "collection-name", + "name": "database-name", "required": true, "schema": { "type": "string" @@ -5017,72 +4105,121 @@ } ], "responses": { - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is\nreturned.\n" + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the HTTP status code\n", + "type": "integer" + }, + "error": { + "description": "the error, `false` in this case\n", + "type": "boolean" + }, + "figures": { + "description": "A statistics figure\n", + "items": { + "properties": { + "cuts": { + "description": "The distribution vector.\n", + "type": "string" + }, + "description": { + "description": "A description of the figure.\n", + "type": "string" + }, + "group": { + "description": "The identifier of the group to which this figure belongs.\n", + "type": "string" + }, + "identifier": { + "description": "The identifier of the figure. 
It is unique within the group.\n", + "type": "string" + }, + "name": { + "description": "The name of the figure.\n", + "type": "string" + }, + "type": { + "description": "Either `current`, `accumulated`, or `distribution`.\n", + "type": "string" + }, + "units": { + "description": "Units in which the figure is measured.\n", + "type": "string" + } + }, + "required": [ + "group", + "identifier", + "name", + "description", + "type", + "cuts", + "units" + ], + "type": "object" + }, + "type": "array" + }, + "groups": { + "description": "A statistics group\n", + "items": { + "properties": { + "description": { + "description": "A description of the group.\n", + "type": "string" + }, + "group": { + "description": "The identifier of the group.\n", + "type": "string" + }, + "name": { + "description": "The name of the group.\n", + "type": "string" + } + }, + "required": [ + "group", + "name", + "description" + ], + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "groups", + "figures", + "code", + "error" + ], + "type": "object" + } + } + }, + "description": "Description was returned successfully.\n" } }, - "summary": "Get the collection information", + "summary": "Get the statistics description", "tags": [ - "Collections" + "Monitoring" ] } }, - "/_api/collection/{collection-name}/checksum": { + "/_db/{database-name}/_admin/status": { "get": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nWill calculate a checksum of the meta-data (keys and optionally revision ids) and\noptionally the document data in the collection.\n\nThe checksum can be used to compare if two collections on different ArangoDB\ninstances contain the same contents. 
The current revision of the collection is\nreturned too so one can make sure the checksums are calculated for the same\nstate of data.\n\nBy default, the checksum will only be calculated on the `_key` system attribute\nof the documents contained in the collection. For edge collections, the system\nattributes `_from` and `_to` will also be included in the calculation.\n\nBy setting the optional query parameter `withRevisions` to `true`, then revision\nids (`_rev` system attributes) are included in the checksumming.\n\nBy providing the optional query parameter `withData` with a value of `true`,\nthe user-defined document attributes will be included in the calculation too.\n\n\u003e **INFO:**\nIncluding user-defined attributes will make the checksumming slower.\n\n\nThe response is a JSON object with the following attributes:\n\n- `checksum`: The calculated checksum as a number.\n\n- `revision`: The collection revision id as a string.\n", - "operationId": "getCollectionChecksum", + "description": "Returns status information about the server.\n", + "operationId": "getStatus", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. 
If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", "in": "path", - "name": "collection-name", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "Whether or not to include document revision ids in the checksum calculation.\n", - "in": "query", - "name": "withRevisions", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Whether or not to include document body data in the checksum calculation.\n", - "in": "query", - "name": "withData", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404*\nis returned.\n" - } - }, - "summary": "Get the collection checksum", - "tags": [ - "Collections" - ] - } - }, - "/_api/collection/{collection-name}/compact": { - "put": { - "description": "Compacts the data of a collection in order to reclaim disk space.\nThe operation will compact the document and index data by rewriting the\nunderlying .sst files and only keeping the relevant entries.\n\nUnder normal circumstances, running a compact operation is not necessary, as\nthe collection data will eventually get compacted anyway. However, in some\nsituations, e.g. after running lots of update/replace or remove operations,\nthe disk data for a collection may contain a lot of outdated data for which the\nspace shall be reclaimed. 
In this case the compaction operation can be used.\n", - "operationId": "compactCollection", - "parameters": [ - { - "description": "Name of the collection to compact\n", - "in": "path", - "name": "collection-name", + "name": "database-name", "required": true, "schema": { "type": "string" @@ -5091,27 +4228,218 @@ ], "responses": { "200": { - "description": "Compaction started successfully\n" - }, - "401": { - "description": "if the request was not authenticated as a user with sufficient rights\n" + "content": { + "application/json": { + "schema": { + "properties": { + "agency": { + "description": "Information about the Agency.\n*Cluster only* (Coordinators and DB-Servers).\n", + "properties": { + "agencyComm": { + "description": "Information about the communication with the Agency.\n*Cluster only* (Coordinators and DB-Servers).\n", + "properties": { + "endpoints": { + "description": "A list of possible Agency endpoints.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "agent": { + "description": "Information about the Agents.\n*Cluster only* (Agents)\n", + "properties": { + "endpoint": { + "description": "The endpoint of the queried Agent.\n", + "type": "string" + }, + "id": { + "description": "Server ID of the queried Agent.\n", + "type": "string" + }, + "leaderId": { + "description": "Server ID of the leading Agent.\n", + "type": "string" + }, + "leading": { + "description": "Whether the queried Agent is the leader.\n", + "type": "boolean" + }, + "term": { + "description": "The current term number.\n", + "type": "number" + } + }, + "type": "object" + }, + "coordinator": { + "description": "Information about the Coordinators.\n*Cluster only* (Coordinators)\n", + "properties": { + "foxxmaster": { + "description": "The server ID of the Coordinator that is the Foxx master.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "isFoxxmaster": { + "description": "Whether the queried 
Coordinator is the Foxx master.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "foxxApi": { + "description": "Whether the Foxx API is enabled.\n", + "type": "boolean" + }, + "host": { + "description": "A host identifier defined by the `HOST` or `NODE_NAME` environment variable,\nor a fallback value using a machine identifier or the cluster/Agency address.\n", + "type": "string" + }, + "hostname": { + "description": "A hostname defined by the `HOSTNAME` environment variable.\n", + "type": "string" + }, + "license": { + "description": "ArangoDB Edition, either `\"community\"` or `\"enterprise\"`.\n", + "type": "string" + }, + "mode": { + "description": "Either `\"server\"` or `\"console\"`. **Deprecated**, use `operationMode` instead.\n", + "type": "string" + }, + "operationMode": { + "description": "Either `\"server\"` or `\"console\"`.\n", + "type": "string" + }, + "pid": { + "description": "The process ID of _arangod_.\n", + "type": "number" + }, + "server": { + "description": "Always `\"arango\"`.\n", + "type": "string" + }, + "serverInfo": { + "description": "Information about the server status.\n", + "properties": { + "address": { + "description": "The address of the server, e.g. `tcp://[::1]:8530`.\n*Cluster only* (Coordinators and DB-Servers).\n", + "type": "string" + }, + "maintenance": { + "description": "Whether the maintenance mode is enabled.\n", + "type": "boolean" + }, + "persistedId": { + "description": "The persisted ID, e. g. `\"CRDN-e427b441-5087-4a9a-9983-2fb1682f3e2a\"`.\n*Cluster only* (Agents, Coordinators, and DB-Servers).\n", + "type": "string" + }, + "progress": { + "description": "Startup and recovery information.\n\nYou can check for changes to determine whether progress was made between two\ncalls, but you should not rely on specific values as they may change between\nArangoDB versions. The values are only expected to change during the startup and\nshutdown, i.e. 
while `maintenance` is `true`.\n\nYou need to start _arangod_ with the `--server.early-connections` startup option\nenabled to be able to query the endpoint during the startup process.\nIf authentication is enabled, then you need to use the super-user JWT for the\nrequest because the user management is not available during the startup.\n", + "properties": { + "feature": { + "description": "Internal name of the feature that is currently being prepared, started,\nstopped or unprepared.\n", + "type": "string" + }, + "phase": { + "description": "Name of the lifecycle phase the instance is currently in. Normally one of\n`\"in prepare\"`, `\"in start\"`, `\"in wait\"`, `\"in shutdown\"`, `\"in stop\"`,\nor `\"in unprepare\"`.\n", + "type": "string" + }, + "recoveryTick": { + "description": "Current recovery sequence number value, if the instance is currently recovering.\nIf the instance is already past the recovery, this attribute will contain the\nlast handled recovery sequence number.\n", + "type": "number" + } + }, + "required": [ + "phase", + "feature", + "recoveryTick" + ], + "type": "object" + }, + "readOnly": { + "description": "Whether writes are disabled.\n", + "type": "boolean" + }, + "rebootId": { + "description": "The reboot ID. Changes on every restart.\n*Cluster only* (Agents, Coordinators, and DB-Servers).\n", + "type": "number" + }, + "role": { + "description": "Either `\"SINGLE\"`, `\"COORDINATOR\"`, `\"PRIMARY\"` (DB-Server), or `\"AGENT\"`.\n", + "type": "string" + }, + "serverId": { + "description": "The server ID, e.g. `\"CRDN-e427b441-5087-4a9a-9983-2fb1682f3e2a\"`.\n*Cluster only* (Coordinators and DB-Servers).\n", + "type": "string" + }, + "state": { + "description": "Either `\"STARTUP\"`, `\"SERVING\"`, or `\"SHUTDOWN\"`.\n*Cluster only* (Coordinators and DB-Servers).\n", + "type": "string" + }, + "writeOpsEnabled": { + "description": "Whether writes are enabled. 
**Deprecated**, use `readOnly` instead.\n", + "type": "boolean" + } + }, + "required": [ + "progress", + "role", + "writeOpsEnabled", + "readOnly", + "maintenance" + ], + "type": "object" + }, + "version": { + "description": "The server version as a string.\n", + "type": "string" + } + }, + "required": [ + "server", + "license", + "version", + "mode", + "operationMode", + "foxxApi", + "host", + "pid", + "serverInfo" + ], + "type": "object" + } + } + }, + "description": "Status information was returned successfully.\n" } }, - "summary": "Compact a collection", + "summary": "Get server status information", "tags": [ - "Collections" + "Administration" ] } }, - "/_api/collection/{collection-name}/count": { + "/_db/{database-name}/_admin/time": { "get": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nGet the number of documents in a collection.\n\n- `count`: The number of documents stored in the specified collection.\n", - "operationId": "getCollectionCount", + "description": "The call returns an object with the `time` attribute. This contains the\ncurrent system time as a Unix timestamp with microsecond precision.\n", + "operationId": "getTime", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", "in": "path", - "name": "collection-name", + "name": "database-name", "required": true, "schema": { "type": "string" @@ -5119,142 +4447,254 @@ } ], "responses": { - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404*\nis returned.\n" + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the HTTP status code\n", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", + "type": "boolean" + }, + "time": { + "description": "The current system time as a Unix timestamp with microsecond precision of the server\n", + "type": "number" + } + }, + "required": [ + "error", + "code", + "time" + ], + "type": "object" + } + } + }, + "description": "Time was returned successfully.\n" } }, - "summary": "Get the document count of a collection", + "summary": "Get the system time", "tags": [ - "Collections" + "Administration" ] } }, - "/_api/collection/{collection-name}/figures": { + "/_db/{database-name}/_admin/usage-metrics": { "get": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nIn addition to the above, the result also contains the number of documents\nand additional statistical information about the collection.\n", - "operationId": "getCollectionFigures", + "description": "Returns detailed shard usage metrics on DB-Servers.\n\nThese metrics can be enabled by setting the `--server.export-shard-usage-metrics`\nstartup option to `enabled-per-shard` to make DB-Servers collect per-shard\nusage metrics, or to 
`enabled-per-shard-per-user` to make DB-Servers collect\nusage metrics per shard and per user whenever a shard is accessed.\n", + "operationId": "getUsageMetrics", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database. If the `--server.harden` startup option is enabled,\nadministrate access to the `_system` database is required.\n", + "example": "_system", "in": "path", - "name": "collection-name", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "Setting `details` to `true` will return extended storage engine-specific\ndetails to the figures. The details are intended for debugging ArangoDB itself\nand their format is subject to change. By default, `details` is set to `false`,\nso no details are returned and the behavior is identical to previous versions\nof ArangoDB.\nPlease note that requesting `details` may cause additional load and thus have\nan impact on performance.\n", + "description": "Returns the usage metrics of the specified server. If no `serverId` is given,\nthe asked server will reply. This parameter is only meaningful on Coordinators.\n", "in": "query", - "name": "details", + "name": "serverId", "required": false, "schema": { - "type": "boolean" + "type": "string" } } ], "responses": { "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "count": { - "description": "The number of documents currently present in the collection.\n", - "type": "integer" - }, - "figures": { - "description": "The metrics of the collection.\n", - "properties": { - "indexes": { - "description": "The index metrics.\n", - "properties": { - "count": { - "description": "The total number of indexes defined for the collection, including the pre-defined\nindexes (e.g. 
primary index).\n", - "type": "integer" - }, - "size": { - "description": "The total memory allocated for indexes in bytes.\n", - "type": "integer" - } - }, - "required": [ - "count", - "size" - ], - "type": "object" - } - }, - "required": [ - "indexes" + "description": "Metrics were returned successfully.\n" + } + }, + "summary": "Get usage metrics", + "tags": [ + "Monitoring" + ] + } + }, + "/_db/{database-name}/_api/analyzer": { + "get": { + "description": "Retrieves a an array of all Analyzer definitions.\nThe resulting array contains objects with the following attributes:\n- `name`: the Analyzer name\n- `type`: the Analyzer type\n- `properties`: the properties used to configure the specified type\n- `features`: the set of features to set on the Analyzer generated fields\n", + "operationId": "listAnalyzers", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The Analyzer definitions was retrieved successfully.\n" + } + }, + "summary": "List all Analyzers", + "tags": [ + "Analyzers" + ] + }, + "post": { + "description": "Creates a new Analyzer based on the provided configuration.\n", + "operationId": "createAnalyzer", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "features": { + "description": "The set of features to set on the Analyzer generated fields.\nThe default value is an empty array.\n", + "items": { + "enum": [ + "frequency", + "norm", + "position", + "offset" ], - "type": "object" - } + "type": "string" + }, + "type": "array", + "uniqueItems": true }, - "required": [ - "count", - "figures" - ], - "type": "object" 
- } + "name": { + "description": "The Analyzer name.\n", + "type": "string" + }, + "properties": { + "description": "The properties used to configure the specified Analyzer type.\n", + "type": "object" + }, + "type": { + "description": "The Analyzer type.\n", + "type": "string" + } + }, + "required": [ + "name", + "type" + ], + "type": "object" } - }, - "description": "Returns information about the collection:\n" + } + } + }, + "responses": { + "200": { + "description": "An Analyzer with a matching name and definition already exists.\n" + }, + "201": { + "description": "A new Analyzer definition was successfully created.\n" }, "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" + "description": "One or more of the required parameters is missing or one or more of the parameters\nis not valid.\n" }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404*\nis returned.\n" + "403": { + "description": "The user does not have permission to create and Analyzer with this configuration.\n" } }, - "summary": "Get the collection statistics", + "summary": "Create an Analyzer", "tags": [ - "Collections" + "Analyzers" ] } }, - "/_api/collection/{collection-name}/load": { - "put": { - "description": "\u003e **WARNING:**\nThe load function is deprecated from version 3.8.0 onwards and is a no-op\nfrom version 3.9.0 onwards. It should no longer be used, as it may be removed\nin a future version of ArangoDB.\n\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nSince ArangoDB version 3.9.0 this API does nothing. Previously it used to\nload a collection into memory.\n\nThe request body object might optionally contain the following attribute:\n\n- `count`: If set, this controls whether the return value should include\n the number of documents in the collection. 
Setting `count` to\n `false` may speed up loading a collection. The default value for\n `count` is `true`.\n\nA call to this API returns an object with the following attributes for\ncompatibility reasons:\n\n- `id`: The identifier of the collection.\n\n- `name`: The name of the collection.\n\n- `count`: The number of documents inside the collection. This is only\n returned if the `count` input parameters is set to `true` or has\n not been specified.\n\n- `status`: The status of the collection as number.\n\n- `type`: The collection type. Valid types are:\n - 2: document collection\n - 3: edge collection\n\n- `isSystem`: If `true` then the collection is a system collection.\n", - "operationId": "loadCollection", + "/_db/{database-name}/_api/analyzer/{analyzer-name}": { + "delete": { + "description": "Removes an Analyzer configuration identified by `analyzer-name`.\n\nIf the Analyzer definition was successfully dropped, an object is returned with\nthe following attributes:\n- `error`: `false`\n- `name`: The name of the removed Analyzer\n", + "operationId": "deleteAnalyzer", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection-name", + "name": "database-name", "required": true, "schema": { "type": "string" } + }, + { + "description": "The name of the Analyzer to remove.\n", + "in": "path", + "name": "analyzer-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The Analyzer configuration should be removed even if it is in-use.\nThe default value is `false`.\n", + "in": "query", + "name": "force", + "required": false, + "schema": { + "type": "boolean" + } } ], "responses": { + "200": { + "description": "The Analyzer configuration was removed successfully.\n" + }, "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" + "description": "The `analyzer-name` was not 
supplied or another request parameter was not\nvalid.\n" + }, + "403": { + "description": "The user does not have permission to remove this Analyzer configuration.\n" }, "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404*\nis returned.\n" + "description": "Such an Analyzer configuration does not exist.\n" + }, + "409": { + "description": "The specified Analyzer configuration is still in use and `force` was omitted or\n`false` specified.\n" } }, - "summary": "Load a collection", + "summary": "Remove an Analyzer", "tags": [ - "Collections" + "Analyzers" ] - } - }, - "/_api/collection/{collection-name}/loadIndexesIntoMemory": { - "put": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nYou can call this endpoint to try to cache this collection's index entries in\nthe main memory. Index lookups served from the memory cache can be much faster\nthan lookups not stored in the cache, resulting in a performance boost.\n\nThe endpoint iterates over suitable indexes of the collection and stores the\nindexed values (not the entire document data) in memory. This is implemented for\nedge indexes only.\n\nThe endpoint returns as soon as the index warmup has been scheduled. The index\nwarmup may still be ongoing in the background, even after the return value has\nalready been sent. As all suitable indexes are scanned, it may cause significant\nI/O activity and background load.\n\nThis feature honors memory limits. If the indexes you want to load are smaller\nthan your memory limit, this feature guarantees that most index values are\ncached. If the index is greater than your memory limit, this feature fills\nup values up to this limit. 
You cannot control which indexes of the collection\nshould have priority over others.\n\nIt is guaranteed that the in-memory cache data is consistent with the stored\nindex data at all times.\n\nOn success, this endpoint returns an object with attribute `result` set to `true`.\n", - "operationId": "loadCollectionIndexes", + }, + "get": { + "description": "Retrieves the full definition for the specified Analyzer name.\nThe resulting object contains the following attributes:\n- `name`: the Analyzer name\n- `type`: the Analyzer type\n- `properties`: the properties used to configure the specified type\n- `features`: the set of features to set on the Analyzer generated fields\n", + "operationId": "getAnalyzer", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection-name", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the Analyzer to retrieve.\n", + "in": "path", + "name": "analyzer-name", "required": true, "schema": { "type": "string" @@ -5263,34 +4703,41 @@ ], "responses": { "200": { - "description": "If the index loading has been scheduled for all suitable indexes.\n" - }, - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" + "description": "The Analyzer definition was retrieved successfully.\n" }, "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + "description": "Such an Analyzer configuration does not exist.\n" } }, - "summary": "Load collection indexes into memory", + "summary": "Get an Analyzer definition", "tags": [ - "Collections" + "Analyzers" ] } }, - "/_api/collection/{collection-name}/properties": { + "/_db/{database-name}/_api/aqlfunction": { "get": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 
on.\nYou should reference them via their names instead.\n\n\nReturns all properties of the specified collection.\n", - "operationId": "getCollectionProperties", + "description": "Returns all registered user-defined functions (UDFs) for the use in AQL of the\ncurrent database.\n\nThe call returns a JSON array with status codes and all user functions found under `result`.\n", + "operationId": "listAqlUserFunctions", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection-name", + "name": "database-name", "required": true, "schema": { "type": "string" } + }, + { + "description": "Returns all registered AQL user functions from the specified namespace.\n", + "in": "query", + "name": "namespace", + "required": false, + "schema": { + "type": "string" + } } ], "responses": { @@ -5298,192 +4745,102 @@ "content": { "application/json": { "schema": { - "description": "", "properties": { - "cacheEnabled": { - "description": "Whether the in-memory hash cache for documents is enabled for this\ncollection.\n", + "code": { + "description": "the HTTP status code\n", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", "type": "boolean" }, - "computedValues": { - "description": "A list of objects, each representing a computed value.\n", + "result": { + "description": "All functions, or the ones matching the `namespace` parameter\n", "items": { "properties": { - "computeOn": { - "description": "An array of strings that defines on which write operations the value is\ncomputed. 
The possible values are `\"insert\"`, `\"update\"`, and `\"replace\"`.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "expression": { - "description": "An AQL `RETURN` operation with an expression that computes the desired value.\n", + "code": { + "description": "A string representation of the function body\n", "type": "string" }, - "failOnWarning": { - "description": "Whether the write operation fails if the expression produces a warning.\n", - "type": "boolean" - }, - "keepNull": { - "description": "Whether the target attribute is set if the expression evaluates to `null`.\n", + "isDeterministic": { + "description": "an optional boolean value to indicate whether the function\nresults are fully deterministic (function return value solely depends on\nthe input value and return value is the same for repeated calls with same\ninput). The `isDeterministic` attribute is currently not used but may be\nused later for optimizations.\n", "type": "boolean" }, "name": { - "description": "The name of the target attribute.\n", + "description": "The fully qualified name of the user function\n", "type": "string" - }, - "overwrite": { - "description": "Whether the computed value takes precedence over a user-provided or\nexisting attribute.\n", - "type": "boolean" } }, "required": [ "name", - "expression", - "overwrite" + "code", + "isDeterministic" ], "type": "object" }, "type": "array" - }, - "distributeShardsLike": { - "description": "The name of another collection. This collection uses the `replicationFactor`,\n`numberOfShards` and `shardingStrategy` properties of the other collection and\nthe shards of this collection are distributed in the same way as the shards of\nthe other collection.\n", - "type": "string" - }, - "globallyUniqueId": { - "description": "A unique identifier of the collection. 
This is an internal property.\n", - "type": "string" - }, - "id": { - "description": "A unique identifier of the collection (deprecated).\n", - "type": "string" - }, - "isDisjoint": { - "description": "Whether the SmartGraph or EnterpriseGraph this collection belongs to is disjoint\n(Enterprise Edition only). This is an internal property. _(cluster only)_\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the collection is used in a SmartGraph or EnterpriseGraph (Enterprise Edition only).\nThis is an internal property. _(cluster only)_\n", - "type": "boolean" - }, - "isSystem": { - "description": "Whether the collection is a system collection. Collection names that starts with\nan underscore are usually system collections.\n", - "type": "boolean" - }, - "keyOptions": { - "description": "An object which contains key generation options.\n", - "properties": { - "allowUserKeys": { - "description": "If set to `true`, then you are allowed to supply\nown key values in the `_key` attribute of a document. If set to\n`false`, then the key generator is solely responsible for\ngenerating keys and an error is raised if you supply own key values in the\n`_key` attribute of documents.\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. 
Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", - "type": "boolean" - }, - "increment": { - "description": "The increment value for the `autoincrement` key generator.\nNot used for other key generator types.\n", - "type": "integer" - }, - "lastValue": { - "description": "The current offset value of the `autoincrement` or `padded` key generator.\nThis is an internal property for restoring dumps properly.\n", - "type": "integer" - }, - "offset": { - "description": "The initial offset value for the `autoincrement` key generator.\nNot used for other key generator types.\n", - "type": "integer" - }, - "type": { - "description": "Specifies the type of the key generator. Possible values:\n- `\"traditional\"`\n- `\"autoincrement\"`\n- `\"uuid\"`\n- `\"padded\"`\n", - "type": "string" - } - }, - "required": [ - "type", - "allowUserKeys", - "lastValue" - ], - "type": "object" - }, - "name": { - "description": "The name of this collection.\n", - "type": "string" - }, - "numberOfShards": { - "description": "The number of shards of the collection. _(cluster only)_\n", - "type": "integer" - }, - "replicationFactor": { - "description": "Contains how many copies of each shard are kept on different DB-Servers.\nIt is an integer number in the range of 1-10 or the string `\"satellite\"`\nfor SatelliteCollections (Enterprise Edition only). 
_(cluster only)_\n", + } + }, + "required": [ + "error", + "code", + "result" + ], + "type": "object" + } + } + }, + "description": "on success *HTTP 200* is returned.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the HTTP status code\n", "type": "integer" }, - "schema": { - "description": "An object that specifies the collection-level schema for documents.\n", - "type": "object" - }, - "shardKeys": { - "description": "Contains the names of document attributes that are used to\ndetermine the target shard for documents. _(cluster only)_\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "shardingStrategy": { - "description": "The sharding strategy selected for the collection. _(cluster only)_\n\nPossible values:\n- `\"community-compat\"`\n- `\"enterprise-compat\"`\n- `\"enterprise-smart-edge-compat\"`\n- `\"hash\"`\n- `\"enterprise-hash-smart-edge\"`\n- `\"enterprise-hex-smart-vertex\"`\n", - "type": "string" - }, - "smartGraphAttribute": { - "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. Edges derive the\nattribute from their connected vertices (Enterprise Edition only). _(cluster only)_\n", - "type": "string" - }, - "smartJoinAttribute": { - "description": "Determines an attribute of the collection that must contain the shard key value\nof the referred-to SmartJoin collection (Enterprise Edition only). _(cluster only)_\n", - "type": "string" - }, - "syncByRevision": { - "description": "Whether the newer revision-based replication protocol is\nenabled for this collection. 
This is an internal property.\n", + "error": { + "description": "boolean flag to indicate whether an error occurred (`true` in this case)\n", "type": "boolean" }, - "type": { - "description": "The type of the collection:\n - `0`: \"unknown\"\n - `2`: regular document collection\n - `3`: edge collection\n", - "type": "integer" - }, - "waitForSync": { - "description": "If `true`, creating, changing, or removing\ndocuments waits until the data has been synchronized to disk.\n", - "type": "boolean" + "errorMessage": { + "description": "a descriptive error message\n", + "type": "string" }, - "writeConcern": { - "description": "Determines how many copies of each shard are required to be\nin-sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`. 
_(cluster only)_\n", + "errorNum": { + "description": "the server error number\n", "type": "integer" } }, "required": [ - "waitForSync", - "keyOptions", - "cacheEnabled", - "syncByRevision" + "error", + "code", + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "" - }, - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404*\nis returned.\n" + "description": "If the user function name is malformed, the server will respond with *HTTP 400*.\n" } }, - "summary": "Get the properties of a collection", + "summary": "List the registered user-defined AQL functions", "tags": [ - "Collections" + "Queries" ] }, - "put": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nChanges the properties of a collection. Only the provided attributes are\nupdated. Collection properties **cannot be changed** once a collection is\ncreated except for the listed properties, as well as the collection name via\nthe rename endpoint (but not in clusters).\n", - "operationId": "updateCollectionProperties", + "post": { + "description": "Registers a user-defined function (UDF) written in JavaScript for the use in\nAQL queries in the current database.\n\nIn case of success, HTTP 200 is returned.\nIf the function isn't valid etc. 
HTTP 400 including a detailed error message will be returned.\n", + "operationId": "createAqlUserFunction", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection-name", + "name": "database-name", "required": true, "schema": { "type": "string" @@ -5495,154 +4852,277 @@ "application/json": { "schema": { "properties": { - "cacheEnabled": { - "description": "Whether the in-memory hash cache for documents should be enabled for this\ncollection (default: `false`). Can be controlled globally with the `--cache.size`\nstartup option. The cache can speed up repeated reads of the same documents via\ntheir document keys. If the same documents are not fetched often or are\nmodified frequently, then you may disable the cache to avoid the maintenance\ncosts.\n", - "type": "boolean" - }, - "computedValues": { - "description": "An optional list of objects, each representing a computed value.\n", - "items": { - "properties": { - "computeOn": { - "description": "An array of strings to define on which write operations the value shall be\ncomputed. 
The possible values are `\"insert\"`, `\"update\"`, and `\"replace\"`.\nThe default is `[\"insert\", \"update\", \"replace\"]`.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "expression": { - "description": "An AQL `RETURN` operation with an expression that computes the desired value.\nSee [Computed Value Expressions](../../concepts/data-structure/documents/computed-values.md#computed-value-expressions) for details.\n", - "type": "string" - }, - "failOnWarning": { - "description": "Whether to let the write operation fail if the expression produces a warning.\nThe default is `false`.\n", - "type": "boolean" - }, - "keepNull": { - "description": "Whether the target attribute shall be set if the expression evaluates to `null`.\nYou can set the option to `false` to not set (or unset) the target attribute if\nthe expression returns `null`. The default is `true`.\n", - "type": "boolean" - }, - "name": { - "description": "The name of the target attribute. Can only be a top-level attribute, but you\nmay return a nested object. Cannot be `_key`, `_id`, `_rev`, `_from`, `_to`,\nor a shard key attribute.\n", - "type": "string" - }, - "overwrite": { - "description": "Whether the computed value shall take precedence over a user-provided or\nexisting attribute.\n", - "type": "boolean" - } - }, - "required": [ - "name", - "expression", - "overwrite" - ], - "type": "object" - }, - "type": "array" - }, - "replicationFactor": { - "description": "(The default is `1`): in a cluster, this attribute determines how many copies\nof each shard are kept on different DB-Servers. The value 1 means that only one\ncopy (no synchronous replication) is kept. A value of k means that k-1 replicas\nare kept. For SatelliteCollections, it needs to be the string `\"satellite\"`,\nwhich matches the replication factor to the number of DB-Servers\n(Enterprise Edition only).\n\nAny two copies reside on different DB-Servers. 
Replication between them is\nsynchronous, that is, every write operation to the \"leader\" copy will be replicated\nto all \"follower\" replicas, before the write operation is reported successful.\n\nIf a server fails, this is detected automatically and one of the servers holding\ncopies take over, usually without an error being reported.\n", - "type": "integer" - }, - "schema": { - "description": "Optional object that specifies the collection level schema for\ndocuments. The attribute keys `rule`, `level` and `message` must follow the\nrules documented in [Document Schema Validation](../../concepts/data-structure/documents/schema-validation.md)\n", - "type": "object" + "code": { + "description": "a string representation of the function body.\n", + "type": "string" }, - "waitForSync": { - "description": "If `true` then the data is synchronized to disk before returning from a\ndocument create, update, replace or removal operation. (default: false)\n", + "isDeterministic": { + "description": "an optional boolean value to indicate whether the function\nresults are fully deterministic (function return value solely depends on\nthe input value and return value is the same for repeated calls with same\ninput). The `isDeterministic` attribute is currently not used but may be\nused later for optimizations.\n", "type": "boolean" }, - "writeConcern": { - "description": "Write concern for this collection (default: 1).\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`. 
_(cluster only)_\n", - "type": "integer" + "name": { + "description": "the fully qualified name of the user functions.\n", + "type": "string" } }, + "required": [ + "name", + "code" + ], "type": "object" } } } }, - "responses": { - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404*\nis returned.\n" - } - }, - "summary": "Change the properties of a collection", - "tags": [ - "Collections" - ] - } - }, - "/_api/collection/{collection-name}/recalculateCount": { - "put": { - "description": "Recalculates the document count of a collection, if it ever becomes inconsistent.\n\nIt returns an object with the attributes\n\n- `result`: will be `true` if recalculating the document count succeeded.\n", - "operationId": "recalculateCollectionCount", - "parameters": [ - { - "description": "The name of the collection.\n", - "in": "path", - "name": "collection-name", - "required": true, - "schema": { - "type": "string" - } - } - ], "responses": { "200": { - "description": "If the document count was recalculated successfully, *HTTP 200* is returned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" - } - }, - "summary": "Recalculate the document count of a collection", - "tags": [ - "Collections" - ] - } - }, - "/_api/collection/{collection-name}/rename": { - "put": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nRenames a collection. Expects an object with the attribute(s)\n\n- `name`: The new name.\n\nIt returns an object with the attributes\n\n- `id`: The identifier of the collection.\n\n- `name`: The new name of the collection.\n\n- `status`: The status of the collection as number.\n\n- `type`: The collection type. 
Valid types are:\n - 2: document collection\n - 3: edges collection\n\n- `isSystem`: If `true` then the collection is a system collection.\n\nIf renaming the collection succeeds, then the collection is also renamed in\nall graph definitions inside the `_graphs` collection in the current database.\n\n\u003e **INFO:**\nRenaming collections is not supported in cluster deployments.\n", - "operationId": "renameCollection", - "parameters": [ - { - "description": "The name of the collection to rename.\n", - "in": "path", - "name": "collection-name", + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the HTTP status code\n", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", + "type": "boolean" + }, + "isNewlyCreated": { + "description": "boolean flag to indicate whether the function was newly created (`false` in this case)\n", + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "isNewlyCreated" + ], + "type": "object" + } + } + }, + "description": "If the function already existed and was replaced by the\ncall, the server will respond with *HTTP 200*.\n" + }, + "201": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the HTTP status code\n", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", + "type": "boolean" + }, + "isNewlyCreated": { + "description": "boolean flag to indicate whether the function was newly created (`true` in this case)\n", + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "isNewlyCreated" + ], + "type": "object" + } + } + }, + "description": "If the function can be registered by the server, the server will respond with\n*HTTP 201*.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the 
HTTP status code\n", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate whether an error occurred (`true` in this case)\n", + "type": "boolean" + }, + "errorMessage": { + "description": "a descriptive error message\n", + "type": "string" + }, + "errorNum": { + "description": "the server error number\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "If the JSON representation is malformed or mandatory data is missing from the\nrequest, the server will respond with *HTTP 400*.\n" + } + }, + "summary": "Create a user-defined AQL function", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/aqlfunction/{name}": { + "delete": { + "description": "Deletes an existing user-defined function (UDF) or function group identified by\n`name` from the current database.\n", + "operationId": "deleteAqlUserFunction", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "the name of the AQL user function.\n", + "in": "path", + "name": "name", "required": true, "schema": { "type": "string" } + }, + { + "description": "- `true`: The function name provided in `name` is treated as\n a namespace prefix, and all functions in the specified namespace will be deleted.\n The returned number of deleted functions may become 0 if none matches the string.\n- `false`: The function name provided in `name` must be fully\n qualified, including any namespaces. 
If none matches the `name`, HTTP 404 is returned.\n", + "in": "query", + "name": "group", + "required": false, + "schema": { + "type": "string" + } } ], "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the HTTP status code\n", + "type": "integer" + }, + "deletedCount": { + "description": "The number of deleted user functions, always `1` when `group` is set to `false`.\nAny number `\u003e= 0` when `group` is set to `true`.\n", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate whether an error occurred (`false` in this case)\n", + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "deletedCount" + ], + "type": "object" + } + } + }, + "description": "If the function can be removed by the server, the server will respond with\n*HTTP 200*.\n" + }, "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the HTTP status code\n", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate whether an error occurred (`true` in this case)\n", + "type": "boolean" + }, + "errorMessage": { + "description": "a descriptive error message\n", + "type": "string" + }, + "errorNum": { + "description": "the server error number\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "If the user function name is malformed, the server will respond with *HTTP 400*.\n" }, "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404*\nis returned.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the HTTP status code\n", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate whether an error occurred (`true` in 
this case)\n", + "type": "boolean" + }, + "errorMessage": { + "description": "a descriptive error message\n", + "type": "string" + }, + "errorNum": { + "description": "the server error number\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "If the specified user function does not exist, the server will respond with *HTTP 404*.\n" } }, - "summary": "Rename a collection", + "summary": "Remove a user-defined AQL function", "tags": [ - "Collections" + "Queries" ] } }, - "/_api/collection/{collection-name}/responsibleShard": { - "put": { - "description": "Returns the ID of the shard that is responsible for the given document\n(if the document exists) or that would be responsible if such document\nexisted.\n\nThe request must body must contain a JSON document with at least the\ncollection's shard key attributes set to some values.\n\nThe response is a JSON object with a `shardId` attribute, which will\ncontain the ID of the responsible shard.\n\n\u003e **INFO:**\nThis method is only available in cluster deployments on Coordinators.\n", - "operationId": "getResponsibleShard", + "/_db/{database-name}/_api/batch": { + "post": { + "description": "Executes a batch request. A batch request can contain any number of\nother requests that can be sent to ArangoDB in isolation. The benefit of\nusing batch requests is that batching requests requires less client/server\nroundtrips than when sending isolated requests.\n\nAll parts of a batch request are executed serially on the server. The\nserver will return the results of all parts in a single response when all\nparts are finished.\n\nTechnically, a batch request is a multipart HTTP request, with\ncontent-type `multipart/form-data`. A batch request consists of an\nenvelope and the individual batch part actions. 
Batch part actions\nare \"regular\" HTTP requests, including full header and an optional body.\nMultiple batch parts are separated by a boundary identifier. The\nboundary identifier is declared in the batch envelope. The MIME content-type\nfor each individual batch part must be `application/x-arango-batchpart`.\n\nPlease note that when constructing the individual batch parts, you must\nuse CRLF (`\\r\\n`) as the line terminator as in regular HTTP messages.\n\nThe response sent by the server will be an `HTTP 200` response, with an\noptional error summary header `x-arango-errors`. This header contains the\nnumber of batch part operations that failed with an HTTP error code of at\nleast 400. This header is only present in the response if the number of\nerrors is greater than zero.\n\nThe response sent by the server is a multipart response, too. It contains\nthe individual HTTP responses for all batch parts, including the full HTTP\nresult header (with status code and other potential headers) and an\noptional result body. The individual batch parts in the result are\nseparated using the same boundary value as specified in the request.\n\nThe order of batch parts in the response will be the same as in the\noriginal client request. 
Client can additionally use the `Content-Id`\nMIME header in a batch part to define an individual id for each batch part.\nThe server will return this id in the batch part responses, too.\n",
+        "operationId": "executeBatchRequest",
         "parameters": [
           {
-            "description": "The name of the collection.\n",
+            "description": "The name of the database.\n",
+            "example": "_system",
             "in": "path",
-            "name": "collection-name",
+            "name": "database-name",
             "required": true,
             "schema": {
               "type": "string"
@@ -5654,13 +5134,13 @@
           "application/json": {
             "schema": {
               "properties": {
-                "document": {
-                  "description": "The request body must be a JSON object with at least the shard key\nattributes set to some values, but it may also be a full document.\n",
-                  "type": "object"
+                "body": {
+                  "description": "The multipart batch request, consisting of the envelope and the individual\nbatch parts.\n",
+                  "type": "string"
                 }
               },
               "required": [
-                "document"
+                "body"
               ],
               "type": "object"
             }
           }
         }
       },
       "responses": {
         "200": {
-          "description": "Returns the ID of the responsible shard.\n"
+          "description": "is returned if the batch was received successfully. 
HTTP 200 is returned\neven if one or multiple batch part actions failed.\n" }, "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\nAdditionally, if not all of the collection's shard key\nattributes are present in the input document, then a\n*HTTP 400* is returned as well.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then an *HTTP 404*\nis returned.\n" + "description": "is returned if the batch envelope is malformed or incorrectly formatted.\nThis code will also be returned if the content-type of the overall batch\nrequest or the individual MIME parts is not as expected.\n" }, - "501": { - "description": "*HTTP 501* is returned if the method is called on a single server.\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" } }, - "summary": "Get the responsible shard for a document", + "summary": "Execute a batch request", "tags": [ - "Collections" + "Batch Requests" ] } }, - "/_api/collection/{collection-name}/revision": { + "/_db/{database-name}/_api/collection": { "get": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nThe response will contain the collection's latest used revision id.\nThe revision id is a server-generated string that clients can use to\ncheck whether data in a collection has changed since the last revision check.\n\n- `revision`: The collection revision id as a string.\n", - "operationId": "getCollectionRevision", + "description": "Returns basic information for all collections in the current database,\noptionally excluding system collections.\n", + "operationId": "listCollections", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection-name", - "required": true, - "schema": { - "type": "string" - 
} - } - ], - "responses": { - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404*\nis returned.\n" - } - }, - "summary": "Get the collection revision ID", - "tags": [ - "Collections" - ] - } - }, - "/_api/collection/{collection-name}/shards": { - "get": { - "description": "By default returns a JSON array with the shard IDs of the collection.\n\nIf the `details` parameter is set to `true`, it will return a JSON object with the\nshard IDs as object attribute keys, and the responsible servers for each shard mapped to them.\nIn the detailed response, the leader shards will be first in the arrays.\n\n\u003e **INFO:**\nThis method is only available in cluster deployments on Coordinators.\n", - "operationId": "getCollectionShards", - "parameters": [ - { - "description": "The name of the collection.\n", - "in": "path", - "name": "collection-name", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "If set to true, the return value will also contain the responsible servers for the collections' shards.\n", + "description": "Whether system collections should be excluded from the result.\n", "in": "query", - "name": "details", + "name": "excludeSystem", "required": false, "schema": { "type": "boolean" @@ -5742,813 +5191,551 @@ ], "responses": { "200": { - "description": "Returns the collection's shards.\n" - }, - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then an *HTTP 404*\nis returned.\n" - }, - "501": { - "description": "*HTTP 501* is returned if the method is called on a single server.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + 
"error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "result": { + "description": "A list with every item holding basic collection metadata.\n", + "items": { + "properties": { + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isSystem": { + "description": "Whether the collection is a system collection. Collection names that starts with\nan underscore are usually system collections.\n", + "example": false, + "type": "boolean" + }, + "name": { + "description": "The name of the collection.\n", + "example": "coll", + "type": "string" + }, + "status": { + "description": "The status of the collection.\n- `3`: loaded\n- `5`: deleted\n\nEvery other status indicates a corrupted collection.\n", + "example": 3, + "type": "integer" + }, + "type": { + "description": "The type of the collection:\n- `0`: \"unknown\"\n- `2`: regular document collection\n- `3`: edge collection\n", + "example": 2, + "type": "integer" + } + }, + "required": [ + "id", + "name", + "status", + "type", + "isSystem", + "globallyUniqueId" + ], + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "error", + "code", + "result" + ], + "type": "object" + } + } + }, + "description": "The list of collections.\n" } }, - "summary": "Get the shard IDs of a collection", + "summary": "List all collections", "tags": [ "Collections" ] - } - }, - "/_api/collection/{collection-name}/truncate": { - "put": { - "description": "\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nRemoves all documents from the collection, but leaves the indexes intact.\n", - "operationId": "truncateCollection", + }, + "post": { + "description": "Creates a new 
collection with a given name. The request must contain an\nobject with the following attributes.\n", + "operationId": "createCollection", "parameters": [ { - "description": "The name of the collection.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection-name", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "If `true` then the data is synchronized to disk before returning from the\ntruncate operation (default: `false`)\n", + "description": "The default is `true`, which means the server only reports success back to the\nclient when all replicas have created the collection. Set it to `false` if you want\nfaster server responses and don't care about full replication.\n", "in": "query", - "name": "waitForSync", + "name": "waitForSyncReplication", "required": false, "schema": { + "default": true, "type": "boolean" } }, { - "description": "If `true` (default) then the storage engine is told to start a compaction\nin order to free up disk space. This can be resource intensive. If the only\nintention is to start over with an empty collection, specify `false`.\n", + "description": "The default is `true`, which means the server checks if there are enough replicas\navailable at creation time and bail out otherwise. 
Set it to `false` to disable\nthis extra check.\n", "in": "query", - "name": "compact", + "name": "enforceReplicationFactor", "required": false, "schema": { + "default": true, "type": "boolean" } } ], - "responses": { - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404*\nis returned.\n" - } - }, - "summary": "Truncate a collection", - "tags": [ - "Collections" - ] - } - }, - "/_api/collection/{collection-name}/unload": { - "put": { - "description": "\u003e **WARNING:**\nThe unload function is deprecated from version 3.8.0 onwards and is a no-op\nfrom version 3.9.0 onwards. It should no longer be used, as it may be removed\nin a future version of ArangoDB.\n\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n\n\nSince ArangoDB version 3.9.0 this API does nothing. Previously it used to\nunload a collection from memory, while preserving all documents.\nWhen calling the API an object with the following attributes is\nreturned for compatibility reasons:\n\n- `id`: The identifier of the collection.\n\n- `name`: The name of the collection.\n\n- `status`: The status of the collection as number.\n\n- `type`: The collection type. 
Valid types are:\n - 2: document collection\n - 3: edges collection\n\n- `isSystem`: If `true` then the collection is a system collection.\n", - "operationId": "unloadCollection", - "parameters": [ - { - "description": "The name of the collection.\n", - "in": "path", - "name": "collection-name", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "400": { - "description": "If the `collection-name` is missing, then a *HTTP 400* is\nreturned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" - } - }, - "summary": "Unload a collection", - "tags": [ - "Collections" - ] - } - }, - "/_api/control_pregel": { - "get": { - "description": "Returns a list of currently running and recently finished Pregel jobs without\nretrieving their results.\n", - "operationId": "listPregelJobs", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "description": "A list of objects describing the Pregel jobs.\n", - "items": { - "properties": { - "algorithm": { - "description": "The algorithm used by the job.\n", - "type": "string" - }, - "computationTime": { - "description": "The algorithm execution time. Is shown when the computation started.\n", - "type": "number" - }, - "created": { - "description": "The date and time when the job was created.\n", - "type": "string" - }, - "detail": { - "description": "The Pregel run details.\n", - "properties": { - "aggregatedStatus": { - "description": "The aggregated details of the full Pregel run. 
The values are totals of all the\nDB-Server.\n", - "properties": { - "allGssStatus": { - "description": "Information about the global supersteps.\n", - "properties": { - "items": { - "description": "A list of objects with details for each global superstep.\n", - "items": { - "properties": { - "memoryBytesUsedForMessages": { - "description": "The number of bytes used in memory for the messages in this step.\n", - "type": "integer" - }, - "messagesReceived": { - "description": "The number of messages received in this step.\n", - "type": "integer" - }, - "messagesSent": { - "description": "The number of messages sent in this step.\n", - "type": "integer" - }, - "verticesProcessed": { - "description": "The number of vertices that have been processed in this step.\n", - "type": "integer" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "graphStoreStatus": { - "description": "The status of the in memory graph.\n", - "properties": { - "edgesLoaded": { - "description": "The number of edges that are loaded from the database into memory.\n", - "type": "integer" - }, - "memoryBytesUsed": { - "description": "The number of bytes used in-memory for the loaded graph.\n", - "type": "integer" - }, - "verticesLoaded": { - "description": "The number of vertices that are loaded from the database into memory.\n", - "type": "integer" - }, - "verticesStored": { - "description": "The number of vertices that are written back to the database after the Pregel\ncomputation finished. 
It is only set if the `store` parameter is set to `true`.\n", - "type": "integer" - } - }, - "type": "object" - }, - "timeStamp": { - "description": "The time at which the status was measured.\n", - "type": "string" - } - }, - "required": [ - "timeStamp" + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "cacheEnabled": { + "default": false, + "description": "Whether the in-memory hash cache for documents should be enabled for this\ncollection. Can be controlled globally with the `--cache.size`\nstartup option. The cache can speed up repeated reads of the same documents via\ntheir document keys. If the same documents are not fetched often or are\nmodified frequently, then you may disable the cache to avoid the maintenance\ncosts.\n", + "type": "boolean" + }, + "computedValues": { + "description": "An optional list of objects, each representing a computed value.\n", + "items": { + "properties": { + "computeOn": { + "default": [ + "insert", + "update", + "replace" + ], + "description": "An array of strings to define on which write operations the value shall be\ncomputed.\n", + "items": { + "enum": [ + "insert", + "update", + "replace" ], - "type": "object" + "type": "string" }, - "workerStatus": { - "description": "The details of the Pregel for every DB-Server. 
Each object key is a DB-Server ID,\n\nand each value is a nested object similar to the `aggregatedStatus` attribute.\n\nIn a single server deployment, there is only a single entry with an empty string as key.\n", - "type": "object" - } + "type": "array", + "uniqueItems": true }, - "required": [ - "aggregatedStatus", - "workerStatus" - ], - "type": "object" + "expression": { + "description": "An AQL `RETURN` operation with an expression that computes the desired value.\nSee [Computed Value Expressions](https://docs.arangodb.com/3.12/concepts/data-structure/documents/computed-values/#computed-value-expressions) for details.\n", + "type": "string" + }, + "failOnWarning": { + "default": false, + "description": "Whether to let the write operation fail if the expression produces a warning.\n", + "type": "boolean" + }, + "keepNull": { + "default": true, + "description": "Whether the target attribute shall be set if the expression evaluates to `null`.\nYou can set the option to `false` to not set (or unset) the target attribute if\nthe expression returns `null`.\n", + "type": "boolean" + }, + "name": { + "description": "The name of the target attribute. Can only be a top-level attribute, but you\nmay return a nested object. Cannot be `_key`, `_id`, `_rev`, `_from`, `_to`,\nor a shard key attribute.\n", + "type": "string" + }, + "overwrite": { + "description": "Whether the computed value shall take precedence over a user-provided or\nexisting attribute.\n", + "type": "boolean" + } }, - "edgeCount": { - "description": "The total number of edges processed.\n", - "type": "integer" + "required": [ + "name", + "expression", + "overwrite" + ], + "type": "object" + }, + "type": "array" + }, + "distributeShardsLike": { + "default": "", + "description": "The name of another collection. 
If this property is set in a cluster, the\ncollection copies the `replicationFactor`, `numberOfShards` and `shardingStrategy`\nproperties from the specified collection (referred to as the _prototype collection_)\nand distributes the shards of this collection in the same way as the shards of\nthe other collection. In an Enterprise Edition cluster, this data co-location is\nutilized to optimize queries.\n\nYou need to use the same number of `shardKeys` as the prototype collection, but\nyou can use different attributes.\n\n\u003e **INFO:**\nUsing this parameter has consequences for the prototype\ncollection. It can no longer be dropped, before the sharding-imitating\ncollections are dropped. Equally, backups and restores of imitating\ncollections alone generate warnings (which can be overridden)\nabout a missing sharding prototype.\n", + "type": "string" + }, + "isDisjoint": { + "description": "Whether the collection is for a Disjoint SmartGraph\n(Enterprise Edition only). This is an internal property.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the collection is for a SmartGraph or EnterpriseGraph\n(Enterprise Edition only). This is an internal property.\n", + "type": "boolean" + }, + "isSystem": { + "default": false, + "description": "If `true`, create a system collection. In this case, the `collection-name`\nshould start with an underscore. End-users should normally create non-system\ncollections only. API implementors may be required to create system\ncollections in very special occasions, but normally a regular collection will do.\n", + "type": "boolean" + }, + "keyOptions": { + "description": "additional options for key generation. If specified, then `keyOptions`\nshould be a JSON object containing the following attributes:\n", + "properties": { + "allowUserKeys": { + "description": "If set to `true`, then you are allowed to supply own key values in the\n`_key` attribute of documents. 
If set to `false`, then the key generator\nis solely responsible for generating keys and an error is raised if you\nsupply own key values in the `_key` attribute of documents.\n\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", + "type": "boolean" }, - "expires": { - "description": "The date and time when the job results expire. The expiration date is only\nmeaningful for jobs that were completed, canceled or resulted in an error. Such jobs\nare cleaned up by the garbage collection when they reach their expiration date/time.\n", - "type": "string" + "increment": { + "description": "The increment value for the `autoincrement` key generator.\nNot allowed for other key generator types.\n", + "type": "integer" }, - "gss": { - "description": "The number of global supersteps executed.\n", + "offset": { + "description": "The initial offset value for the `autoincrement` key generator.\nNot allowed for other key generator types.\n", "type": "integer" }, - "gssTimes": { - "description": "Computation time of each global super step. 
Is shown when the computation started.\n", - "items": { - "type": "number" - }, - "type": "array" - }, - "id": { - "description": "The ID of the Pregel job, as a string.\n", - "type": "string" - }, - "reports": { - "description": "This attribute is used by Programmable Pregel Algorithms (`ppa`, experimental).\nThe value is only populated once the algorithm has finished.\n", - "items": { - "type": "object" - }, - "type": "array" - }, - "startupTime": { - "description": "The startup runtime of the execution.\nThe startup time includes the data loading time and can be substantial.\n", - "type": "number" - }, - "state": { - "description": "The state of the execution. The following values can be returned:\n- `\"none\"`: The Pregel run has not started yet.\n- `\"loading\"`: The graph is being loaded from the database into memory before\n executing the algorithm.\n- `\"running\"`: The algorithm is executing normally.\n- `\"storing\"`: The algorithm finished, but the results are still being written\n back into the collections. Only occurs if the `store` parameter is set to `true`.\n- `\"done\"`: The execution is done. This means that storing is also done.\n This event is announced in the server log (requires at least the `info`\n log level for the `pregel` log topic).\n- `\"canceled\"`: The execution was permanently canceled, either by the user or by\n an error.\n- `\"in error\"`: The execution is in an error state. This can be caused by\n primary DB-Servers being unreachable or unresponsive. The execution\n might recover later, or switch to `\"canceled\"` if it is not able to recover\n successfully.\n- `\"recovering\"`: The execution is actively recovering and\n switches back to `running` if the recovery is successful.\n- `\"fatal error\"`: The execution has failed and cannot recover.\n", + "type": { + "description": "specifies the type of the key generator. 
The currently available generators are\n`traditional`, `autoincrement`, `uuid` and `padded`.\n\n- The `traditional` key generator generates numerical keys in ascending order.\n The sequence of keys is not guaranteed to be gap-free.\n\n- The `autoincrement` key generator generates numerical keys in ascending order,\n the initial offset and the spacing can be configured (**note**: `autoincrement`\n is currently only supported for non-sharded collections).\n The sequence of generated keys is not guaranteed to be gap-free, because a new key\n will be generated on every document insert attempt, not just for successful\n inserts.\n\n- The `padded` key generator generates keys of a fixed length (16 bytes) in\n ascending lexicographical sort order. This is ideal for the RocksDB storage engine,\n which will slightly benefit keys that are inserted in lexicographically\n ascending order. The key generator can be used in a single-server or cluster.\n The sequence of generated keys is not guaranteed to be gap-free.\n\n- The `uuid` key generator generates universally unique 128 bit keys, which\n are stored in hexadecimal human-readable format. This key generator can be used\n in a single-server or cluster to generate \"seemingly random\" keys. The keys\n produced by this key generator are not lexicographically sorted.\n\nPlease note that keys are only guaranteed to be truly ascending in single\nserver deployments and for collections that only have a single shard (that includes\ncollections in a OneShard database).\nThe reason is that for collections with more than a single shard, document keys\nare generated on Coordinator(s). 
For collections with a single shard, the document\nkeys are generated on the leader DB-Server, which has full control over the key\nsequence.\n", "type": "string" - }, - "storageTime": { - "description": "The time for storing the results if the job includes results storage.\nIs shown when the storing started.\n", - "type": "number" - }, - "totalRuntime": { - "description": "The total runtime of the execution up to now (if the execution is still ongoing).\n", - "type": "number" - }, - "ttl": { - "description": "The TTL (time to live) value for the job results, specified in seconds.\nThe TTL is used to calculate the expiration date for the job's results.\n", - "type": "number" - }, - "vertexCount": { - "description": "The total number of vertices processed.\n", - "type": "integer" } }, - "required": [ - "id", - "algorithm", - "created", - "ttl", - "state", - "gss", - "totalRuntime", - "startupTime", - "computationTime", - "reports", - "detail" - ], "type": "object" }, - "type": "array" - } - } - }, - "description": "Is returned when the list of jobs can be retrieved successfully.\n" - } - }, - "summary": "List the running Pregel jobs", - "tags": [ - "Pregel" - ] - }, - "post": { - "description": "To start an execution you need to specify the algorithm name and a named graph\n(SmartGraph in cluster). Alternatively you can specify the vertex and edge\ncollections. Additionally you can specify custom parameters which vary for each\nalgorithm.\n", - "operationId": "createPregelJob", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "algorithm": { - "description": "Name of the algorithm. 
One of:\n- `\"pagerank\"` - Page Rank\n- `\"sssp\"` - Single-Source Shortest Path\n- `\"connectedcomponents\"` - Connected Components\n- `\"wcc\"` - Weakly Connected Components\n- `\"scc\"` - Strongly Connected Components\n- `\"hits\"` - Hyperlink-Induced Topic Search\n- `\"effectivecloseness\"` - Effective Closeness\n- `\"linerank\"` - LineRank\n- `\"labelpropagation\"` - Label Propagation\n- `\"slpa\"` - Speaker-Listener Label Propagation\n", + "name": { + "description": "The name of the collection.\n", "type": "string" }, - "edgeCollections": { - "description": "List of edge collection names.\nPlease note that there are special sharding requirements for collections in order\nto be used with Pregel.\n", - "items": { - "type": "string" - }, - "type": "array" + "numberOfShards": { + "default": 1, + "description": "In a cluster, this value determines the\nnumber of shards to create for the collection.\n", + "type": "integer" }, - "graphName": { - "description": "Name of a graph. Either this or the parameters `vertexCollections` and\n`edgeCollections` are required.\nPlease note that there are special sharding requirements for graphs in order\nto be used with Pregel.\n", - "type": "string" + "replicationFactor": { + "default": 1, + "description": "In a cluster, this attribute determines how many copies\nof each shard are kept on different DB-Servers. The value 1 means that only one\ncopy (no synchronous replication) is kept. A value of k means that k-1 replicas\nare kept. For SatelliteCollections, it needs to be the string `\"satellite\"`,\nwhich matches the replication factor to the number of DB-Servers\n(Enterprise Edition only).\n\nAny two copies reside on different DB-Servers. 
Replication between them is\nsynchronous, that is, every write operation to the \"leader\" copy will be replicated\nto all \"follower\" replicas, before the write operation is reported successful.\n\nIf a server fails, this is detected automatically and one of the servers holding\ncopies take over, usually without an error being reported.\n", + "type": "integer" }, - "params": { - "description": "General as well as algorithm-specific options.\n\nThe most important general option is \"store\", which controls whether the results\ncomputed by the Pregel job are written back into the source collections or not.\n\nAnother important general option is \"parallelism\", which controls the number of\nparallel threads that work on the Pregel job at most. If \"parallelism\" is not\nspecified, a default value may be used. In addition, the value of \"parallelism\"\nmay be effectively capped at some server-specific value.\n\nThe option \"useMemoryMaps\" controls whether to use disk based files to store\ntemporary results. This might make the computation disk-bound, but allows you to\nrun computations which would not fit into main memory. It is recommended to set\nthis flag for larger datasets.\n\nThe attribute \"shardKeyAttribute\" specifies the shard key that edge collections are\nsharded after (default: `\"vertex\"`).\n", + "schema": { + "description": "Optional object that specifies the collection level schema for\ndocuments. 
The attribute keys `rule`, `level` and `message` must follow the\nrules documented in [Document Schema Validation](https://docs.arangodb.com/3.12/concepts/data-structure/documents/schema-validation/)\n", "type": "object" }, - "vertexCollections": { - "description": "List of vertex collection names.\nPlease note that there are special sharding requirements for collections in order\nto be used with Pregel.\n", - "items": { - "type": "string" - }, - "type": "array" + "shardKeys": { + "default": [ + "_key" + ], + "description": "In a cluster, this attribute determines\nwhich document attributes are used to determine the target shard for documents.\nDocuments are sent to shards based on the values of their shard key attributes.\nThe values of all shard key attributes in a document are hashed,\nand the hash value is used to determine the target shard.\n\n\u003e **INFO:**\nValues of shard key attributes cannot be changed once set.\n", + "type": "string" + }, + "shardingStrategy": { + "description": "This attribute specifies the name of the sharding strategy to use for\nthe collection. There are different sharding strategies\nto select from when creating a new collection. 
The selected `shardingStrategy`\nvalue remains fixed for the collection and cannot be changed afterwards.\nThis is important to make the collection keep its sharding settings and\nalways find documents already distributed to shards using the same\ninitial sharding algorithm.\n\nThe available sharding strategies are:\n- `community-compat`: default sharding used by ArangoDB\n Community Edition before version 3.4\n- `enterprise-compat`: default sharding used by ArangoDB\n Enterprise Edition before version 3.4\n- `enterprise-smart-edge-compat`: default sharding used by smart edge\n collections in ArangoDB Enterprise Edition before version 3.4\n- `hash`: default sharding used for new collections starting from version 3.4\n (excluding smart edge collections)\n- `enterprise-hash-smart-edge`: default sharding used for new\n smart edge collections starting from version 3.4\n- `enterprise-hex-smart-vertex`: sharding used for vertex collections of\n EnterpriseGraphs\n\nIf no sharding strategy is specified, the default is `hash` for\nall normal collections, `enterprise-hash-smart-edge` for all smart edge\ncollections, and `enterprise-hex-smart-vertex` for EnterpriseGraph\nvertex collections (the latter two require the *Enterprise Edition* of ArangoDB).\nManually overriding the sharding strategy does not yet provide a\nbenefit, but it may later in case other sharding strategies are added.\n", + "type": "string" + }, + "smartGraphAttribute": { + "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. 
Edges derive the\nattribute from their connected vertices.\n\nThis feature can only be used in the *Enterprise Edition*.\n", + "type": "string" + }, + "smartJoinAttribute": { + "description": "In an *Enterprise Edition* cluster, this attribute determines an attribute\nof the collection that must contain the shard key value of the referred-to\nSmartJoin collection. Additionally, the shard key for a document in this\ncollection must contain the value of this attribute, followed by a colon,\nfollowed by the actual primary key of the document.\n\nThis feature can only be used in the *Enterprise Edition* and requires the\n`distributeShardsLike` attribute of the collection to be set to the name\nof another collection. It also requires the `shardKeys` attribute of the\ncollection to be set to a single shard key attribute, with an additional ':'\nat the end.\nA further restriction is that whenever documents are stored or updated in the\ncollection, the value stored in the `smartJoinAttribute` must be a string.\n", + "type": "string" + }, + "type": { + "default": 2, + "description": "The type of the collection to create.\nThe following values for `type` are valid:\n\n- `2`: document collection\n- `3`: edge collection\n", + "type": "integer" + }, + "waitForSync": { + "default": false, + "description": "If set to `true`, then the data is synchronized to disk before returning from a\ndocument create, update, replace or removal operation.\n", + "type": "boolean" + }, + "writeConcern": { + "description": "Determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. 
The value of\n`writeConcern` cannot be greater than `replicationFactor`.\n\nIf `distributeShardsLike` is set, the default `writeConcern`\nis that of the prototype collection.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`.\nOtherwise, the default value is controlled by the current database's\ndefault `writeConcern`, which uses the `--cluster.write-concern`\nstartup option as default, which defaults to `1`. _(cluster only)_\n", + "type": "integer" } }, "required": [ - "algorithm" + "name" ], "type": "object" } } } }, - "responses": { - "200": { - "description": "HTTP 200 is returned in case the Pregel was successfully created and the reply\nbody is a string with the `id` to query for the status or to cancel the\nexecution.\n" - }, - "400": { - "description": "An HTTP 400 error is returned if the set of collections for the Pregel job includes\na system collection, or if the collections to not conform to the sharding requirements\nfor Pregel jobs.\n" - }, - "403": { - "description": "An HTTP 403 error is returned if there are not sufficient privileges to access\nthe collections specified for the Pregel job.\n" - }, - "404": { - "description": "An HTTP 404 error is returned if the specified \"algorithm\" is not found, or the\ngraph specified in \"graphName\" is not found, or at least one the collections\nspecified in \"vertexCollections\" or \"edgeCollections\" is not found.\n" - } - }, - "summary": "Start a Pregel job execution", - "tags": [ - "Pregel" - ] - } - }, - "/_api/control_pregel/history": { - "delete": { - "description": "Removes the persisted execution statistics of all past Pregel jobs.\n", - "operationId": "deleteAllPregelJobStatistics", - "responses": { - "200": { - "description": "is returned if all persisted execution statistics have been successfully deleted.\n" - } - }, - "summary": "Remove the execution statistics of all past Pregel jobs", - "tags": [ - "Pregel" - ] 
- }, - "get": { - "description": "Returns a list of currently running and finished Pregel jobs without retrieving\ntheir results.\n\nThe execution statistics are persisted to a system collection and kept until you\nremove them, whereas the `/_api/control_pregel` endpoint only keeps the\ninformation temporarily in memory.\n", - "operationId": "listPregelJobsStatisics", "responses": { "200": { "content": { "application/json": { "schema": { - "description": "A list of objects describing the Pregel jobs.\n", - "items": { - "properties": { - "algorithm": { - "description": "The algorithm used by the job.\n", - "type": "string" - }, - "computationTime": { - "description": "The algorithm execution time. Is shown when the computation started.\n", - "type": "number" - }, - "created": { - "description": "The date and time when the job was created.\n", - "type": "string" - }, - "detail": { - "description": "The Pregel run details.\n", + "description": "", + "properties": { + "cacheEnabled": { + "description": "Whether the in-memory hash cache for documents is enabled for this\ncollection.\n", + "type": "boolean" + }, + "computedValues": { + "description": "A list of objects, each representing a computed value.\n", + "items": { "properties": { - "aggregatedStatus": { - "description": "The aggregated details of the full Pregel run. 
The values are totals of all the\nDB-Server.\n", - "properties": { - "allGssStatus": { - "description": "Information about the global supersteps.\n", - "properties": { - "items": { - "description": "A list of objects with details for each global superstep.\n", - "items": { - "properties": { - "memoryBytesUsedForMessages": { - "description": "The number of bytes used in memory for the messages in this step.\n", - "type": "integer" - }, - "messagesReceived": { - "description": "The number of messages received in this step.\n", - "type": "integer" - }, - "messagesSent": { - "description": "The number of messages sent in this step.\n", - "type": "integer" - }, - "verticesProcessed": { - "description": "The number of vertices that have been processed in this step.\n", - "type": "integer" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "graphStoreStatus": { - "description": "The status of the in memory graph.\n", - "properties": { - "edgesLoaded": { - "description": "The number of edges that are loaded from the database into memory.\n", - "type": "integer" - }, - "memoryBytesUsed": { - "description": "The number of bytes used in-memory for the loaded graph.\n", - "type": "integer" - }, - "verticesLoaded": { - "description": "The number of vertices that are loaded from the database into memory.\n", - "type": "integer" - }, - "verticesStored": { - "description": "The number of vertices that are written back to the database after the Pregel\ncomputation finished. 
It is only set if the `store` parameter is set to `true`.\n", - "type": "integer" - } - }, - "type": "object" - }, - "timeStamp": { - "description": "The time at which the status was measured.\n", - "type": "string" - } - }, - "required": [ - "timeStamp" + "computeOn": { + "description": "An array of strings that defines on which write operations the value is\ncomputed.\n", + "example": [ + "insert", + "update", + "replace" ], - "type": "object" + "items": { + "enum": [ + "insert", + "update", + "replace" + ], + "type": "string" + }, + "type": "array", + "uniqueItems": true }, - "workerStatus": { - "description": "The details of the Pregel for every DB-Server. Each object key is a DB-Server ID,\n\nand each value is a nested object similar to the `aggregatedStatus` attribute.\n\nIn a single server deployment, there is only a single entry with an empty string as key.\n", - "type": "object" + "expression": { + "description": "An AQL `RETURN` operation with an expression that computes the desired value.\n", + "type": "string" + }, + "failOnWarning": { + "description": "Whether the write operation fails if the expression produces a warning.\n", + "type": "boolean" + }, + "keepNull": { + "description": "Whether the target attribute is set if the expression evaluates to `null`.\n", + "type": "boolean" + }, + "name": { + "description": "The name of the target attribute.\n", + "type": "string" + }, + "overwrite": { + "description": "Whether the computed value takes precedence over a user-provided or\nexisting attribute.\n", + "type": "boolean" } }, "required": [ - "aggregatedStatus", - "workerStatus" + "name", + "expression", + "overwrite" ], "type": "object" }, - "edgeCount": { - "description": "The total number of edges processed.\n", - "type": "integer" - }, - "expires": { - "description": "The date and time when the job results expire. The expiration date is only\nmeaningful for jobs that were completed, canceled or resulted in an error. 
Such jobs\nare cleaned up by the garbage collection when they reach their expiration date/time.\n", - "type": "string" - }, - "gss": { - "description": "The number of global supersteps executed.\n", - "type": "integer" - }, - "gssTimes": { - "description": "Computation time of each global super step. Is shown when the computation started.\n", - "items": { - "type": "number" + "type": "array" + }, + "distributeShardsLike": { + "description": "The name of another collection. This collection uses the `replicationFactor`,\n`numberOfShards` and `shardingStrategy` properties of the other collection and\nthe shards of this collection are distributed in the same way as the shards of\nthe other collection.\n", + "type": "string" + }, + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isDisjoint": { + "description": "Whether the SmartGraph or EnterpriseGraph this collection belongs to is disjoint\n(Enterprise Edition only). This is an internal property. _(cluster only)_\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the collection is used in a SmartGraph or EnterpriseGraph (Enterprise Edition only).\nThis is an internal property. _(cluster only)_\n", + "type": "boolean" + }, + "isSystem": { + "description": "Whether the collection is a system collection. Collection names that starts with\nan underscore are usually system collections.\n", + "type": "boolean" + }, + "keyOptions": { + "description": "An object which contains key generation options.\n", + "properties": { + "allowUserKeys": { + "description": "If set to `true`, then you are allowed to supply\nown key values in the `_key` attribute of a document. 
If set to\n`false`, then the key generator is solely responsible for\ngenerating keys and an error is raised if you supply own key values in the\n`_key` attribute of documents.\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", + "type": "boolean" }, - "type": "array" - }, - "id": { - "description": "The ID of the Pregel job, as a string.\n", - "type": "string" - }, - "reports": { - "description": "This attribute is used by Programmable Pregel Algorithms (`ppa`, experimental).\nThe value is only populated once the algorithm has finished.\n", - "items": { - "type": "object" + "increment": { + "description": "The increment value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" }, - "type": "array" - }, - "startupTime": { - "description": "The startup runtime of the execution.\nThe startup time includes the data loading time and can be substantial.\n", - "type": "number" - }, - "state": { - "description": "The state of the execution. The following values can be returned:\n- `\"none\"`: The Pregel run has not started yet.\n- `\"loading\"`: The graph is being loaded from the database into memory before\n executing the algorithm.\n- `\"running\"`: The algorithm is executing normally.\n- `\"storing\"`: The algorithm finished, but the results are still being written\n back into the collections. Only occurs if the `store` parameter is set to `true`.\n- `\"done\"`: The execution is done. 
This means that storing is also done.\n This event is announced in the server log (requires at least the `info`\n log level for the `pregel` log topic).\n- `\"canceled\"`: The execution was permanently canceled, either by the user or by\n an error.\n- `\"in error\"`: The execution is in an error state. This can be caused by\n primary DB-Servers being unreachable or unresponsive. The execution\n might recover later, or switch to `\"canceled\"` if it is not able to recover\n successfully.\n- `\"recovering\"`: The execution is actively recovering and\n switches back to `running` if the recovery is successful.\n- `\"fatal error\"`: The execution has failed and cannot recover.\n", - "type": "string" - }, - "storageTime": { - "description": "The time for storing the results if the job includes results storage.\nIs shown when the storing started.\n", - "type": "number" - }, - "totalRuntime": { - "description": "The total runtime of the execution up to now (if the execution is still ongoing).\n", - "type": "number" - }, - "ttl": { - "description": "The TTL (time to live) value for the job results, specified in seconds.\nThe TTL is used to calculate the expiration date for the job's results.\n", - "type": "number" + "lastValue": { + "description": "The offset value for the `autoincrement` or `padded` key generator.\nThis is an internal property for restoring dumps properly.\n", + "type": "integer" + }, + "offset": { + "description": "The initial offset value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" + }, + "type": { + "description": "Specifies the type of the key generator.\n", + "enum": [ + "traditional", + "autoincrement", + "uuid", + "padded" + ], + "type": "string" + } }, - "vertexCount": { - "description": "The total number of vertices processed.\n", - "type": "integer" - } + "required": [ + "type", + "allowUserKeys" + ], + "type": "object" }, - "required": [ - "id", - "algorithm", - "created", - "ttl", - 
"state", - "gss", - "totalRuntime", - "startupTime", - "computationTime", - "reports", - "detail" - ], - "type": "object" - }, - "type": "array" - } - } - }, - "description": "is returned if the list of jobs can be retrieved successfully.\n" - } - }, - "summary": "Get the execution statistics of all Pregel jobs", - "tags": [ - "Pregel" - ] - } - }, - "/_api/control_pregel/history/{id}": { - "delete": { - "description": "Removes the persisted execution statistics of a finished Pregel job.\n", - "operationId": "deletePregelJobStatistics", - "parameters": [ - { - "description": "The Pregel job identifier.\n", - "in": "path", - "name": "id", - "required": true, - "schema": { - "type": "number" - } - } - ], - "responses": { - "200": { - "description": "is returned if the Pregel job ID is valid.\n" - }, - "404": { - "description": "is returned if no Pregel job with the specified ID is found or if the ID\nis invalid.\n" - } - }, - "summary": "Remove the execution statistics of a past Pregel job", - "tags": [ - "Pregel" - ] - }, - "get": { - "description": "Returns the current state of the execution, the current global superstep, the\nruntime, the global aggregator values, as well as the number of sent and\nreceived messages.\n\nThe execution statistics are persisted to a system collection and kept until you\nremove them, whereas the `/_api/control_pregel/{id}` endpoint only keeps the\ninformation temporarily in memory.\n", - "operationId": "getPregelJobStatistics", - "parameters": [ - { - "description": "Pregel job identifier.\n", - "in": "path", - "name": "id", - "required": true, - "schema": { - "type": "number" - } - } - ], - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "description": "The information about the Pregel job.\n", - "properties": { - "algorithm": { - "description": "The algorithm used by the job.\n", - "type": "string" - }, - "computationTime": { - "description": "The algorithm execution time. 
Is shown when the computation started.\n", - "type": "number" - }, - "created": { - "description": "The date and time when the job was created.\n", + "name": { + "description": "The name of this collection.\n", "type": "string" }, - "detail": { - "description": "The Pregel run details.\n", - "properties": { - "aggregatedStatus": { - "description": "The aggregated details of the full Pregel run. The values are totals of all the\nDB-Server.\n", - "properties": { - "allGssStatus": { - "description": "Information about the global supersteps.\n", - "properties": { - "items": { - "description": "A list of objects with details for each global superstep.\n", - "items": { - "properties": { - "memoryBytesUsedForMessages": { - "description": "The number of bytes used in memory for the messages in this step.\n", - "type": "integer" - }, - "messagesReceived": { - "description": "The number of messages received in this step.\n", - "type": "integer" - }, - "messagesSent": { - "description": "The number of messages sent in this step.\n", - "type": "integer" - }, - "verticesProcessed": { - "description": "The number of vertices that have been processed in this step.\n", - "type": "integer" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "graphStoreStatus": { - "description": "The status of the in memory graph.\n", - "properties": { - "edgesLoaded": { - "description": "The number of edges that are loaded from the database into memory.\n", - "type": "integer" - }, - "memoryBytesUsed": { - "description": "The number of bytes used in-memory for the loaded graph.\n", - "type": "integer" - }, - "verticesLoaded": { - "description": "The number of vertices that are loaded from the database into memory.\n", - "type": "integer" - }, - "verticesStored": { - "description": "The number of vertices that are written back to the database after the Pregel\ncomputation finished. 
It is only set if the `store` parameter is set to `true`.\n", - "type": "integer" - } - }, - "type": "object" - }, - "timeStamp": { - "description": "The time at which the status was measured.\n", - "type": "string" - } - }, - "required": [ - "timeStamp" - ], - "type": "object" - }, - "workerStatus": { - "description": "The details of the Pregel for every DB-Server. Each object key is a DB-Server ID,\n\nand each value is a nested object similar to the `aggregatedStatus` attribute.\n\nIn a single server deployment, there is only a single entry with an empty string as key.\n", - "type": "object" - } - }, - "required": [ - "aggregatedStatus", - "workerStatus" - ], - "type": "object" - }, - "edgeCount": { - "description": "The total number of edges processed.\n", + "numberOfShards": { + "description": "The number of shards of the collection. _(cluster only)_\n", "type": "integer" }, - "expires": { - "description": "The date and time when the job results expire. The expiration date is only\nmeaningful for jobs that were completed, canceled or resulted in an error. Such jobs\nare cleaned up by the garbage collection when they reach their expiration date/time.\n", - "type": "string" - }, - "gss": { - "description": "The number of global supersteps executed.\n", + "replicationFactor": { + "description": "Contains how many copies of each shard are kept on different DB-Servers.\nIt is an integer number in the range of 1-10 or the string `\"satellite\"`\nfor SatelliteCollections (Enterprise Edition only). _(cluster only)_\n", "type": "integer" }, - "gssTimes": { - "description": "Computation time of each global super step. Is shown when the computation started.\n", + "schema": { + "description": "An object that specifies the collection-level schema for documents.\n", + "type": "object" + }, + "shardKeys": { + "description": "Contains the names of document attributes that are used to\ndetermine the target shard for documents. 
_(cluster only)_\n", "items": { - "type": "number" + "type": "string" }, "type": "array" }, - "id": { - "description": "The ID of the Pregel job, as a string.\n", + "shardingStrategy": { + "description": "The sharding strategy selected for the collection. _(cluster only)_\n", + "enum": [ + "community-compat", + "enterprise-compat", + "enterprise-smart-edge-compat", + "hash", + "enterprise-hash-smart-edge", + "enterprise-hex-smart-vertex" + ], "type": "string" }, - "reports": { - "description": "This attribute is used by Programmable Pregel Algorithms (`ppa`, experimental).\nThe value is only populated once the algorithm has finished.\n", - "items": { - "type": "object" - }, - "type": "array" - }, - "startupTime": { - "description": "The startup runtime of the execution.\nThe startup time includes the data loading time and can be substantial.\n", - "type": "number" + "smartGraphAttribute": { + "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. Edges derive the\nattribute from their connected vertices (Enterprise Edition only). _(cluster only)_\n", + "type": "string" }, - "state": { - "description": "The state of the execution. The following values can be returned:\n- `\"none\"`: The Pregel run has not started yet.\n- `\"loading\"`: The graph is being loaded from the database into memory before\n executing the algorithm.\n- `\"running\"`: The algorithm is executing normally.\n- `\"storing\"`: The algorithm finished, but the results are still being written\n back into the collections. Only occurs if the `store` parameter is set to `true`.\n- `\"done\"`: The execution is done. 
This means that storing is also done.\n This event is announced in the server log (requires at least the `info`\n log level for the `pregel` log topic).\n- `\"canceled\"`: The execution was permanently canceled, either by the user or by\n an error.\n- `\"in error\"`: The execution is in an error state. This can be caused by\n primary DB-Servers being unreachable or unresponsive. The execution\n might recover later, or switch to `\"canceled\"` if it is not able to recover\n successfully.\n- `\"recovering\"`: The execution is actively recovering and\n switches back to `running` if the recovery is successful.\n- `\"fatal error\"`: The execution has failed and cannot recover.\n", + "smartJoinAttribute": { + "description": "Determines an attribute of the collection that must contain the shard key value\nof the referred-to SmartJoin collection (Enterprise Edition only). _(cluster only)_\n", "type": "string" }, - "storageTime": { - "description": "The time for storing the results if the job includes results storage.\nIs shown when the storing started.\n", - "type": "number" + "syncByRevision": { + "description": "Whether the newer revision-based replication protocol is\nenabled for this collection. 
This is an internal property.\n", + "type": "boolean" }, - "totalRuntime": { - "description": "The total runtime of the execution up to now (if the execution is still ongoing).\n", - "type": "number" + "type": { + "description": "The type of the collection:\n - `0`: \"unknown\"\n - `2`: regular document collection\n - `3`: edge collection\n", + "type": "integer" }, - "ttl": { - "description": "The TTL (time to live) value for the job results, specified in seconds.\nThe TTL is used to calculate the expiration date for the job's results.\n", - "type": "number" + "waitForSync": { + "description": "If `true`, creating, changing, or removing\ndocuments waits until the data has been synchronized to disk.\n", + "type": "boolean" }, - "vertexCount": { - "description": "The total number of vertices processed.\n", + "writeConcern": { + "description": "Determines how many copies of each shard are required to be\nin-sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\n\nIf `distributeShardsLike` is set, the default `writeConcern`\nis that of the prototype collection.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`.\nOtherwise, the default value is controlled by the current database's\ndefault `writeConcern`, which uses the `--cluster.write-concern`\nstartup option as default, which defaults to `1`. 
_(cluster only)_\n", "type": "integer" } }, "required": [ - "id", - "algorithm", - "created", - "ttl", - "state", - "gss", - "totalRuntime", - "startupTime", - "computationTime", - "reports", - "detail" + "waitForSync", + "keyOptions", + "cacheEnabled", + "syncByRevision" ], "type": "object" } } }, - "description": "is returned if the Pregel job ID is valid and the execution statistics are\nreturned along with the response.\n" + "description": "The collection has been created.\n" }, - "404": { - "description": "is returned if no Pregel job with the specified ID is found or if the ID\nis invalid.\n" + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `name` or another required attribute is missing or an attribute\nhas an invalid value.\n" } }, - "summary": "Get the execution statistics of a Pregel job", + "summary": "Create a collection", "tags": [ - "Pregel" + "Collections" ] } }, - "/_api/control_pregel/{id}": { + "/_db/{database-name}/_api/collection/{collection-name}": { "delete": { - "description": "Cancel an execution which is still running, and discard any intermediate\nresults. 
This immediately frees all memory taken up by the execution, and\nmakes you lose all intermediary data.\n\nYou might get inconsistent results if you requested to store the results and\nthen cancel an execution when it is already in its `\"storing\"` state (or\n`\"done\"` state in versions prior to 3.7.1). The data is written multi-threaded\ninto all collection shards at once. This means there are multiple transactions\nsimultaneously. A transaction might already be committed when you cancel the\nexecution job. Therefore, you might see some updated documents, while other\ndocuments have no or stale results from a previous execution.\n", - "operationId": "deletePregelJob", + "description": "Delete the collection identified by `collection-name` and all its documents.\n", + "operationId": "deleteCollection", "parameters": [ { - "description": "Pregel execution identifier.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "id", + "name": "database-name", "required": true, "schema": { - "type": "number" + "type": "string" } - } - ], - "responses": { - "200": { - "description": "HTTP 200 is returned if the job execution ID was valid.\n" }, - "404": { - "description": "An HTTP 404 error is returned if no Pregel job with the specified execution number\nis found or the execution number is invalid.\n" - } - }, - "summary": "Cancel a Pregel job execution", - "tags": [ - "Pregel" - ] - }, - "get": { - "description": "Returns the current state of the execution, the current global superstep, the\nruntime, the global aggregator values as well as the number of sent and\nreceived messages.\n", - "operationId": "getPregelJob", - "parameters": [ { - "description": "Pregel execution identifier.\n", + "description": "The name of the collection to drop.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", "in": "path", - "name": "id", 
+ "name": "collection-name", "required": true, "schema": { - "type": "number" + "type": "string" + } + }, + { + "description": "Whether or not the collection to drop is a system collection. This parameter\nmust be set to `true` in order to drop a system collection.\n", + "in": "query", + "name": "isSystem", + "required": false, + "schema": { + "type": "boolean" } } ], @@ -6557,752 +5744,797 @@ "content": { "application/json": { "schema": { - "description": "The information about the Pregel job.\n", "properties": { - "algorithm": { - "description": "The algorithm used by the job.\n", + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "id": { + "description": "The identifier of the dropped collection.\n", "type": "string" + } + }, + "required": [ + "error", + "code", + "id" + ], + "type": "object" + } + } + }, + "description": "Dropping the collection has been successful.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" }, - "computationTime": { - "description": "The algorithm execution time. Is shown when the computation started.\n", - "type": "number" + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" }, - "created": { - "description": "The date and time when the job was created.\n", + "errorMessage": { + "description": "A descriptive error message.\n", "type": "string" }, - "detail": { - "description": "The Pregel run details.\n", - "properties": { - "aggregatedStatus": { - "description": "The aggregated details of the full Pregel run. 
The values are totals of all the\nDB-Server.\n", - "properties": { - "allGssStatus": { - "description": "Information about the global supersteps.\n", - "properties": { - "items": { - "description": "A list of objects with details for each global superstep.\n", - "items": { - "properties": { - "memoryBytesUsedForMessages": { - "description": "The number of bytes used in memory for the messages in this step.\n", - "type": "integer" - }, - "messagesReceived": { - "description": "The number of messages received in this step.\n", - "type": "integer" - }, - "messagesSent": { - "description": "The number of messages sent in this step.\n", - "type": "integer" - }, - "verticesProcessed": { - "description": "The number of vertices that have been processed in this step.\n", - "type": "integer" - } - }, - "type": "object" - }, - "type": "array" - } - }, - "type": "object" - }, - "graphStoreStatus": { - "description": "The status of the in memory graph.\n", - "properties": { - "edgesLoaded": { - "description": "The number of edges that are loaded from the database into memory.\n", - "type": "integer" - }, - "memoryBytesUsed": { - "description": "The number of bytes used in-memory for the loaded graph.\n", - "type": "integer" - }, - "verticesLoaded": { - "description": "The number of vertices that are loaded from the database into memory.\n", - "type": "integer" - }, - "verticesStored": { - "description": "The number of vertices that are written back to the database after the Pregel\ncomputation finished. It is only set if the `store` parameter is set to `true`.\n", - "type": "integer" - } - }, - "type": "object" - }, - "timeStamp": { - "description": "The time at which the status was measured.\n", - "type": "string" - } - }, - "required": [ - "timeStamp" - ], - "type": "object" - }, - "workerStatus": { - "description": "The details of the Pregel for every DB-Server. 
Each object key is a DB-Server ID,\n\nand each value is a nested object similar to the `aggregatedStatus` attribute.\n\nIn a single server deployment, there is only a single entry with an empty string as key.\n", - "type": "object" - } - }, - "required": [ - "aggregatedStatus", - "workerStatus" - ], - "type": "object" - }, - "edgeCount": { - "description": "The total number of edges processed.\n", + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter is missing.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, "type": "integer" }, - "expires": { - "description": "The date and time when the job results expire. The expiration date is only\nmeaningful for jobs that were completed, canceled or resulted in an error. 
Such jobs\nare cleaned up by the garbage collection when they reach their expiration date/time.\n", + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", "type": "string" }, - "gss": { - "description": "The number of global supersteps executed.\n", + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" + } + }, + "summary": "Drop a collection", + "tags": [ + "Collections" + ] + }, + "get": { + "description": "Returns the basic information about a specific collection.\n", + "operationId": "getCollection", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the collection.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", + "in": "path", + "name": "collection-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, "type": "integer" }, - "gssTimes": { - "description": "Computation time of each global super step. 
Is shown when the computation started.\n", - "items": { - "type": "number" - }, - "type": "array" + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" }, - "id": { - "description": "The ID of the Pregel job, as a string.\n", + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", "type": "string" }, - "reports": { - "description": "This attribute is used by Programmable Pregel Algorithms (`ppa`, experimental).\nThe value is only populated once the algorithm has finished.\n", - "items": { - "type": "object" - }, - "type": "array" - }, - "startupTime": { - "description": "The startup runtime of the execution.\nThe startup time includes the data loading time and can be substantial.\n", - "type": "number" - }, - "state": { - "description": "The state of the execution. The following values can be returned:\n- `\"none\"`: The Pregel run has not started yet.\n- `\"loading\"`: The graph is being loaded from the database into memory before\n executing the algorithm.\n- `\"running\"`: The algorithm is executing normally.\n- `\"storing\"`: The algorithm finished, but the results are still being written\n back into the collections. Only occurs if the `store` parameter is set to `true`.\n- `\"done\"`: The execution is done. This means that storing is also done.\n This event is announced in the server log (requires at least the `info`\n log level for the `pregel` log topic).\n- `\"canceled\"`: The execution was permanently canceled, either by the user or by\n an error.\n- `\"in error\"`: The execution is in an error state. This can be caused by\n primary DB-Servers being unreachable or unresponsive. 
The execution\n might recover later, or switch to `\"canceled\"` if it is not able to recover\n successfully.\n- `\"recovering\"`: The execution is actively recovering and\n switches back to `running` if the recovery is successful.\n- `\"fatal error\"`: The execution has failed and cannot recover.\n", + "id": { + "description": "A unique identifier of the collection (deprecated).\n", "type": "string" }, - "storageTime": { - "description": "The time for storing the results if the job includes results storage.\nIs shown when the storing started.\n", - "type": "number" + "isSystem": { + "description": "Whether the collection is a system collection. Collection names that starts with\nan underscore are usually system collections.\n", + "example": false, + "type": "boolean" }, - "totalRuntime": { - "description": "The total runtime of the execution up to now (if the execution is still ongoing).\n", - "type": "number" + "name": { + "description": "The name of the collection.\n", + "example": "coll", + "type": "string" }, - "ttl": { - "description": "The TTL (time to live) value for the job results, specified in seconds.\nThe TTL is used to calculate the expiration date for the job's results.\n", - "type": "number" + "status": { + "description": "The status of the collection.\n- `3`: loaded\n- `5`: deleted\n\nEvery other status indicates a corrupted collection.\n", + "example": 3, + "type": "integer" }, - "vertexCount": { - "description": "The total number of vertices processed.\n", + "type": { + "description": "The type of the collection:\n- `0`: \"unknown\"\n- `2`: regular document collection\n- `3`: edge collection\n", + "example": 2, "type": "integer" } }, "required": [ + "error", + "code", "id", - "algorithm", - "created", - "ttl", - "state", - "gss", - "totalRuntime", - "startupTime", - "computationTime", - "reports", - "detail" + "name", + "status", + "type", + "isSystem", + "globallyUniqueId" ], "type": "object" } } }, - "description": "HTTP 200 is returned in case 
the job execution ID was valid and the state is\nreturned along with the response.\n" + "description": "The basic information about a collection.\n" }, "404": { - "description": "An HTTP 404 error is returned if no Pregel job with the specified execution number\nis found or the execution number is invalid.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "example": 1203, + "type": "integer" + } + }, + "required": [ + "code", + "error", + "errorMessage", + "errorNum" + ], + "type": "object" + } + } + }, + "description": "The specified collection is unknown.\n" } }, - "summary": "Get a Pregel job execution status", + "summary": "Get the collection information", "tags": [ - "Pregel" + "Collections" ] } }, - "/_api/cursor": { - "post": { - "description": "Submits an AQL query for execution in the current database. The server returns\na result batch and may indicate that further batches need to be fetched using\na cursor identifier.\n\nThe query details include the query string plus optional query options and\nbind parameters. These values need to be passed in a JSON representation in\nthe body of the POST request.\n", - "operationId": "createAqlQueryCursor", + "/_db/{database-name}/_api/collection/{collection-name}/checksum": { + "get": { + "description": "Calculates a checksum of the meta-data (keys and optionally revision ids) and\noptionally the document data in the collection.\n\nThe checksum can be used to compare if two collections on different ArangoDB\ninstances contain the same contents. 
The current revision of the collection is\nreturned too so one can make sure the checksums are calculated for the same\nstate of data.\n\nBy default, the checksum is only calculated on the `_key` system attribute\nof the documents contained in the collection. For edge collections, the system\nattributes `_from` and `_to` are also included in the calculation.\n\nBy setting the optional query parameter `withRevisions` to `true`, then revision\nIDs (`_rev` system attributes) are included in the checksumming.\n\nBy providing the optional query parameter `withData` with a value of `true`,\nthe user-defined document attributes are included in the calculation, too.\n\n\u003e **INFO:**\nIncluding user-defined attributes will make the checksumming slower.\n", + "operationId": "getCollectionChecksum", "parameters": [ { - "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. This may result in \"dirty reads\".\n\nThe header is ignored if this operation is part of a Stream Transaction\n(`x-arango-trx-id` header). 
The header set when creating the transaction decides\nabout dirty reads for the entire transaction, not the individual read operations.\n", - "in": "header", - "name": "x-arango-allow-dirty-read", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the collection.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", + "in": "path", + "name": "collection-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Whether or not to include document revision ids in the checksum calculation.\n", + "in": "query", + "name": "withRevisions", "required": false, "schema": { "type": "boolean" } }, { - "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", - "in": "header", - "name": "x-arango-trx-id", + "description": "Whether or not to include document body data in the checksum calculation.\n", + "in": "query", + "name": "withData", "required": false, "schema": { - "type": "string" + "type": "boolean" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "batchSize": { - "description": "maximum number of result documents to be transferred from\nthe server to the client in one roundtrip. If this attribute is\nnot set, a server-controlled default value will be used. A `batchSize` value of\n`0` is disallowed.\n", - "type": "integer" - }, - "bindVars": { - "description": "An object with key/value pairs representing the bind parameters.\nFor a bind variable `@var` in the query, specify the value using an attribute\nwith the name `var`. For a collection bind variable `@@coll`, use `@coll` as the\nattribute name. 
For example: `\"bindVars\": { \"var\": 42, \"@coll\": \"products\" }`.\n", - "type": "object" - }, - "cache": { - "description": "flag to determine whether the AQL query results cache\nshall be used. If set to `false`, then any query cache lookup will be skipped\nfor the query. If set to `true`, it will lead to the query cache being checked\nfor the query if the query cache mode is either `on` or `demand`.\n", - "type": "boolean" - }, - "count": { - "description": "indicates whether the number of documents in the result set should be returned in\nthe \"count\" attribute of the result.\nCalculating the \"count\" attribute might have a performance impact for some queries\nin the future so this option is turned off by default, and \"count\"\nis only returned when requested.\n", - "type": "boolean" - }, - "memoryLimit": { - "description": "the maximum number of memory (measured in bytes) that the query is allowed to\nuse. If set, then the query will fail with error \"resource limit exceeded\" in\ncase it allocates too much memory. A value of `0` indicates that there is no\nmemory limit.\n", - "type": "integer" - }, - "options": { - "description": "key/value object with extra options for the query.\n", - "properties": { - "allowDirtyReads": { - "description": "If you set this option to `true` and execute the query against a cluster\ndeployment, then the Coordinator is allowed to read from any shard replica and\nnot only from the leader.\n\nYou may observe data inconsistencies (dirty reads) when reading from followers,\nnamely obsolete revisions of documents because changes have not yet been\nreplicated to the follower, as well as changes to documents before they are\nofficially committed on the leader.\n\nThis feature is only available in the Enterprise Edition.\n", - "type": "boolean" - }, - "allowRetry": { - "description": "Set this option to `true` to make it possible to retry\nfetching the latest batch from a cursor. 
The default is `false`.\n\nIf retrieving a result batch fails because of a connection issue, you can ask\nfor that batch again using the `POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e`\nendpoint. The first batch has an ID of `1` and the value is incremented by 1\nwith every batch. Every result response except the last one also includes a\n`nextBatchId` attribute, indicating the ID of the batch after the current.\nYou can remember and use this batch ID should retrieving the next batch fail.\n\nYou can only request the latest batch again (or the next batch).\nEarlier batches are not kept on the server-side.\nRequesting a batch again does not advance the cursor.\n\nYou can also call this endpoint with the next batch identifier, i.e. the value\nreturned in the `nextBatchId` attribute of a previous request. This advances the\ncursor and returns the results of the next batch. This is only supported if there\nare more results in the cursor (i.e. `hasMore` is `true` in the latest batch).\n\nFrom v3.11.1 onward, you may use the `POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e`\nendpoint even if the `allowRetry` attribute is `false` to fetch the next batch,\nbut you cannot request a batch again unless you set it to `true`.\n\nTo allow refetching of the very last batch of the query, the server cannot\nautomatically delete the cursor. After the first attempt of fetching the last\nbatch, the server would normally delete the cursor to free up resources. As you\nmight need to reattempt the fetch, it needs to keep the final batch when the\n`allowRetry` option is enabled. 
Once you successfully received the last batch,\nyou should call the `DELETE /_api/cursor/\u003ccursor-id\u003e` endpoint so that the\nserver doesn't unnecessarily keep the batch until the cursor times out\n(`ttl` query option).\n", - "type": "boolean" - }, - "failOnWarning": { - "description": "When set to `true`, the query will throw an exception and abort instead of producing\na warning. This option should be used during development to catch potential issues\nearly. When the attribute is set to `false`, warnings will not be propagated to\nexceptions and will be returned with the query result.\nThere is also a server configuration option `--query.fail-on-warning` for setting the\ndefault value for `failOnWarning` so it does not need to be set on a per-query level.\n", - "type": "boolean" - }, - "fillBlockCache": { - "description": "if set to `true` or not specified, this will make the query store the data it\nreads via the RocksDB storage engine in the RocksDB block cache. This is usually\nthe desired behavior. The option can be set to `false` for queries that are\nknown to either read a lot of data which would thrash the block cache, or for queries\nthat read data which are known to be outside of the hot set. By setting the option\nto `false`, data read by the query will not make it into the RocksDB block cache if\nnot already in there, thus leaving more room for the actual hot set.\n", - "type": "boolean" - }, - "fullCount": { - "description": "if set to `true` and the query contains a `LIMIT` clause, then the\nresult will have an `extra` attribute with the sub-attributes `stats`\nand `fullCount`, `{ ... , \"extra\": { \"stats\": { \"fullCount\": 123 } } }`.\nThe `fullCount` attribute will contain the number of documents in the result before the\nlast top-level LIMIT in the query was applied. 
It can be used to count the number of\ndocuments that match certain filter criteria, but only return a subset of them, in one go.\nIt is thus similar to MySQL's *SQL_CALC_FOUND_ROWS* hint. Note that setting the option\nwill disable a few LIMIT optimizations and may lead to more documents being processed,\nand thus make queries run longer. Note that the `fullCount` attribute may only\nbe present in the result if the query has a top-level LIMIT clause and the LIMIT\nclause is actually used in the query.\n", - "type": "boolean" - }, - "intermediateCommitCount": { - "description": "The maximum number of operations after which an intermediate commit is performed\nautomatically.\n", - "type": "integer" - }, - "intermediateCommitSize": { - "description": "The maximum total size of operations after which an intermediate commit is performed\nautomatically.\n", - "type": "integer" - }, - "maxDNFConditionMembers": { - "description": "A threshold for the maximum number of `OR` sub-nodes in the internal\nrepresentation of an AQL `FILTER` condition.\n\nYon can use this option to limit the computation time and memory usage when\nconverting complex AQL `FILTER` conditions into the internal DNF\n(disjunctive normal form) format. `FILTER` conditions with a lot of logical\nbranches (`AND`, `OR`, `NOT`) can take a large amount of processing time and\nmemory. 
This query option limits the computation time and memory usage for\nsuch conditions.\n\nOnce the threshold value is reached during the DNF conversion of a `FILTER`\ncondition, the conversion is aborted, and the query continues with a simplified\ninternal representation of the condition, which **cannot be used for index lookups**.\n\nYou can set the threshold globally instead of per query with the\n`--query.max-dnf-condition-members` startup option.\n", - "type": "integer" - }, - "maxNodesPerCallstack": { - "description": "The number of execution nodes in the query plan after that stack splitting is\nperformed to avoid a potential stack overflow. Defaults to the configured value\nof the startup option `--query.max-nodes-per-callstack`.\n\nThis option is only useful for testing and debugging and normally does not need\nany adjustment.\n", - "type": "integer" - }, - "maxNumberOfPlans": { - "description": "Limits the maximum number of plans that are created by the AQL query optimizer.\n", - "type": "integer" - }, - "maxRuntime": { - "description": "The query has to be executed within the given runtime or it is killed.\nThe value is specified in seconds. The default value is `0.0` (no timeout).\n", - "type": "number" - }, - "maxTransactionSize": { - "description": "The transaction size limit in bytes.\n", - "type": "integer" - }, - "maxWarningCount": { - "description": "Limits the maximum number of warnings a query will return. The number of warnings\na query will return is limited to 10 by default, but that number can be increased\nor decreased by setting this attribute.\n", - "type": "integer" - }, - "optimizer": { - "description": "Options related to the query optimizer.\n", - "properties": { - "rules": { - "description": "A list of to-be-included or to-be-excluded optimizer rules can be put into this\nattribute, telling the optimizer to include or exclude specific rules. To disable\na rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. 
There is\nalso a pseudo-rule `all`, which matches all optimizer rules. `-all` disables all rules.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "profile": { - "description": "If set to `true` or `1`, then the additional query profiling information is returned\nin the `profile` sub-attribute of the `extra` return attribute, unless the query result\nis served from the query cache. If set to `2`, the query includes execution stats\nper query plan node in `stats.nodes` sub-attribute of the `extra` return attribute.\nAdditionally, the query plan is returned in the `extra.plan` sub-attribute.\n", - "type": "integer" - }, - "satelliteSyncWait": { - "description": "This *Enterprise Edition* parameter allows to configure how long a DB-Server has time\nto bring the SatelliteCollections involved in the query into sync.\nThe default value is `60.0` seconds. When the maximal time is reached, the query\nis stopped.\n", - "type": "number" - }, - "skipInaccessibleCollections": { - "description": "Let AQL queries (especially graph traversals) treat collection to which a user\nhas no access rights for as if these collections are empty. Instead of returning a\nforbidden access error, your queries execute normally. This is intended to help\nwith certain use-cases: A graph contains several collections and different users\nexecute AQL queries on that graph. 
You can naturally limit the accessible\nresults by changing the access rights of users on collections.\n\nThis feature is only available in the Enterprise Edition.\n", - "type": "boolean" - }, - "spillOverThresholdMemoryUsage": { - "description": "This option allows queries to store intermediate and final results temporarily\non disk if the amount of memory used (in bytes) exceeds the specified value.\nThis is used for decreasing the memory usage during the query execution.\n\nThis option only has an effect on queries that use the `SORT` operation but\nwithout a `LIMIT`, and if you enable the spillover feature by setting a path\nfor the directory to store the temporary data in with the\n`--temp.intermediate-results-path` startup option.\n\nDefault value: 128MB.\n\n\u003e **INFO:**\nSpilling data from RAM onto disk is an experimental feature and is turned off\nby default. The query results are still built up entirely in RAM on Coordinators\nand single servers for non-streaming queries. To avoid the buildup of\nthe entire query result in RAM, use a streaming query (see the `stream` option).\n", - "type": "integer" - }, - "spillOverThresholdNumRows": { - "description": "This option allows queries to store intermediate and final results temporarily\non disk if the number of rows produced by the query exceeds the specified value.\nThis is used for decreasing the memory usage during the query execution. In a\nquery that iterates over a collection that contains documents, each row is a\ndocument, and in a query that iterates over temporary values\n(i.e. 
`FOR i IN 1..100`), each row is one of such temporary values.\n\nThis option only has an effect on queries that use the `SORT` operation but\nwithout a `LIMIT`, and if you enable the spillover feature by setting a path\nfor the directory to store the temporary data in with the\n`--temp.intermediate-results-path` startup option.\n\nDefault value: `5000000` rows.\n\n\u003e **INFO:**\nSpilling data from RAM onto disk is an experimental feature and is turned off\nby default. The query results are still built up entirely in RAM on Coordinators\nand single servers for non-streaming queries. To avoid the buildup of\nthe entire query result in RAM, use a streaming query (see the `stream` option).\n", - "type": "integer" - }, - "stream": { - "description": "Can be enabled to execute the query lazily. If set to `true`, then the query is\nexecuted as long as necessary to produce up to `batchSize` results. These\nresults are returned immediately and the query is suspended until the client\nasks for the next batch (if there are more results). Depending on the query\nthis can mean that the first results will be available much faster and that\nless memory is needed because the server only needs to store a subset of\nresults at a time. Read-only queries can benefit the most, unless `SORT`\nwithout index or `COLLECT` are involved that make it necessary to process all\ndocuments before a partial result can be returned. It is advisable to only use\nthis option for queries without exclusive locks.\n\nRemarks:\n- The query will hold resources until it ends (such as RocksDB snapshots, which\n prevents compaction to some degree). 
Writes will be in memory until the query\n is committed.\n- If existing documents are modified, then write locks are held on these\n documents and other queries trying to modify the same documents will fail\n because of this conflict.\n- A streaming query may fail late because of a conflict or for other reasons\n after some batches were already returned successfully, possibly rendering the\n results up to that point meaningless.\n- The query options `cache`, `count` and `fullCount` are not supported for\n streaming queries.\n- Query statistics, profiling data and warnings are delivered as part of the\n last batch.\n\nIf the `stream` option is `false` (default), then the complete result of the\nquery is calculated before any of it is returned to the client. The server\nstores the full result in memory (on the contacted Coordinator if in a cluster).\nAll other resources are freed immediately (locks, RocksDB snapshots). The query\nwill fail before it returns results in case of a conflict.\n", - "type": "boolean" - } + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "checksum": { + "description": "The calculated checksum as a number.\n" }, - "type": "object" - }, - "query": { - "description": "contains the query string to be executed\n", - "type": "string" + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isSystem": { + "description": "Whether the collection is a system collection. 
Collection names that starts with\nan underscore are usually system collections.\n", + "example": false, + "type": "boolean" + }, + "name": { + "description": "The name of the collection.\n", + "example": "coll", + "type": "string" + }, + "revision": { + "description": "The collection revision id as a string.\n" + }, + "status": { + "description": "The status of the collection.\n- `3`: loaded\n- `5`: deleted\n\nEvery other status indicates a corrupted collection.\n", + "example": 3, + "type": "integer" + }, + "type": { + "description": "The type of the collection:\n- `0`: \"unknown\"\n- `2`: regular document collection\n- `3`: edge collection\n", + "example": 2, + "type": "integer" + } }, - "ttl": { - "description": "The time-to-live for the cursor (in seconds). If the result set is small enough\n(less than or equal to `batchSize`) then results are returned right away.\nOtherwise they are stored in memory and will be accessible via the cursor with\nrespect to the `ttl`. The cursor will be removed on the server automatically\nafter the specified amount of time. This is useful to ensure garbage collection\nof cursors that are not fully fetched by clients. 
If not set, a server-defined\nvalue will be used (default: 30 seconds).\nThe time-to-live is renewed upon every access to the cursor.\n", - "type": "integer" - } - }, - "required": [ - "query" - ], - "type": "object" + "required": [ + "checksum", + "revision", + "error", + "code", + "id", + "name", + "status", + "type", + "isSystem", + "globallyUniqueId" + ], + "type": "object" + } } - } + }, + "description": "The basic information about the collection but additionally the\ncollection `checksum` and `revision`.\n" + }, + "400": { + "description": "If the `collection-name` placeholder is missing, then a *HTTP 400* is\nreturned.\n" + }, + "404": { + "description": "If the collection is unknown, then a *HTTP 404*\nis returned.\n" } }, + "summary": "Get the collection checksum", + "tags": [ + "Collections" + ] + } + }, + "/_db/{database-name}/_api/collection/{collection-name}/compact": { + "put": { + "description": "Compacts the data of a collection in order to reclaim disk space.\nThe operation will compact the document and index data by rewriting the\nunderlying .sst files and only keeping the relevant entries.\n\nUnder normal circumstances, running a compact operation is not necessary, as\nthe collection data will eventually get compacted anyway. However, in some\nsituations, e.g. after running lots of update/replace or remove operations,\nthe disk data for a collection may contain a lot of outdated data for which the\nspace shall be reclaimed. 
In this case the compaction operation can be used.\n", + "operationId": "compactCollection", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the collection to compact\n", + "in": "path", + "name": "collection-name", + "required": true, + "schema": { + "type": "string" + } + } + ], "responses": { - "201": { + "200": { "content": { "application/json": { "schema": { "properties": { - "cached": { - "description": "A boolean flag indicating whether the query result was served\nfrom the query cache or not. If the query result is served from the query\ncache, the `extra` return attribute will not contain any `stats` sub-attribute\nand no `profile` sub-attribute.\n", + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, "type": "boolean" }, - "code": { - "description": "The HTTP status code.\n", + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isSystem": { + "description": "Whether the collection is a system collection. 
Collection names that starts with\nan underscore are usually system collections.\n", + "example": false, + "type": "boolean" + }, + "name": { + "description": "The name of the collection.\n", + "example": "coll", + "type": "string" + }, + "status": { + "description": "The status of the collection.\n- `3`: loaded\n- `5`: deleted\n\nEvery other status indicates a corrupted collection.\n", + "example": 3, "type": "integer" }, - "count": { - "description": "The total number of result documents available (only\navailable if the query was executed with the `count` attribute set).\n", + "type": { + "description": "The type of the collection:\n- `0`: \"unknown\"\n- `2`: regular document collection\n- `3`: edge collection\n", + "example": 2, + "type": "integer" + } + }, + "required": [ + "error", + "code", + "name", + "type", + "isSystem", + "status", + "id", + "globallyUniqueId" + ], + "type": "object" + } + } + }, + "description": "The compaction has been started successfully.\n" + }, + "401": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 401, "type": "integer" }, "error": { - "description": "A flag to indicate that an error occurred (`false` in this case).\n", + "description": "A flag indicating that an error occurred.\n", + "example": true, "type": "boolean" }, - "extra": { - "description": "An optional JSON object with extra information about the query result.\n\nOnly delivered as part of the first batch, or the last batch in case of a cursor\nwith the `stream` option enabled.\n", - "properties": { - "plan": { - "description": "The execution plan.\n", - "properties": { - "collections": { - "description": "A list of the collections involved in the query. 
The list only includes the\ncollections that can statically be determined at query compile time.\n", - "items": { - "properties": { - "name": { - "description": "The collection name.\n", - "type": "string" - }, - "type": { - "description": "How the collection is used. Can be `\"read\"`, `\"write\"`, or `\"exclusive\"`.\n", - "type": "string" - } - }, - "required": [ - "name", - "type" - ], - "type": "object" - }, - "type": "array" - }, - "estimatedCost": { - "description": "The estimated cost of the query.\n", - "type": "number" - }, - "estimatedNrItems": { - "description": "The estimated number of results.\n", - "type": "integer" - }, - "isModificationQuery": { - "description": "Whether the query contains write operations.\n", - "type": "boolean" - }, - "nodes": { - "description": "A nested list of the execution plan nodes.\n", - "items": { - "type": "object" - }, - "type": "array" - }, - "rules": { - "description": "A list with the names of the applied optimizer rules.\n", - "items": { - "type": "string" - }, - "type": "array" + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "If the request was not authenticated as a user with sufficient rights.\n" + } + }, + "summary": "Compact a collection", + "tags": [ + "Collections" + ] + } + }, + "/_db/{database-name}/_api/collection/{collection-name}/count": { + "get": { + "description": "Get the number of documents in a collection.\n", + "operationId": "getCollectionCount", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the collection.\n\n\u003e 
**WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", + "in": "path", + "name": "collection-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "cacheEnabled": { + "description": "Whether the in-memory hash cache for documents is enabled for this\ncollection.\n", + "type": "boolean" + }, + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "computedValues": { + "description": "A list of objects, each representing a computed value.\n", + "items": { + "properties": { + "computeOn": { + "description": "An array of strings that defines on which write operations the value is\ncomputed.\n", + "example": [ + "insert", + "update", + "replace" + ], + "items": { + "enum": [ + "insert", + "update", + "replace" + ], + "type": "string" }, - "variables": { - "description": "All of the query variables, including user-created and internal ones.\n", - "items": { - "type": "object" - }, - "type": "array" - } + "type": "array", + "uniqueItems": true }, - "required": [ - "nodes", - "rules", - "collections", - "variables", - "estimatedCost", - "estimatedNrItems", - "isModificationQuery" - ], - "type": "object" - }, - "profile": { - "description": "The duration of the different query execution phases in seconds.\n", - "properties": { - "executing": { - "description": "", - "type": "number" - }, - "finalizing": { - "description": "", - "type": "number" - }, - "initializing": { - "description": "", - "type": "number" - }, - "instantiating 
executors": { - "description": "", - "type": "number" - }, - "instantiating plan": { - "description": "", - "type": "number" - }, - "loading collections": { - "description": "", - "type": "number" - }, - "optimizing ast": { - "description": "", - "type": "number" - }, - "optimizing plan": { - "description": "", - "type": "number" - }, - "parsing": { - "description": "", - "type": "number" - } + "expression": { + "description": "An AQL `RETURN` operation with an expression that computes the desired value.\n", + "type": "string" }, - "required": [ - "initializing", - "parsing", - "optimizing ast", - "loading collections", - "instantiating plan", - "optimizing plan", - "instantiating executors", - "executing", - "finalizing" - ], - "type": "object" + "failOnWarning": { + "description": "Whether the write operation fails if the expression produces a warning.\n", + "type": "boolean" + }, + "keepNull": { + "description": "Whether the target attribute is set if the expression evaluates to `null`.\n", + "type": "boolean" + }, + "name": { + "description": "The name of the target attribute.\n", + "type": "string" + }, + "overwrite": { + "description": "Whether the computed value takes precedence over a user-provided or\nexisting attribute.\n", + "type": "boolean" + } }, - "stats": { - "description": "An object with query statistics.\n", - "properties": { - "cacheHits": { - "description": "The total number of index entries read from in-memory caches for indexes\nof type edge or persistent. This value is only non-zero when reading from indexes\nthat have an in-memory cache enabled, and when the query allows using the in-memory\ncache (i.e. using equality lookups on all index attributes).\n", - "type": "integer" - }, - "cacheMisses": { - "description": "The total number of cache read attempts for index entries that could not\nbe served from in-memory caches for indexes of type edge or persistent. 
This value\nis only non-zero when reading from indexes that have an in-memory cache enabled, the\nquery allows using the in-memory cache (i.e. using equality lookups on all index attributes)\nand the looked up values are not present in the cache.\n", - "type": "integer" - }, - "cursorsCreated": { - "description": "The total number of cursor objects created during query execution. Cursor\nobjects are created for index lookups.\n", - "type": "integer" - }, - "cursorsRearmed": { - "description": "The total number of times an existing cursor object was repurposed.\nRepurposing an existing cursor object is normally more efficient compared to destroying an\nexisting cursor object and creating a new one from scratch.\n", - "type": "integer" - }, - "executionTime": { - "description": "The query execution time (wall-clock time) in seconds.\n", - "type": "number" - }, - "filtered": { - "description": "The total number of documents removed after executing a filter condition\nin a `FilterNode` or another node that post-filters data. 
Note that nodes of the\n`IndexNode` type can also filter documents by selecting only the required index range\nfrom a collection, and the `filtered` value only indicates how much filtering was done by a\npost filter in the `IndexNode` itself or following `FilterNode` nodes.\nNodes of the `EnumerateCollectionNode` and `TraversalNode` types can also apply\nfilter conditions and can report the number of filtered documents.\n", - "type": "integer" - }, - "fullCount": { - "description": "The total number of documents that matched the search condition if the query's\nfinal top-level `LIMIT` operation were not present.\nThis attribute may only be returned if the `fullCount` option was set when starting the\nquery and only contains a sensible value if the query contains a `LIMIT` operation on\nthe top level.\n", - "type": "integer" - }, - "httpRequests": { - "description": "The total number of cluster-internal HTTP requests performed.\n", - "type": "integer" - }, - "intermediateCommits": { - "description": "The number of intermediate commits performed by the query. This is only non-zero\nfor write queries, and only for queries that reached either the `intermediateCommitSize`\nor `intermediateCommitCount` thresholds. 
Note: in a cluster, intermediate commits can happen\non each participating DB-Server.\n", - "type": "integer" - }, - "nodes": { - "description": "When the query is executed with the `profile` option set to at least `2`,\nthen this attribute contains runtime statistics per query execution node.\nFor a human readable output, you can execute\n`db._profileQuery(\u003cquery\u003e, \u003cbind-vars\u003e)` in arangosh.\n", - "items": { - "properties": { - "calls": { - "description": "The number of calls to this node.\n", - "type": "integer" - }, - "id": { - "description": "The execution node ID to correlate the statistics with the `plan` returned in\nthe `extra` attribute.\n", - "type": "integer" - }, - "items": { - "description": "The number of items returned by this node. Items are the temporary results\nreturned at this stage.\n", - "type": "integer" - }, - "runtime": { - "description": "The execution time of this node in seconds.\n", - "type": "number" - } - }, - "required": [ - "id", - "calls", - "items", - "runtime" - ], - "type": "object" - }, - "type": "array" - }, - "peakMemoryUsage": { - "description": "The maximum memory usage of the query while it was running. In a cluster,\nthe memory accounting is done per shard, and the memory usage reported is the peak\nmemory usage value from the individual shards.\nNote that to keep things lightweight, the per-query memory usage is tracked on a relatively\nhigh level, not including any memory allocator overhead nor any memory used for temporary\nresults calculations (e.g. memory allocated/deallocated inside AQL expressions and function\ncalls).\n", - "type": "integer" - }, - "scannedFull": { - "description": "The total number of documents iterated over when scanning a collection\nwithout an index. 
Documents scanned by subqueries are included in the result, but\noperations triggered by built-in or user-defined AQL functions are not.\n", - "type": "integer" - }, - "scannedIndex": { - "description": "The total number of documents iterated over when scanning a collection using\nan index. Documents scanned by subqueries are included in the result, but operations\ntriggered by built-in or user-defined AQL functions are not.\n", - "type": "integer" - }, - "writesExecuted": { - "description": "The total number of data-modification operations successfully executed.\n", - "type": "integer" - }, - "writesIgnored": { - "description": "The total number of data-modification operations that were unsuccessful,\nbut have been ignored because of the `ignoreErrors` query option.\n", - "type": "integer" - } - }, - "required": [ - "writesExecuted", - "writesIgnored", - "scannedFull", - "scannedIndex", - "cursorsCreated", - "cursorsRearmed", - "cacheHits", - "cacheMisses", - "filtered", - "httpRequests", - "executionTime", - "peakMemoryUsage" - ], - "type": "object" + "required": [ + "name", + "expression", + "overwrite" + ], + "type": "object" + }, + "type": "array" + }, + "count": { + "description": "The number of documents currently present in the collection.\n", + "type": "integer" + }, + "distributeShardsLike": { + "description": "The name of another collection. This collection uses the `replicationFactor`,\n`numberOfShards` and `shardingStrategy` properties of the other collection and\nthe shards of this collection are distributed in the same way as the shards of\nthe other collection.\n", + "type": "string" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "globallyUniqueId": { + "description": "A unique identifier of the collection. 
This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isDisjoint": { + "description": "Whether the SmartGraph or EnterpriseGraph this collection belongs to is disjoint\n(Enterprise Edition only). This is an internal property. _(cluster only)_\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the collection is used in a SmartGraph or EnterpriseGraph (Enterprise Edition only).\nThis is an internal property. _(cluster only)_\n", + "type": "boolean" + }, + "isSystem": { + "description": "Whether the collection is a system collection. Collection names that starts with\nan underscore are usually system collections.\n", + "type": "boolean" + }, + "keyOptions": { + "description": "An object which contains key generation options.\n", + "properties": { + "allowUserKeys": { + "description": "If set to `true`, then you are allowed to supply\nown key values in the `_key` attribute of a document. If set to\n`false`, then the key generator is solely responsible for\ngenerating keys and an error is raised if you supply own key values in the\n`_key` attribute of documents.\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. 
Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", + "type": "boolean" }, - "warnings": { - "description": "A list of query warnings.\n", - "items": { - "properties": { - "code": { - "description": "An error code.\n", - "type": "integer" - }, - "message": { - "description": "A description of the problem.\n", - "type": "string" - } - }, - "required": [ - "code", - "message" - ], - "type": "object" - }, - "type": "array" + "increment": { + "description": "The increment value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" + }, + "lastValue": { + "description": "The offset value of the `autoincrement` or `padded` key generator.\nThis is an internal property for restoring dumps properly.\n", + "type": "integer" + }, + "offset": { + "description": "The initial offset value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" + }, + "type": { + "description": "Specifies the type of the key generator.\n", + "enum": [ + "traditional", + "autoincrement", + "uuid", + "padded" + ], + "type": "string" } }, "required": [ - "warnings", - "stats" + "type", + "allowUserKeys" ], "type": "object" }, - "hasMore": { - "description": "A boolean indicator whether there are more results\navailable for the cursor on the server.\n\nNote that even if `hasMore` returns `true`, the next call might still return no\ndocuments. 
Once `hasMore` is `false`, the cursor is exhausted and the client\ncan stop asking for more results.\n", - "type": "boolean" - }, - "id": { - "description": "The ID of the cursor for fetching more result batches.\n", + "name": { + "description": "The name of this collection.\n", "type": "string" }, - "nextBatchId": { - "description": "Only set if the `allowRetry` query option is enabled in v3.11.0.\nFrom v3.11.1 onward, this attribute is always set, except in the last batch.\n\nThe ID of the batch after the current one. The first batch has an ID of `1` and\nthe value is incremented by 1 with every batch. You can remember and use this\nbatch ID should retrieving the next batch fail. Use the\n`POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e` endpoint to ask for the batch again.\nYou can also request the next batch.\n", - "type": "string" + "numberOfShards": { + "description": "The number of shards of the collection. _(cluster only)_\n", + "type": "integer" }, - "result": { - "description": "An array of result documents for the current batch\n(might be empty if the query has no results).\n", + "replicationFactor": { + "description": "Contains how many copies of each shard are kept on different DB-Servers.\nIt is an integer number in the range of 1-10 or the string `\"satellite\"`\nfor SatelliteCollections (Enterprise Edition only). _(cluster only)_\n", + "type": "integer" + }, + "schema": { + "description": "An object that specifies the collection-level schema for documents.\n", + "type": "object" + }, + "shardKeys": { + "description": "Contains the names of document attributes that are used to\ndetermine the target shard for documents. 
_(cluster only)_\n", "items": { - "type": "" + "type": "string" }, "type": "array" - } - }, - "required": [ - "error", - "code", - "hasMore", - "cached" - ], - "type": "object" - } - } - }, - "description": "is returned if the result set can be created by the server.\n" - }, - "400": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "the HTTP status code\n", - "type": "integer" }, - "error": { - "description": "boolean flag to indicate that an error occurred (`true` in this case)\n", - "type": "boolean" + "shardingStrategy": { + "description": "The sharding strategy selected for the collection. _(cluster only)_\n", + "enum": [ + "community-compat", + "enterprise-compat", + "enterprise-smart-edge-compat", + "hash", + "enterprise-hash-smart-edge", + "enterprise-hex-smart-vertex" + ], + "type": "string" }, - "errorMessage": { - "description": "A descriptive error message.\n\nIf the query specification is complete, the server will process the query. If an\nerror occurs during query processing, the server will respond with *HTTP 400*.\nAgain, the body of the response will contain details about the error.\n", + "smartGraphAttribute": { + "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. Edges derive the\nattribute from their connected vertices (Enterprise Edition only). _(cluster only)_\n", "type": "string" }, - "errorNum": { - "description": "the server error number\n", + "smartJoinAttribute": { + "description": "Determines an attribute of the collection that must contain the shard key value\nof the referred-to SmartJoin collection (Enterprise Edition only). _(cluster only)_\n", + "type": "string" + }, + "syncByRevision": { + "description": "Whether the newer revision-based replication protocol is\nenabled for this collection. 
This is an internal property.\n", + "type": "boolean" + }, + "type": { + "description": "The type of the collection:\n - `0`: \"unknown\"\n - `2`: regular document collection\n - `3`: edge collection\n", + "type": "integer" + }, + "waitForSync": { + "description": "If `true`, creating, changing, or removing\ndocuments waits until the data has been synchronized to disk.\n", + "type": "boolean" + }, + "writeConcern": { + "description": "Determines how many copies of each shard are required to be\nin-sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\n\nIf `distributeShardsLike` is set, the default `writeConcern`\nis that of the prototype collection.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`.\nOtherwise, the default value is controlled by the current database's\ndefault `writeConcern`, which uses the `--cluster.write-concern`\nstartup option as default, which defaults to `1`. _(cluster only)_\n", "type": "integer" } }, "required": [ + "count", "error", "code", - "errorNum", - "errorMessage" + "name", + "type", + "status", + "statusString", + "isSystem", + "id", + "globallyUniqueId", + "waitForSync", + "keyOptions", + "schema", + "computedValues", + "cacheEnabled", + "syncByRevision" ], "type": "object" } } }, - "description": "is returned if the JSON representation is malformed, the query specification is\nmissing from the request, or if the query is invalid.\n\nThe body of the response contains a JSON object with additional error\ndetails. 
The object has the following attributes:\n" + "description": "All properties of the collection but additionally the document `count`.\n" }, - "404": { - "description": "The server will respond with *HTTP 404* in case a non-existing collection is\naccessed in the query.\n" + "400": { + "description": "The `collection-name` parameter is missing.\n" }, - "405": { - "description": "The server will respond with *HTTP 405* if an unsupported HTTP method is used.\n" + "404": { + "description": "The collection cannot be found.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" }, "410": { - "description": "The server will respond with *HTTP 410* if a server which processes the query\nor is the leader for a shard which is used in the query stops responding, but\nthe connection has not been closed.\n" - }, - "503": { - "description": "The server will respond with *HTTP 503* if a server which processes the query\nor is the leader for a shard which is used in the query is down, either for\ngoing through a restart, a failure or connectivity issues.\n" + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" } }, - "summary": "Create a cursor", + "summary": "Get the document count of a collection", "tags": [ - "Queries" + "Collections" ] } }, - "/_api/cursor/{cursor-identifier}": { - "delete": { - "description": "Deletes the cursor and frees the resources associated with it.\n\nThe cursor will automatically be destroyed on the server when the client has\nretrieved all documents from it. The client can also explicitly destroy the\ncursor at any earlier time using an HTTP DELETE request. 
The cursor id must\nbe included as part of the URL.\n\nNote: the server will also destroy abandoned cursors automatically after a\ncertain server-controlled timeout to avoid resource leakage.\n", - "operationId": "deleteAqlQueryCursor", + "/_db/{database-name}/_api/collection/{collection-name}/figures": { + "get": { + "description": "Get the number of documents and additional statistical information\nabout the collection.\n", + "operationId": "getCollectionFigures", "parameters": [ { - "description": "The id of the cursor\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "cursor-identifier", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "responses": { - "202": { - "description": "is returned if the server is aware of the cursor.\n" }, - "404": { - "description": "is returned if the server is not aware of the cursor. It is also\nreturned if a cursor is used after it has been destroyed.\n" - } - }, - "summary": "Delete a cursor", - "tags": [ - "Queries" - ] - }, - "post": { - "description": "If the cursor is still alive, returns an object with the next query result batch.\n\nIf the cursor is not fully consumed, the time-to-live for the cursor\nis renewed by this API call.\n", - "operationId": "getNextAqlQueryCursorBatch", - "parameters": [ { - "description": "The name of the cursor\n", + "description": "The name of the collection.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", "in": "path", - "name": "cursor-identifier", + "name": "collection-name", "required": true, "schema": { "type": "string" } + }, + { + "description": "Setting `details` to `true` will return extended storage engine-specific\ndetails to the figures. The details are intended for debugging ArangoDB itself\nand their format is subject to change. 
By default, `details` is set to `false`,\nso no details are returned and the behavior is identical to previous versions\nof ArangoDB.\nPlease note that requesting `details` may cause additional load and thus have\nan impact on performance.\n", + "in": "query", + "name": "details", + "required": false, + "schema": { + "default": false, + "type": "boolean" + } } ], "responses": { @@ -7311,378 +6543,346 @@ "application/json": { "schema": { "properties": { - "cached": { - "description": "A boolean flag indicating whether the query result was served\nfrom the query cache or not. If the query result is served from the query\ncache, the `extra` return attribute will not contain any `stats` sub-attribute\nand no `profile` sub-attribute.\n", + "cacheEnabled": { + "description": "Whether the in-memory hash cache for documents is enabled for this\ncollection.\n", "type": "boolean" }, "code": { - "description": "The HTTP status code.\n", + "description": "The HTTP response status code.\n", + "example": 200, "type": "integer" }, + "computedValues": { + "description": "A list of objects, each representing a computed value.\n", + "items": { + "properties": { + "computeOn": { + "description": "An array of strings that defines on which write operations the value is\ncomputed.\n", + "example": [ + "insert", + "update", + "replace" + ], + "items": { + "enum": [ + "insert", + "update", + "replace" + ], + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + "expression": { + "description": "An AQL `RETURN` operation with an expression that computes the desired value.\n", + "type": "string" + }, + "failOnWarning": { + "description": "Whether the write operation fails if the expression produces a warning.\n", + "type": "boolean" + }, + "keepNull": { + "description": "Whether the target attribute is set if the expression evaluates to `null`.\n", + "type": "boolean" + }, + "name": { + "description": "The name of the target attribute.\n", + "type": "string" + }, + 
"overwrite": { + "description": "Whether the computed value takes precedence over a user-provided or\nexisting attribute.\n", + "type": "boolean" + } + }, + "required": [ + "name", + "expression", + "overwrite" + ], + "type": "object" + }, + "type": "array" + }, "count": { - "description": "The total number of result documents available (only\navailable if the query was executed with the `count` attribute set).\n", + "description": "The number of documents currently present in the collection.\n", "type": "integer" }, + "distributeShardsLike": { + "description": "The name of another collection. This collection uses the `replicationFactor`,\n`numberOfShards` and `shardingStrategy` properties of the other collection and\nthe shards of this collection are distributed in the same way as the shards of\nthe other collection.\n", + "type": "string" + }, "error": { - "description": "A flag to indicate that an error occurred (`false` in this case).\n", + "description": "A flag indicating that no error occurred.\n", + "example": false, "type": "boolean" }, - "extra": { - "description": "An optional JSON object with extra information about the query result.\n\nOnly delivered as part of the first batch, or the last batch in case of a cursor\nwith the `stream` option enabled.\n", + "figures": { + "description": "The metrics of the collection.\n", "properties": { - "plan": { - "description": "The execution plan.\n", + "indexes": { + "description": "The index metrics.\n", "properties": { - "collections": { - "description": "A list of the collections involved in the query. The list only includes the\ncollections that can statically be determined at query compile time.\n", - "items": { - "properties": { - "name": { - "description": "The collection name.\n", - "type": "string" - }, - "type": { - "description": "How the collection is used. 
Can be `\"read\"`, `\"write\"`, or `\"exclusive\"`.\n", - "type": "string" - } - }, - "required": [ - "name", - "type", - "name", - "type" - ], - "type": "object" - }, - "type": "array" - }, - "estimatedCost": { - "description": "The estimated cost of the query.\n", + "count": { + "description": "The total number of indexes defined for the collection, including the pre-defined\nindexes (e.g. primary index).\n", "type": "integer" }, - "estimatedNrItems": { - "description": "The estimated number of results.\n", + "size": { + "description": "The total memory allocated for indexes in bytes.\n", "type": "integer" - }, - "isModificationQuery": { - "description": "Whether the query contains write operations.\n", - "type": "boolean" - }, - "nodes": { - "description": "A nested list of the execution plan nodes.\n", - "items": { - "type": "object" - }, - "type": "array" - }, - "rules": { - "description": "A list with the names of the applied optimizer rules.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "variables": { - "description": "All of the query variables, including user-created and internal ones.\n", - "items": { - "type": "object" - }, - "type": "array" } }, "required": [ - "nodes", - "rules", - "collections", - "variables", - "estimatedCost", - "estimatedNrItems", - "isModificationQuery", - "nodes", - "rules", - "collections", - "variables", - "estimatedCost", - "estimatedNrItems", - "isModificationQuery" + "count", + "size" ], "type": "object" + } + }, + "required": [ + "indexes" + ], + "type": "object" + }, + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isDisjoint": { + "description": "Whether the SmartGraph or EnterpriseGraph this collection belongs to is disjoint\n(Enterprise Edition only). This is an internal property. 
_(cluster only)_\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the collection is used in a SmartGraph or EnterpriseGraph (Enterprise Edition only).\nThis is an internal property. _(cluster only)_\n", + "type": "boolean" + }, + "isSystem": { + "description": "Whether the collection is a system collection. Collection names that starts with\nan underscore are usually system collections.\n", + "type": "boolean" + }, + "keyOptions": { + "description": "An object which contains key generation options.\n", + "properties": { + "allowUserKeys": { + "description": "If set to `true`, then you are allowed to supply\nown key values in the `_key` attribute of a document. If set to\n`false`, then the key generator is solely responsible for\ngenerating keys and an error is raised if you supply own key values in the\n`_key` attribute of documents.\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. 
Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", + "type": "boolean" }, - "profile": { - "description": "The duration of the different query execution phases in seconds.\n", - "properties": { - "executing": { - "description": "", - "type": "number" - }, - "finalizing": { - "description": "", - "type": "number" - }, - "initializing": { - "description": "", - "type": "number" - }, - "instantiating executors": { - "description": "", - "type": "number" - }, - "instantiating plan": { - "description": "", - "type": "number" - }, - "loading collections": { - "description": "", - "type": "number" - }, - "optimizing ast": { - "description": "", - "type": "number" - }, - "optimizing plan": { - "description": "", - "type": "number" - }, - "parsing": { - "description": "", - "type": "number" - } - }, - "required": [ - "initializing", - "parsing", - "optimizing ast", - "loading collections", - "instantiating plan", - "optimizing plan", - "instantiating executors", - "executing", - "finalizing", - "initializing", - "parsing", - "optimizing ast", - "loading collections", - "instantiating plan", - "optimizing plan", - "instantiating executors", - "executing", - "finalizing" - ], - "type": "object" + "increment": { + "description": "The increment value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" }, - "stats": { - "description": "An object with query statistics.\n", - "properties": { - "cacheHits": { - "description": "The total number of index entries read from in-memory caches for indexes\nof type edge or persistent. This value is only non-zero when reading from indexes\nthat have an in-memory cache enabled, and when the query allows using the in-memory\ncache (i.e. 
using equality lookups on all index attributes).\n", - "type": "integer" - }, - "cacheMisses": { - "description": "The total number of cache read attempts for index entries that could not\nbe served from in-memory caches for indexes of type edge or persistent. This value\nis only non-zero when reading from indexes that have an in-memory cache enabled, the\nquery allows using the in-memory cache (i.e. using equality lookups on all index attributes)\nand the looked up values are not present in the cache.\n", - "type": "integer" - }, - "cursorsCreated": { - "description": "The total number of cursor objects created during query execution. Cursor\nobjects are created for index lookups.\n", - "type": "integer" - }, - "cursorsRearmed": { - "description": "The total number of times an existing cursor object was repurposed.\nRepurposing an existing cursor object is normally more efficient compared to destroying an\nexisting cursor object and creating a new one from scratch.\n", - "type": "integer" - }, - "executionTime": { - "description": "The query execution time (wall-clock time) in seconds.\n", - "type": "number" - }, - "filtered": { - "description": "The total number of documents removed after executing a filter condition\nin a `FilterNode` or another node that post-filters data. 
Note that nodes of the\n`IndexNode` type can also filter documents by selecting only the required index range\nfrom a collection, and the `filtered` value only indicates how much filtering was done by a\npost filter in the `IndexNode` itself or following `FilterNode` nodes.\nNodes of the `EnumerateCollectionNode` and `TraversalNode` types can also apply\nfilter conditions and can report the number of filtered documents.\n", - "type": "integer" - }, - "fullCount": { - "description": "The total number of documents that matched the search condition if the query's\nfinal top-level `LIMIT` operation were not present.\nThis attribute may only be returned if the `fullCount` option was set when starting the\nquery and only contains a sensible value if the query contains a `LIMIT` operation on\nthe top level.\n", - "type": "integer" - }, - "httpRequests": { - "description": "The total number of cluster-internal HTTP requests performed.\n", - "type": "integer" - }, - "intermediateCommits": { - "description": "The number of intermediate commits performed by the query. This is only non-zero\nfor write queries, and only for queries that reached either the `intermediateCommitSize`\nor `intermediateCommitCount` thresholds. 
Note: in a cluster, intermediate commits can happen\non each participating DB-Server.\n", - "type": "integer" - }, - "nodes": { - "description": "When the query is executed with the `profile` option set to at least `2`,\nthen this attribute contains runtime statistics per query execution node.\nFor a human readable output, you can execute\n`db._profileQuery(\u003cquery\u003e, \u003cbind-vars\u003e)` in arangosh.\n", - "items": { - "properties": { - "calls": { - "description": "The number of calls to this node.\n", - "type": "integer" - }, - "id": { - "description": "The execution node ID to correlate the statistics with the `plan` returned in\nthe `extra` attribute.\n", - "type": "integer" - }, - "items": { - "description": "The number of items returned by this node. Items are the temporary results\nreturned at this stage.\n", - "type": "integer" - }, - "runtime": { - "description": "The execution time of this node in seconds.\n", - "type": "number" - } - }, - "required": [ - "id", - "calls", - "items", - "runtime", - "id", - "calls", - "items", - "runtime" - ], - "type": "object" - }, - "type": "array" - }, - "peakMemoryUsage": { - "description": "The maximum memory usage of the query while it was running. In a cluster,\nthe memory accounting is done per shard, and the memory usage reported is the peak\nmemory usage value from the individual shards.\nNote that to keep things lightweight, the per-query memory usage is tracked on a relatively\nhigh level, not including any memory allocator overhead nor any memory used for temporary\nresults calculations (e.g. memory allocated/deallocated inside AQL expressions and function\ncalls).\n", - "type": "integer" - }, - "scannedFull": { - "description": "The total number of documents iterated over when scanning a collection\nwithout an index. 
Documents scanned by subqueries are included in the result, but\noperations triggered by built-in or user-defined AQL functions are not.\n", - "type": "integer" - }, - "scannedIndex": { - "description": "The total number of documents iterated over when scanning a collection using\nan index. Documents scanned by subqueries are included in the result, but operations\ntriggered by built-in or user-defined AQL functions are not.\n", - "type": "integer" - }, - "writesExecuted": { - "description": "The total number of data-modification operations successfully executed.\n", - "type": "integer" - }, - "writesIgnored": { - "description": "The total number of data-modification operations that were unsuccessful,\nbut have been ignored because of the `ignoreErrors` query option.\n", - "type": "integer" - } - }, - "required": [ - "writesExecuted", - "writesIgnored", - "scannedFull", - "scannedIndex", - "cursorsCreated", - "cursorsRearmed", - "cacheHits", - "cacheMisses", - "filtered", - "httpRequests", - "executionTime", - "peakMemoryUsage", - "writesExecuted", - "writesIgnored", - "scannedFull", - "scannedIndex", - "cursorsCreated", - "cursorsRearmed", - "cacheHits", - "cacheMisses", - "filtered", - "httpRequests", - "executionTime", - "peakMemoryUsage" - ], - "type": "object" + "lastValue": { + "description": "The offset value of the `autoincrement` or `padded` key generator.\nThis is an internal property for restoring dumps properly.\n", + "type": "integer" }, - "warnings": { - "description": "A list of query warnings.\n", - "items": { - "properties": { - "code": { - "description": "An error code.\n", - "type": "integer" - }, - "message": { - "description": "A description of the problem.\n", - "type": "string" - } - }, - "required": [ - "code", - "message", - "code", - "message" - ], - "type": "object" - }, - "type": "array" + "offset": { + "description": "The initial offset value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": 
"integer" + }, + "type": { + "description": "Specifies the type of the key generator.\n", + "enum": [ + "traditional", + "autoincrement", + "uuid", + "padded" + ], + "type": "string" } }, "required": [ - "warnings", - "stats", - "warnings", - "stats" + "type", + "allowUserKeys" ], "type": "object" }, - "hasMore": { - "description": "A boolean indicator whether there are more results\navailable for the cursor on the server.\n\nNote that even if `hasMore` returns `true`, the next call might still return no\ndocuments. Once `hasMore` is `false`, the cursor is exhausted and the client\ncan stop asking for more results.\n", - "type": "boolean" - }, - "id": { - "description": "The ID of the cursor for fetching more result batches.\n", + "name": { + "description": "The name of this collection.\n", "type": "string" }, - "nextBatchId": { - "description": "Only set if the `allowRetry` query option is enabled in v3.11.0.\nFrom v3.11.1 onward, this attribute is always set, except in the last batch.\n\nThe ID of the batch after the current one. The first batch has an ID of `1` and\nthe value is incremented by 1 with every batch. You can remember and use this\nbatch ID should retrieving the next batch fail. Use the\n`POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e` endpoint to ask for the batch again.\nYou can also request the next batch.\n", - "type": "string" + "numberOfShards": { + "description": "The number of shards of the collection. _(cluster only)_\n", + "type": "integer" }, - "result": { - "description": "An array of result documents for the current batch\n(might be empty if the query has no results).\n", + "replicationFactor": { + "description": "Contains how many copies of each shard are kept on different DB-Servers.\nIt is an integer number in the range of 1-10 or the string `\"satellite\"`\nfor SatelliteCollections (Enterprise Edition only). 
_(cluster only)_\n", + "type": "integer" + }, + "schema": { + "description": "An object that specifies the collection-level schema for documents.\n", + "type": "object" + }, + "shardKeys": { + "description": "Contains the names of document attributes that are used to\ndetermine the target shard for documents. _(cluster only)_\n", "items": { - "type": "" + "type": "string" }, "type": "array" + }, + "shardingStrategy": { + "description": "The sharding strategy selected for the collection. _(cluster only)_\n", + "enum": [ + "community-compat", + "enterprise-compat", + "enterprise-smart-edge-compat", + "hash", + "enterprise-hash-smart-edge", + "enterprise-hex-smart-vertex" + ], + "type": "string" + }, + "smartGraphAttribute": { + "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. Edges derive the\nattribute from their connected vertices (Enterprise Edition only). _(cluster only)_\n", + "type": "string" + }, + "smartJoinAttribute": { + "description": "Determines an attribute of the collection that must contain the shard key value\nof the referred-to SmartJoin collection (Enterprise Edition only). _(cluster only)_\n", + "type": "string" + }, + "syncByRevision": { + "description": "Whether the newer revision-based replication protocol is\nenabled for this collection. This is an internal property.\n", + "type": "boolean" + }, + "type": { + "description": "The type of the collection:\n - `0`: \"unknown\"\n - `2`: regular document collection\n - `3`: edge collection\n", + "type": "integer" + }, + "waitForSync": { + "description": "If `true`, creating, changing, or removing\ndocuments waits until the data has been synchronized to disk.\n", + "type": "boolean" + }, + "writeConcern": { + "description": "Determines how many copies of each shard are required to be\nin-sync on the different DB-Servers. 
If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\n\nIf `distributeShardsLike` is set, the default `writeConcern`\nis that of the prototype collection.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`.\nOtherwise, the default value is controlled by the current database's\ndefault `writeConcern`, which uses the `--cluster.write-concern`\nstartup option as default, which defaults to `1`. _(cluster only)_\n", + "type": "integer" } }, "required": [ + "count", + "figures", "error", "code", - "hasMore", - "cached" + "name", + "type", + "status", + "statusString", + "isSystem", + "id", + "globallyUniqueId", + "waitForSync", + "keyOptions", + "schema", + "computedValues", + "cacheEnabled", + "syncByRevision" ], "type": "object" } } }, - "description": "The server will respond with *HTTP 200* in case of success.\n" + "description": "All properties of the collection but additionally the document `count`\nand collection `figures`.\n" }, "400": { - "description": "If the cursor identifier is omitted, the server will respond with *HTTP 404*.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter is 
missing.\n" }, "404": { - "description": "If no cursor with the specified identifier can be found, the server will respond\nwith *HTTP 404*.\n" - }, - "410": { - "description": "The server will respond with *HTTP 410* if a server which processes the query\nor is the leader for a shard which is used in the query stops responding, but\nthe connection has not been closed.\n" - }, - "503": { - "description": "The server will respond with *HTTP 503* if a server which processes the query\nor is the leader for a shard which is used in the query is down, either for\ngoing through a restart, a failure or connectivity issues.\n" - } - }, - "summary": "Read the next batch from a cursor", - "tags": [ - "Queries" - ] - }, - "put": { - "description": "\u003e **WARNING:**\nThis endpoint is deprecated in favor its functionally equivalent POST counterpart.\n\n\nIf the cursor is still alive, returns an object with the following\nattributes:\n\n- `id`: a `cursor-identifier`\n- `result`: a list of documents for the current batch\n- `hasMore`: `false` if this was the last batch\n- `count`: if present the total number of elements\n- `code`: an HTTP status code\n- `error`: a boolean flag to indicate whether an error occurred\n- `errorNum`: a server error number (if `error` is `true`)\n- `errorMessage`: a descriptive error message (if `error` is `true`)\n- `extra`: an object with additional information about the query result, with\n the nested objects `stats` and `warnings`. Only delivered as part of the last\n batch in case of a cursor with the `stream` option enabled.\n\nNote that even if `hasMore` returns `true`, the next call might\nstill return no documents. If, however, `hasMore` is `false`, then\nthe cursor is exhausted. 
Once the `hasMore` attribute has a value of\n`false`, the client can stop.\n\nIf the cursor is not fully consumed, the time-to-live for the cursor\nis renewed by this API call.\n", - "operationId": "getNextAqlQueryCursorBatchPut", - "parameters": [ - { - "description": "The name of the cursor\n", + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" + } + }, + "summary": "Get the collection statistics", + "tags": [ + "Collections" + ] + } + }, + "/_db/{database-name}/_api/collection/{collection-name}/load": { + "put": { + "description": "\u003e **WARNING:**\nThe load function is deprecated from version 3.8.0 onwards and is a no-op\nfrom version 3.9.0 onwards. It should no longer be used, as it may be removed\nin a future version of ArangoDB.\n\n\nSince ArangoDB version 3.9.0 this API does nothing. 
Previously, it used to\nload a collection into memory.\n", + "operationId": "loadCollection", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "cursor-identifier", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the collection.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", + "in": "path", + "name": "collection-name", "required": true, "schema": { "type": "string" @@ -7691,45 +6891,167 @@ ], "responses": { "200": { - "description": "The server will respond with *HTTP 200* in case of success.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "count": { + "description": "The number of documents currently present in the collection.\n", + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isSystem": { + "description": "Whether the collection is a system collection. 
Collection names that starts with\nan underscore are usually system collections.\n", + "example": false, + "type": "boolean" + }, + "name": { + "description": "The name of the collection.\n", + "example": "coll", + "type": "string" + }, + "status": { + "description": "The status of the collection.\n- `3`: loaded\n- `5`: deleted\n\nEvery other status indicates a corrupted collection.\n", + "example": 3, + "type": "integer" + }, + "type": { + "description": "The type of the collection:\n- `0`: \"unknown\"\n- `2`: regular document collection\n- `3`: edge collection\n", + "example": 2, + "type": "integer" + } + }, + "required": [ + "error", + "code", + "name", + "type", + "isSystem", + "status", + "id", + "globallyUniqueId" + ], + "type": "object" + } + } + }, + "description": "Returns the basic collection properties for compatibility reasons.\n" }, "400": { - "description": "If the cursor identifier is omitted, the server will respond with *HTTP 404*.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter or the `name` attribute is missing.\n" }, "404": { - "description": "If no cursor with the specified identifier can be found, the server will respond\nwith *HTTP 404*.\n" - }, - "410": { - "description": "The server will respond with *HTTP 410* if a server which processes the query\nor is the leader for a shard which is used in the query stops responding, 
but\nthe connection has not been closed.\n" - }, - "503": { - "description": "The server will respond with *HTTP 503* if a server which processes the query\nor is the leader for a shard which is used in the query is down, either for\ngoing through a restart, a failure or connectivity issues.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" } }, - "summary": "Read the next batch from a cursor (deprecated)", + "summary": "Load a collection", "tags": [ - "Queries" + "Collections" ] } }, - "/_api/cursor/{cursor-identifier}/{batch-identifier}": { - "post": { - "description": "You can use this endpoint to retry fetching the latest batch from a cursor.\nThe endpoint requires the `allowRetry` query option to be enabled for the cursor.\n\nCalling this endpoint with the last returned batch identifier returns the\nquery results for that same batch again. This does not advance the cursor.\nClient applications can use this to re-transfer a batch once more in case of\ntransfer errors.\n\nYou can also call this endpoint with the next batch identifier, i.e. the value\nreturned in the `nextBatchId` attribute of a previous request. 
This advances the\ncursor and returns the results of the next batch.\n\nFrom v3.11.1 onward, you may use this endpoint even if the `allowRetry`\nattribute is `false` to fetch the next batch, but you cannot request a batch\nagain unless you set it to `true`.\n\nNote that it is only supported to query the last returned batch identifier or\nthe directly following batch identifier. The latter is only supported if there\nare more results in the cursor (i.e. `hasMore` is `true` in the latest batch).\n\nNote that when the last batch has been consumed successfully by a client\napplication, it should explicitly delete the cursor to inform the server that it\nsuccessfully received and processed the batch so that the server can free up\nresources.\n\nThe time-to-live for the cursor is renewed by this API call.\n", - "operationId": "getPreviousAqlQueryCursorBatch", + "/_db/{database-name}/_api/collection/{collection-name}/loadIndexesIntoMemory": { + "put": { + "description": "You can call this endpoint to try to cache this collection's index entries in\nthe main memory. Index lookups served from the memory cache can be much faster\nthan lookups not stored in the cache, resulting in a performance boost.\n\nThe endpoint iterates over suitable indexes of the collection and stores the\nindexed values (not the entire document data) in memory. This is implemented for\nedge indexes only.\n\nThe endpoint returns as soon as the index warmup has been scheduled. The index\nwarmup may still be ongoing in the background, even after the return value has\nalready been sent. As all suitable indexes are scanned, it may cause significant\nI/O activity and background load.\n\nThis feature honors memory limits. If the indexes you want to load are smaller\nthan your memory limit, this feature guarantees that most index values are\ncached. If the index is greater than your memory limit, this feature fills\nup values up to this limit. 
You cannot control which indexes of the collection\nshould have priority over others.\n\nIt is guaranteed that the in-memory cache data is consistent with the stored\nindex data at all times.\n", + "operationId": "loadCollectionIndexes", "parameters": [ { - "description": "The ID of the cursor.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "cursor-identifier", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The ID of the batch. The first batch has an ID of `1` and the value is\nincremented by 1 with every batch. You can only request the latest batch again\n(or the next batch). Earlier batches are not kept on the server-side.\n", + "description": "The name of the collection.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", "in": "path", - "name": "batch-identifier", + "name": "collection-name", "required": true, "schema": { "type": "string" @@ -7741,679 +7063,930 @@ "content": { "application/json": { "schema": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, "properties": { - "cached": { - "description": "A boolean flag indicating whether the query result was served\nfrom the query cache or not. 
If the query result is served from the query\ncache, the `extra` return attribute will not contain any `stats` sub-attribute\nand no `profile` sub-attribute.\n", + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, "type": "boolean" - }, + } + }, + "required": [ + "error", + "code", + "result" + ], + "result": { + "description": "The value `true`.\n", + "example": true, + "type": "boolean" + }, + "type": "object" + } + } + }, + "description": "The index loading has been scheduled for all suitable indexes.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { "code": { - "description": "The HTTP status code.\n", - "type": "integer" - }, - "count": { - "description": "The total number of result documents available (only\navailable if the query was executed with the `count` attribute set).\n", + "description": "The HTTP response status code.\n", + "example": 400, "type": "integer" }, "error": { - "description": "A flag to indicate that an error occurred (`false` in this case).\n", + "description": "A flag indicating that an error occurred.\n", + "example": true, "type": "boolean" }, - "extra": { - "description": "An optional JSON object with extra information about the query result.\n\nOnly delivered as part of the first batch, or the last batch in case of a cursor\nwith the `stream` option enabled.\n", - "properties": { - "plan": { - "description": "The execution plan.\n", - "properties": { - "collections": { - "description": "A list of the collections involved in the query. The list only includes the\ncollections that can statically be determined at query compile time.\n", - "items": { - "properties": { - "name": { - "description": "The collection name.\n", - "type": "string" - }, - "type": { - "description": "How the collection is used. 
Can be `\"read\"`, `\"write\"`, or `\"exclusive\"`.\n", - "type": "string" - } - }, - "required": [ - "name", - "type", - "name", - "type", - "name", - "type" - ], - "type": "object" - }, - "type": "array" - }, - "estimatedCost": { - "description": "The estimated cost of the query.\n", - "type": "integer" - }, - "estimatedNrItems": { - "description": "The estimated number of results.\n", - "type": "integer" - }, - "isModificationQuery": { - "description": "Whether the query contains write operations.\n", - "type": "boolean" - }, - "nodes": { - "description": "A nested list of the execution plan nodes.\n", - "items": { - "type": "object" - }, - "type": "array" - }, - "rules": { - "description": "A list with the names of the applied optimizer rules.\n", - "items": { - "type": "string" - }, - "type": "array" + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter is missing.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" + } + }, + "summary": "Load collection indexes into memory", + "tags": [ + 
"Collections" + ] + } + }, + "/_db/{database-name}/_api/collection/{collection-name}/properties": { + "get": { + "description": "Returns all properties of the specified collection.\n", + "operationId": "getCollectionProperties", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the collection.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", + "in": "path", + "name": "collection-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "cacheEnabled": { + "description": "Whether the in-memory hash cache for documents is enabled for this\ncollection.\n", + "type": "boolean" + }, + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "computedValues": { + "description": "A list of objects, each representing a computed value.\n", + "items": { + "properties": { + "computeOn": { + "description": "An array of strings that defines on which write operations the value is\ncomputed.\n", + "example": [ + "insert", + "update", + "replace" + ], + "items": { + "enum": [ + "insert", + "update", + "replace" + ], + "type": "string" }, - "variables": { - "description": "All of the query variables, including user-created and internal ones.\n", - "items": { - "type": "object" - }, - "type": "array" - } + "type": "array", + "uniqueItems": true }, - "required": [ - "nodes", - "rules", - "collections", - "variables", - "estimatedCost", - "estimatedNrItems", - "isModificationQuery", - "nodes", - "rules", - "collections", - "variables", - "estimatedCost", - "estimatedNrItems", - "isModificationQuery", - "nodes", - "rules", - 
"collections", - "variables", - "estimatedCost", - "estimatedNrItems", - "isModificationQuery" - ], - "type": "object" - }, - "profile": { - "description": "The duration of the different query execution phases in seconds.\n", - "properties": { - "executing": { - "description": "", - "type": "number" - }, - "finalizing": { - "description": "", - "type": "number" - }, - "initializing": { - "description": "", - "type": "number" - }, - "instantiating executors": { - "description": "", - "type": "number" - }, - "instantiating plan": { - "description": "", - "type": "number" - }, - "loading collections": { - "description": "", - "type": "number" - }, - "optimizing ast": { - "description": "", - "type": "number" - }, - "optimizing plan": { - "description": "", - "type": "number" - }, - "parsing": { - "description": "", - "type": "number" - } + "expression": { + "description": "An AQL `RETURN` operation with an expression that computes the desired value.\n", + "type": "string" }, - "required": [ - "initializing", - "parsing", - "optimizing ast", - "loading collections", - "instantiating plan", - "optimizing plan", - "instantiating executors", - "executing", - "finalizing", - "initializing", - "parsing", - "optimizing ast", - "loading collections", - "instantiating plan", - "optimizing plan", - "instantiating executors", - "executing", - "finalizing", - "initializing", - "parsing", - "optimizing ast", - "loading collections", - "instantiating plan", - "optimizing plan", - "instantiating executors", - "executing", - "finalizing" - ], - "type": "object" + "failOnWarning": { + "description": "Whether the write operation fails if the expression produces a warning.\n", + "type": "boolean" + }, + "keepNull": { + "description": "Whether the target attribute is set if the expression evaluates to `null`.\n", + "type": "boolean" + }, + "name": { + "description": "The name of the target attribute.\n", + "type": "string" + }, + "overwrite": { + "description": "Whether the computed 
value takes precedence over a user-provided or\nexisting attribute.\n", + "type": "boolean" + } }, - "stats": { - "description": "An object with query statistics.\n", - "properties": { - "cacheHits": { - "description": "The total number of index entries read from in-memory caches for indexes\nof type edge or persistent. This value is only non-zero when reading from indexes\nthat have an in-memory cache enabled, and when the query allows using the in-memory\ncache (i.e. using equality lookups on all index attributes).\n", - "type": "integer" - }, - "cacheMisses": { - "description": "The total number of cache read attempts for index entries that could not\nbe served from in-memory caches for indexes of type edge or persistent. This value\nis only non-zero when reading from indexes that have an in-memory cache enabled, the\nquery allows using the in-memory cache (i.e. using equality lookups on all index attributes)\nand the looked up values are not present in the cache.\n", - "type": "integer" - }, - "cursorsCreated": { - "description": "The total number of cursor objects created during query execution. Cursor\nobjects are created for index lookups.\n", - "type": "integer" - }, - "cursorsRearmed": { - "description": "The total number of times an existing cursor object was repurposed.\nRepurposing an existing cursor object is normally more efficient compared to destroying an\nexisting cursor object and creating a new one from scratch.\n", - "type": "integer" - }, - "executionTime": { - "description": "The query execution time (wall-clock time) in seconds.\n", - "type": "number" - }, - "filtered": { - "description": "The total number of documents removed after executing a filter condition\nin a `FilterNode` or another node that post-filters data. 
Note that nodes of the\n`IndexNode` type can also filter documents by selecting only the required index range\nfrom a collection, and the `filtered` value only indicates how much filtering was done by a\npost filter in the `IndexNode` itself or following `FilterNode` nodes.\nNodes of the `EnumerateCollectionNode` and `TraversalNode` types can also apply\nfilter conditions and can report the number of filtered documents.\n", - "type": "integer" - }, - "fullCount": { - "description": "The total number of documents that matched the search condition if the query's\nfinal top-level `LIMIT` operation were not present.\nThis attribute may only be returned if the `fullCount` option was set when starting the\nquery and only contains a sensible value if the query contains a `LIMIT` operation on\nthe top level.\n", - "type": "integer" - }, - "httpRequests": { - "description": "The total number of cluster-internal HTTP requests performed.\n", - "type": "integer" - }, - "intermediateCommits": { - "description": "The number of intermediate commits performed by the query. This is only non-zero\nfor write queries, and only for queries that reached either the `intermediateCommitSize`\nor `intermediateCommitCount` thresholds. 
Note: in a cluster, intermediate commits can happen\non each participating DB-Server.\n", - "type": "integer" - }, - "nodes": { - "description": "When the query is executed with the `profile` option set to at least `2`,\nthen this attribute contains runtime statistics per query execution node.\nFor a human readable output, you can execute\n`db._profileQuery(\u003cquery\u003e, \u003cbind-vars\u003e)` in arangosh.\n", - "items": { - "properties": { - "calls": { - "description": "The number of calls to this node.\n", - "type": "integer" - }, - "id": { - "description": "The execution node ID to correlate the statistics with the `plan` returned in\nthe `extra` attribute.\n", - "type": "integer" - }, - "items": { - "description": "The number of items returned by this node. Items are the temporary results\nreturned at this stage.\n", - "type": "integer" - }, - "runtime": { - "description": "The execution time of this node in seconds.\n", - "type": "number" - } - }, - "required": [ - "id", - "calls", - "items", - "runtime", - "id", - "calls", - "items", - "runtime", - "id", - "calls", - "items", - "runtime" - ], - "type": "object" - }, - "type": "array" - }, - "peakMemoryUsage": { - "description": "The maximum memory usage of the query while it was running. In a cluster,\nthe memory accounting is done per shard, and the memory usage reported is the peak\nmemory usage value from the individual shards.\nNote that to keep things lightweight, the per-query memory usage is tracked on a relatively\nhigh level, not including any memory allocator overhead nor any memory used for temporary\nresults calculations (e.g. memory allocated/deallocated inside AQL expressions and function\ncalls).\n", - "type": "integer" - }, - "scannedFull": { - "description": "The total number of documents iterated over when scanning a collection\nwithout an index. 
Documents scanned by subqueries are included in the result, but\noperations triggered by built-in or user-defined AQL functions are not.\n", - "type": "integer" - }, - "scannedIndex": { - "description": "The total number of documents iterated over when scanning a collection using\nan index. Documents scanned by subqueries are included in the result, but operations\ntriggered by built-in or user-defined AQL functions are not.\n", - "type": "integer" - }, - "writesExecuted": { - "description": "The total number of data-modification operations successfully executed.\n", - "type": "integer" - }, - "writesIgnored": { - "description": "The total number of data-modification operations that were unsuccessful,\nbut have been ignored because of the `ignoreErrors` query option.\n", - "type": "integer" - } - }, - "required": [ - "writesExecuted", - "writesIgnored", - "scannedFull", - "scannedIndex", - "cursorsCreated", - "cursorsRearmed", - "cacheHits", - "cacheMisses", - "filtered", - "httpRequests", - "executionTime", - "peakMemoryUsage", - "writesExecuted", - "writesIgnored", - "scannedFull", - "scannedIndex", - "cursorsCreated", - "cursorsRearmed", - "cacheHits", - "cacheMisses", - "filtered", - "httpRequests", - "executionTime", - "peakMemoryUsage", - "writesExecuted", - "writesIgnored", - "scannedFull", - "scannedIndex", - "cursorsCreated", - "cursorsRearmed", - "cacheHits", - "cacheMisses", - "filtered", - "httpRequests", - "executionTime", - "peakMemoryUsage" - ], - "type": "object" - }, - "warnings": { - "description": "A list of query warnings.\n", - "items": { - "properties": { - "code": { - "description": "An error code.\n", - "type": "integer" - }, - "message": { - "description": "A description of the problem.\n", - "type": "string" - } - }, - "required": [ - "code", - "message", - "code", - "message", - "code", - "message" - ], - "type": "object" - }, - "type": "array" - } + "required": [ + "name", + "expression", + "overwrite" + ], + "type": "object" }, - 
"required": [ - "warnings", - "stats", - "warnings", - "stats", - "warnings", - "stats" - ], - "type": "object" + "type": "array" }, - "hasMore": { - "description": "A boolean indicator whether there are more results\navailable for the cursor on the server.\n\nNote that even if `hasMore` returns `true`, the next call might still return no\ndocuments. Once `hasMore` is `false`, the cursor is exhausted and the client\ncan stop asking for more results.\n", + "distributeShardsLike": { + "description": "The name of another collection. This collection uses the `replicationFactor`,\n`numberOfShards` and `shardingStrategy` properties of the other collection and\nthe shards of this collection are distributed in the same way as the shards of\nthe other collection.\n", + "type": "string" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, "type": "boolean" }, - "id": { - "description": "The ID of the cursor for fetching more result batches.\n", + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", "type": "string" }, - "nextBatchId": { - "description": "Only set if the `allowRetry` query option is enabled in v3.11.0.\nFrom v3.11.1 onward, this attribute is always set, except in the last batch.\n\nThe ID of the batch after the current one. The first batch has an ID of `1` and\nthe value is incremented by 1 with every batch. You can remember and use this\nbatch ID should retrieving the next batch fail. 
Use the\n`POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e` endpoint to ask for the batch again.\nYou can also request the next batch.\n", + "id": { + "description": "A unique identifier of the collection (deprecated).\n", "type": "string" }, - "result": { - "description": "An array of result documents for the current batch\n(might be empty if the query has no results).\n", - "items": { - "type": "" - }, - "type": "array" - } - }, - "required": [ - "error", - "code", - "hasMore", - "cached" - ], - "type": "object" - } - } - }, - "description": "The server responds with *HTTP 200* in case of success.\n" - }, - "400": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP status code.\n", - "type": "integer" + "isDisjoint": { + "description": "Whether the SmartGraph or EnterpriseGraph this collection belongs to is disjoint\n(Enterprise Edition only). This is an internal property. _(cluster only)_\n", + "type": "boolean" }, - "error": { - "description": "A flag to indicate that an error occurred (`false` in this case).\n", + "isSmart": { + "description": "Whether the collection is used in a SmartGraph or EnterpriseGraph (Enterprise Edition only).\nThis is an internal property. _(cluster only)_\n", "type": "boolean" }, - "errorMessage": { - "description": "A descriptive error message (if `error` is `true`).\n", + "isSystem": { + "description": "Whether the collection is a system collection. Collection names that starts with\nan underscore are usually system collections.\n", + "type": "boolean" + }, + "keyOptions": { + "description": "An object which contains key generation options.\n", + "properties": { + "allowUserKeys": { + "description": "If set to `true`, then you are allowed to supply\nown key values in the `_key` attribute of a document. 
If set to\n`false`, then the key generator is solely responsible for\ngenerating keys and an error is raised if you supply own key values in the\n`_key` attribute of documents.\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", + "type": "boolean" + }, + "increment": { + "description": "The increment value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" + }, + "lastValue": { + "description": "The offset value of the `autoincrement` or `padded` key generator.\nThis is an internal property for restoring dumps properly.\n", + "type": "integer" + }, + "offset": { + "description": "The initial offset value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" + }, + "type": { + "description": "Specifies the type of the key generator.\n", + "enum": [ + "traditional", + "autoincrement", + "uuid", + "padded" + ], + "type": "string" + } + }, + "required": [ + "type", + "allowUserKeys" + ], + "type": "object" + }, + "name": { + "description": "The name of this collection.\n", "type": "string" }, - "errorNum": { - "description": "A server error number (if `error` is `true`).\n", + "numberOfShards": { + "description": "The number of shards of the collection. _(cluster only)_\n", + "type": "integer" + }, + "replicationFactor": { + "description": "Contains how many copies of each shard are kept on different DB-Servers.\nIt is an integer number in the range of 1-10 or the string `\"satellite\"`\nfor SatelliteCollections (Enterprise Edition only). 
_(cluster only)_\n", + "type": "integer" + }, + "schema": { + "description": "An object that specifies the collection-level schema for documents.\n", + "type": "object" + }, + "shardKeys": { + "description": "Contains the names of document attributes that are used to\ndetermine the target shard for documents. _(cluster only)_\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "shardingStrategy": { + "description": "The sharding strategy selected for the collection. _(cluster only)_\n", + "enum": [ + "community-compat", + "enterprise-compat", + "enterprise-smart-edge-compat", + "hash", + "enterprise-hash-smart-edge", + "enterprise-hex-smart-vertex" + ], + "type": "string" + }, + "smartGraphAttribute": { + "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. Edges derive the\nattribute from their connected vertices (Enterprise Edition only). _(cluster only)_\n", + "type": "string" + }, + "smartJoinAttribute": { + "description": "Determines an attribute of the collection that must contain the shard key value\nof the referred-to SmartJoin collection (Enterprise Edition only). _(cluster only)_\n", + "type": "string" + }, + "syncByRevision": { + "description": "Whether the newer revision-based replication protocol is\nenabled for this collection. This is an internal property.\n", + "type": "boolean" + }, + "type": { + "description": "The type of the collection:\n - `0`: \"unknown\"\n - `2`: regular document collection\n - `3`: edge collection\n", + "type": "integer" + }, + "waitForSync": { + "description": "If `true`, creating, changing, or removing\ndocuments waits until the data has been synchronized to disk.\n", + "type": "boolean" + }, + "writeConcern": { + "description": "Determines how many copies of each shard are required to be\nin-sync on the different DB-Servers. 
If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\n\nIf `distributeShardsLike` is set, the default `writeConcern`\nis that of the prototype collection.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`.\nOtherwise, the default value is controlled by the current database's\ndefault `writeConcern`, which uses the `--cluster.write-concern`\nstartup option as default, which defaults to `1`. _(cluster only)_\n", "type": "integer" } }, "required": [ "error", "code", - "errorNum", - "errorMessage" + "name", + "type", + "status", + "statusString", + "isSystem", + "id", + "globallyUniqueId", + "waitForSync", + "keyOptions", + "schema", + "computedValues", + "cacheEnabled", + "syncByRevision" ], "type": "object" } } }, - "description": "If the cursor and the batch identifier are omitted, the server responds with\n*HTTP 400*.\n" - }, - "404": { - "description": "If no cursor with the specified identifier can be found, or if the requested\nbatch isn't available, the server responds with *HTTP 404*.\n" - }, - "410": { - "description": "The server responds with *HTTP 410* if a server which processes the query\nor is the leader for a shard which is used in the query stops responding, but\nthe connection has not been closed.\n" - }, - "503": { - "description": "The server responds with *HTTP 503* if a server which processes the query\nor is the leader for a shard which is used in the query is down, either for\ngoing through a restart, a failure or connectivity issues.\n" - } - }, - "summary": "Read a batch from the cursor again", - "tags": [ - "Queries" - ] - } - }, - "/_api/database": { - "get": { - "description": "Retrieves the list of all existing databases\n\n\u003e **INFO:**\nRetrieving the list of databases 
is only possible from within the `_system` database.\n", - "operationId": "listDatabases", - "responses": { - "200": { - "description": "is returned if the list of database was compiled successfully.\n" + "description": "All the collection properties.\n" }, "400": { - "description": "is returned if the request is invalid.\n" + "description": "If the `collection-name` placeholder is missing, then a *HTTP 400* is\nreturned.\n" }, - "403": { - "description": "is returned if the request was not executed in the `_system` database.\n" + "404": { + "description": "If the collection is unknown, then a *HTTP 404*\nis returned.\n" } }, - "summary": "List all databases", + "summary": "Get the properties of a collection", "tags": [ - "Databases" + "Collections" ] }, - "post": { - "description": "Creates a new database.\n\nThe response is a JSON object with the attribute `result` set to `true`.\n\n\u003e **INFO:**\nCreating a new database is only possible from within the `_system` database.\n", - "operationId": "createDatabase", + "put": { + "description": "Changes the properties of a collection. Only the provided attributes are\nupdated. 
Collection properties **cannot be changed** once a collection is\ncreated except for the listed properties, as well as the collection name via\nthe rename endpoint (but not in clusters).\n", + "operationId": "updateCollectionProperties", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the collection.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", + "in": "path", + "name": "collection-name", + "required": true, + "schema": { + "type": "string" + } + } + ], "requestBody": { "content": { "application/json": { "schema": { "properties": { - "name": { - "description": "Has to contain a valid database name. The name must conform to the selected\nnaming convention for databases. If the name contains Unicode characters, the\nname must be [NFC-normalized](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms).\nNon-normalized names will be rejected by arangod.\n", - "type": "string" - }, - "options": { - "description": "Optional object which can contain the following attributes:\n", - "properties": { - "replicationFactor": { - "description": "Default replication factor for new collections created in this database.\nSpecial values include \"satellite\", which will replicate the collection to\nevery DB-Server (Enterprise Edition only), and 1, which disables replication.\n_(cluster only)_\n", - "type": "integer" - }, - "sharding": { - "description": "The sharding method to use for new collections in this database. Valid values\nare: \"\", \"flexible\", or \"single\". The first two are equivalent. 
_(cluster only)_\n", - "type": "string" - }, - "writeConcern": { - "description": "Default write concern for new collections created in this database.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`. _(cluster only)_\n", - "type": "number" - } - }, - "type": "object" + "cacheEnabled": { + "description": "Whether the in-memory hash cache for documents should be enabled for this\ncollection. Can be controlled globally with the `--cache.size`\nstartup option. The cache can speed up repeated reads of the same documents via\ntheir document keys. If the same documents are not fetched often or are\nmodified frequently, then you may disable the cache to avoid the maintenance\ncosts.\n", + "type": "boolean" }, - "users": { - "description": "An array of user objects. The users will be granted *Administrate* permissions\nfor the new database. Users that do not exist yet will be created.\nIf `users` is not specified or does not contain any users, the default user\n`root` will be used to ensure that the new database will be accessible after it\nis created. The `root` user is created with an empty password should it not\nexist. Each user object can contain the following attributes:\n", + "computedValues": { + "description": "An optional list of objects, each representing a computed value.\n", "items": { "properties": { - "active": { - "description": "A flag indicating whether the user account should be activated or not.\nThe default value is `true`. If set to `false`, then the user won't be able to\nlog into the database. The default is `true`. 
The attribute is ignored for users\nthat already exist.\n", + "computeOn": { + "description": "An array of strings to define on which write operations the value shall be\ncomputed.\n", + "example": [ + "insert", + "update", + "replace" + ], + "items": { + "enum": [ + "insert", + "update", + "replace" + ], + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + "expression": { + "description": "An AQL `RETURN` operation with an expression that computes the desired value.\nSee [Computed Value Expressions](https://docs.arangodb.com/3.12/concepts/data-structure/documents/computed-values/#computed-value-expressions) for details.\n", + "type": "string" + }, + "failOnWarning": { + "default": false, + "description": "Whether to let the write operation fail if the expression produces a warning.\n", "type": "boolean" }, - "extra": { - "description": "A JSON object with extra user information. It is used by the web interface\nto store graph viewer settings and saved queries. Should not be set or\nmodified by end users, as custom attributes will not be preserved.\n", - "type": "object" + "keepNull": { + "default": true, + "description": "Whether the target attribute shall be set if the expression evaluates to `null`.\nYou can set the option to `false` to not set (or unset) the target attribute if\nthe expression returns `null`.\n", + "type": "boolean" }, - "passwd": { - "description": "The user password as a string. If not specified, it will default to an empty\nstring. The attribute is ignored for users that already exist.\n", + "name": { + "description": "The name of the target attribute. Can only be a top-level attribute, but you\nmay return a nested object. 
Cannot be `_key`, `_id`, `_rev`, `_from`, `_to`,\nor a shard key attribute.\n", "type": "string" }, - "username": { - "description": "Login name of an existing user or one to be created.\n", - "type": "string" + "overwrite": { + "description": "Whether the computed value shall take precedence over a user-provided or\nexisting attribute.\n", + "type": "boolean" } }, "required": [ - "username" + "name", + "expression", + "overwrite" ], "type": "object" }, "type": "array" + }, + "replicationFactor": { + "description": "In a cluster, this attribute determines how many copies\nof each shard are kept on different DB-Servers. The value 1 means that only one\ncopy (no synchronous replication) is kept. A value of k means that k-1 replicas\nare kept. For SatelliteCollections, it needs to be the string `\"satellite\"`,\nwhich matches the replication factor to the number of DB-Servers\n(Enterprise Edition only).\n\nAny two copies reside on different DB-Servers. Replication between them is\nsynchronous, that is, every write operation to the \"leader\" copy will be replicated\nto all \"follower\" replicas, before the write operation is reported successful.\n\nIf a server fails, this is detected automatically and one of the servers holding\ncopies take over, usually without an error being reported.\n", + "type": "integer" + }, + "schema": { + "description": "Optional object that specifies the collection level schema for\ndocuments. 
The attribute keys `rule`, `level` and `message` must follow the\nrules documented in [Document Schema Validation](https://docs.arangodb.com/3.12/concepts/data-structure/documents/schema-validation/)\n", + "type": "object" + }, + "waitForSync": { + "description": "If set to `true`, the data is synchronized to disk before returning from a\ndocument create, update, replace or removal operation.\n", + "type": "boolean" + }, + "writeConcern": { + "description": "Determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\n\nIf `distributeShardsLike` is set, the default `writeConcern`\nis that of the prototype collection.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`.\nOtherwise, the default value is controlled by the current database's\ndefault `writeConcern`, which uses the `--cluster.write-concern`\nstartup option as default, which defaults to `1`. 
_(cluster only)_\n", + "type": "integer" } }, - "required": [ - "name" - ], "type": "object" } } } }, - "responses": { - "201": { - "description": "is returned if the database was created successfully.\n" - }, - "400": { - "description": "is returned if the request parameters are invalid, if a database with the\nspecified name already exists, or if the configured limit to the number\nof databases has been reached.\n" - }, - "403": { - "description": "is returned if the request was not executed in the `_system` database.\n" - }, - "409": { - "description": "is returned if a database with the specified name already exists.\n" - } - }, - "summary": "Create a database", - "tags": [ - "Databases" - ] - } - }, - "/_api/database/current": { - "get": { - "description": "Retrieves the properties of the current database\n\nThe response is a JSON object with the following attributes:\n\n- `name`: the name of the current database\n- `id`: the id of the current database\n- `path`: the filesystem path of the current database\n- `isSystem`: whether or not the current database is the `_system` database\n- `sharding`: the default sharding method for collections created in this database\n- `replicationFactor`: the default replication factor for collections in this database\n- `writeConcern`: the default write concern for collections in this database\n", - "operationId": "getCurrentDatabase", - "responses": { - "200": { - "description": "is returned if the information was retrieved successfully.\n" - }, - "400": { - "description": "is returned if the request is invalid.\n" - }, - "404": { - "description": "is returned if the database could not be found.\n" - } - }, - "summary": "Get information about the current database", - "tags": [ - "Databases" - ] - } - }, - "/_api/database/user": { - "get": { - "description": "Retrieves the list of all databases the current user can access without\nspecifying a different username or password.\n", - "operationId": "listUserAccessibleDatabases", 
- "responses": { - "200": { - "description": "is returned if the list of database was compiled successfully.\n" - }, - "400": { - "description": "is returned if the request is invalid.\n" - } - }, - "summary": "List the accessible databases", - "tags": [ - "Databases" - ] - } - }, - "/_api/database/{database-name}": { - "delete": { - "description": "Drops the database along with all data stored in it.\n\n\u003e **INFO:**\nDropping a database is only possible from within the `_system` database.\nThe `_system` database itself cannot be dropped.\n", - "operationId": "deleteDatabase", - "parameters": [ - { - "description": "The name of the database\n", - "in": "path", - "name": "database-name", - "required": true, - "schema": { - "type": "string" - } - } - ], "responses": { "200": { - "description": "is returned if the database was dropped successfully.\n" - }, - "400": { - "description": "is returned if the request is malformed.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "cacheEnabled": { + "description": "Whether the in-memory hash cache for documents is enabled for this\ncollection.\n", + "type": "boolean" + }, + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "computedValues": { + "description": "A list of objects, each representing a computed value.\n", + "items": { + "properties": { + "computeOn": { + "description": "An array of strings that defines on which write operations the value is\ncomputed.\n", + "example": [ + "insert", + "update", + "replace" + ], + "items": { + "enum": [ + "insert", + "update", + "replace" + ], + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + "expression": { + "description": "An AQL `RETURN` operation with an expression that computes the desired value.\n", + "type": "string" + }, + "failOnWarning": { + "description": "Whether the write operation fails if the expression produces a warning.\n", + "type": "boolean" + }, + 
"keepNull": { + "description": "Whether the target attribute is set if the expression evaluates to `null`.\n", + "type": "boolean" + }, + "name": { + "description": "The name of the target attribute.\n", + "type": "string" + }, + "overwrite": { + "description": "Whether the computed value takes precedence over a user-provided or\nexisting attribute.\n", + "type": "boolean" + } + }, + "required": [ + "name", + "expression", + "overwrite" + ], + "type": "object" + }, + "type": "array" + }, + "distributeShardsLike": { + "description": "The name of another collection. This collection uses the `replicationFactor`,\n`numberOfShards` and `shardingStrategy` properties of the other collection and\nthe shards of this collection are distributed in the same way as the shards of\nthe other collection.\n", + "type": "string" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isDisjoint": { + "description": "Whether the SmartGraph or EnterpriseGraph this collection belongs to is disjoint\n(Enterprise Edition only). This is an internal property. _(cluster only)_\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the collection is used in a SmartGraph or EnterpriseGraph (Enterprise Edition only).\nThis is an internal property. _(cluster only)_\n", + "type": "boolean" + }, + "isSystem": { + "description": "Whether the collection is a system collection. 
Collection names that starts with\nan underscore are usually system collections.\n", + "type": "boolean" + }, + "keyOptions": { + "description": "An object which contains key generation options.\n", + "properties": { + "allowUserKeys": { + "description": "If set to `true`, then you are allowed to supply\nown key values in the `_key` attribute of a document. If set to\n`false`, then the key generator is solely responsible for\ngenerating keys and an error is raised if you supply own key values in the\n`_key` attribute of documents.\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", + "type": "boolean" + }, + "increment": { + "description": "The increment value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" + }, + "lastValue": { + "description": "The offset value of the `autoincrement` or `padded` key generator.\nThis is an internal property for restoring dumps properly.\n", + "type": "integer" + }, + "offset": { + "description": "The initial offset value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" + }, + "type": { + "description": "Specifies the type of the key generator.\n", + "enum": [ + "traditional", + "autoincrement", + "uuid", + "padded" + ], + "type": "string" + } + }, + "required": [ + "type", + "allowUserKeys" + ], + "type": "object" + }, + "name": { + "description": "The name of this collection.\n", + "type": "string" + }, + "numberOfShards": { + "description": "The number of shards of the collection. 
_(cluster only)_\n", + "type": "integer" + }, + "replicationFactor": { + "description": "Contains how many copies of each shard are kept on different DB-Servers.\nIt is an integer number in the range of 1-10 or the string `\"satellite\"`\nfor SatelliteCollections (Enterprise Edition only). _(cluster only)_\n", + "type": "integer" + }, + "schema": { + "description": "An object that specifies the collection-level schema for documents.\n", + "type": "object" + }, + "shardKeys": { + "description": "Contains the names of document attributes that are used to\ndetermine the target shard for documents. _(cluster only)_\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "shardingStrategy": { + "description": "The sharding strategy selected for the collection. _(cluster only)_\n", + "enum": [ + "community-compat", + "enterprise-compat", + "enterprise-smart-edge-compat", + "hash", + "enterprise-hash-smart-edge", + "enterprise-hex-smart-vertex" + ], + "type": "string" + }, + "smartGraphAttribute": { + "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. Edges derive the\nattribute from their connected vertices (Enterprise Edition only). _(cluster only)_\n", + "type": "string" + }, + "smartJoinAttribute": { + "description": "Determines an attribute of the collection that must contain the shard key value\nof the referred-to SmartJoin collection (Enterprise Edition only). _(cluster only)_\n", + "type": "string" + }, + "syncByRevision": { + "description": "Whether the newer revision-based replication protocol is\nenabled for this collection. 
This is an internal property.\n", + "type": "boolean" + }, + "type": { + "description": "The type of the collection:\n - `0`: \"unknown\"\n - `2`: regular document collection\n - `3`: edge collection\n", + "type": "integer" + }, + "waitForSync": { + "description": "If `true`, creating, changing, or removing\ndocuments waits until the data has been synchronized to disk.\n", + "type": "boolean" + }, + "writeConcern": { + "description": "Determines how many copies of each shard are required to be\nin-sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\n\nIf `distributeShardsLike` is set, the default `writeConcern`\nis that of the prototype collection.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`.\nOtherwise, the default value is controlled by the current database's\ndefault `writeConcern`, which uses the `--cluster.write-concern`\nstartup option as default, which defaults to `1`. 
_(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "name", + "type", + "status", + "statusString", + "isSystem", + "id", + "globallyUniqueId", + "waitForSync", + "keyOptions", + "schema", + "computedValues", + "cacheEnabled", + "syncByRevision" + ], + "type": "object" + } + } + }, + "description": "The collection has been updated successfully.\n" }, - "403": { - "description": "is returned if the request was not executed in the `_system` database.\n" + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter is missing.\n" }, "404": { - "description": "is returned if the database could not be found.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" } }, - "summary": "Drop a 
database", + "summary": "Change the properties of a collection", "tags": [ - "Databases" + "Collections" ] } }, - "/_api/document/{collection}": { - "delete": { - "description": "The body of the request is an array consisting of selectors for\ndocuments. A selector can either be a string with a key or a string\nwith a document identifier or an object with a `_key` attribute. This\nAPI call removes all specified documents from `collection`.\nIf the `ignoreRevs` query parameter is `false` and the\nselector is an object and has a `_rev` attribute, it is a\nprecondition that the actual revision of the removed document in the\ncollection is the specified one.\n\nThe body of the response is an array of the same length as the input\narray. For each input selector, the output contains a JSON object\nwith the information about the outcome of the operation. If no error\noccurred, then such an object has the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\nIn case of an error, the object has the `error` attribute set to `true`\nand `errorCode` set to the error code.\n\nIf the `waitForSync` parameter is not specified or set to `false`,\nthen the collection's default `waitForSync` behavior is applied.\nThe `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync`\nvalue of `true`.\n\nIf the query parameter `returnOld` is `true`, then\nthe complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nNote that if any precondition is violated or an error occurred with\nsome of the documents, the return code is still 200 or 202, but the\n`X-Arango-Error-Codes` HTTP header is set. 
It contains a map of the\nerror codes and how often each kind of error occurred. For example,\n`1200:17,1205:10` means that in 17 cases the error 1200 (\"revision conflict\")\nhas happened, and in 10 cases the error 1205 (\"illegal document handle\").\n", - "operationId": "deleteDocuments", + "/_db/{database-name}/_api/collection/{collection-name}/recalculateCount": { + "put": { + "description": "Recalculates the document count of a collection, if it ever becomes inconsistent.\n", + "operationId": "recalculateCollectionCount", "parameters": [ { - "description": "Collection from which documents are removed.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "Wait until deletion operation has been synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete previous revision of the changed\ndocument under the attribute `old` in the result.\n", - "in": "query", - "name": "returnOld", - "required": false, + "description": "The name of the collection.\n", + "in": "path", + "name": "collection-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "count": { + "description": "The recalculated document count.\nThis attribute is not present when using a cluster.\n", + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "result": { + "example": true, + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "result" + ], + "type": "object" + } + } + }, + 
"description": "The document count has been recalculated successfully.\n" }, - { - "description": "If set to `true`, an empty object is returned as response if all document operations\nsucceed. No meta-data is returned for the deleted documents. If at least one of\nthe operations raises an error, an array with the error object(s) is returned.\n\nYou can use this option to save network traffic but you cannot map any errors\nto the inputs of your request.\n", - "in": "query", - "name": "silent", - "required": false, - "schema": { - "type": "boolean" - } + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter is missing.\n" }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" + } + }, + "summary": "Recalculate 
the document count of a collection", + "tags": [ + "Collections" + ] + } + }, + "/_db/{database-name}/_api/collection/{collection-name}/rename": { + "put": { + "description": "Renames a collection.\n\n\u003e **INFO:**\nRenaming collections is not supported in cluster deployments.\n\n\nIf renaming the collection succeeds, then the collection is also renamed in\nall graph definitions inside the `_graphs` collection in the current database.\n", + "operationId": "renameCollection", + "parameters": [ { - "description": "If set to `true`, ignore any `_rev` attribute in the selectors. No\nrevision check is performed. If set to `false` then revisions are checked.\nThe default is `true`.\n", - "in": "query", - "name": "ignoreRevs", - "required": false, + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "Whether to delete existing entries from in-memory index caches and refill them\nif document removals affect the edge index or cache-enabled persistent indexes.\n", - "in": "query", - "name": "refillIndexCaches", - "required": false, + "description": "The name of the collection to rename.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", + "in": "path", + "name": "collection-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], @@ -8422,13 +7995,13 @@ "application/json": { "schema": { "properties": { - "documents": { - "description": "A JSON representation of an array of document updates as objects. 
\nEach element has to contain a `_key` attribute.\n", - "type": "json" + "name": { + "description": "The new collection name.\n", + "type": "string" } }, "required": [ - "documents" + "name" ], "type": "object" } @@ -8437,248 +8010,167 @@ }, "responses": { "200": { - "description": "is returned if `waitForSync` was `true`.\n" - }, - "202": { - "description": "is returned if `waitForSync` was `false`.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isSystem": { + "description": "Whether the collection is a system collection. 
Collection names that starts with\nan underscore are usually system collections.\n", + "example": false, + "type": "boolean" + }, + "name": { + "description": "The name of the collection.\n", + "example": "coll", + "type": "string" + }, + "status": { + "description": "The status of the collection.\n- `3`: loaded\n- `5`: deleted\n\nEvery other status indicates a corrupted collection.\n", + "example": 3, + "type": "integer" + }, + "type": { + "description": "The type of the collection:\n- `0`: \"unknown\"\n- `2`: regular document collection\n- `3`: edge collection\n", + "example": 2, + "type": "integer" + } + }, + "required": [ + "error", + "code", + "name", + "type", + "isSystem", + "status", + "id", + "globallyUniqueId" + ], + "type": "object" + } + } + }, + "description": "The collection has been renamed successfully.\n" }, - "403": { - "description": "with the error code `1004` is returned if the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter or the `name` attribute is missing.\n" }, "404": { - "description": "is returned if the collection was not found.\nThe response body contains an error document in this case.\n" - }, - "503": { - "description": "is returned if the system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - } - }, - "summary": "Remove multiple documents", - "tags": [ - "Documents" - ] - }, - "patch": { - "description": "Partially updates documents, the documents to update are specified\nby the `_key` attributes in the body objects. 
The body of the\nrequest must contain a JSON array of document updates with the\nattributes to patch (the patch documents). All attributes from the\npatch documents are added to the existing documents if they do\nnot yet exist, and overwritten in the existing documents if they do\nexist there.\n\nThe values of the `_key`, `_id`, and `_rev` system attributes as well as\nattributes used as sharding keys cannot be changed.\n\nSetting an attribute value to `null` in the patch documents causes a\nvalue of `null` to be saved for the attribute by default.\n\nIf `ignoreRevs` is `false` and there is a `_rev` attribute in a\ndocument in the body and its value does not match the revision of\nthe corresponding document in the database, the precondition is\nviolated.\n\nCluster only: The patch document _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. Should the shard keys\nvalues be incorrect ArangoDB may answer with a *not found* error\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document replacement operation to disk even in case\nthat the `waitForSync` flag had been disabled for the entire collection.\nThus, the `waitForSync` query parameter can be used to force synchronization\nof just specific operations. To use this, set the `waitForSync` parameter\nto `true`. If the `waitForSync` parameter is not specified or set to\n`false`, then the collection's default `waitForSync` behavior is\napplied. The `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync` value\nof `true`.\n\nThe body of the response contains a JSON array of the same length\nas the input array with the information about the identifier and the\nrevision of the updated documents. 
Each element has the following\nattributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the new document revision.\n\nIn case of an error or violated precondition, an error\nobject with the attribute `error` set to `true` and the attribute\n`errorCode` set to the error code is built.\n\nIf the query parameter `returnOld` is `true`, then, for each\ngenerated document, the complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nIf the query parameter `returnNew` is `true`, then, for each\ngenerated document, the complete new document is returned under\nthe `new` attribute in the result.\n\nNote that if any precondition is violated or an error occurred with\nsome of the documents, the return code is still 201 or 202, but the\n`X-Arango-Error-Codes` HTTP header is set. It contains a map of the\nerror codes and how often each kind of error occurred. For example,\n`1200:17,1205:10` means that in 17 cases the error 1200 (\"revision conflict\")\nhas happened, and in 10 cases the error 1205 (\"illegal document handle\").\n", - "operationId": "updateDocuments", - "parameters": [ - { - "description": "Name of the `collection` in which the documents are to be updated.\n", - "in": "path", - "name": "collection", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "If the intention is to delete existing attributes with the patch\ncommand, set the `keepNull` URL query parameter to `false`. 
This modifies the\nbehavior of the patch command to remove top-level attributes and sub-attributes\nfrom the existing document that are contained in the patch document with an\nattribute value of `null` (but not attributes of objects that are nested inside\nof arrays).\n", - "in": "query", - "name": "keepNull", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Controls whether objects (not arrays) are merged if present in\nboth the existing and the patch document. If set to `false`, the\nvalue in the patch document overwrites the existing document's\nvalue. If set to `true`, objects are merged. The default is\n`true`.\n", - "in": "query", - "name": "mergeObjects", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Wait until the new documents have been synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "By default, or if this is set to `true`, the `_rev` attributes in\nthe given documents are ignored. If this is set to `false`, then\nany `_rev` attribute given in a body document is taken as a\nprecondition. The document is only updated if the current revision\nis the one specified.\n", - "in": "query", - "name": "ignoreRevs", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete previous revision of the changed\ndocuments under the attribute `old` in the result.\n", - "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete new documents under the attribute `new`\nin the result.\n", - "in": "query", - "name": "returnNew", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If set to `true`, an empty object is returned as response if all document operations\nsucceed. 
No meta-data is returned for the updated documents. If at least one\noperation raises an error, an array with the error object(s) is returned.\n\nYou can use this option to save network traffic but you cannot map any errors\nto the inputs of your request.\n", - "in": "query", - "name": "silent", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Whether to update existing entries in in-memory index caches if document updates\naffect the edge index or cache-enabled persistent indexes.\n", - "in": "query", - "name": "refillIndexCaches", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "documents": { - "description": "A JSON representation of an array of document updates as objects. \nEach element has to contain a `_key` attribute.\n", - "type": "json" - } - }, - "required": [ - "documents" - ], - "type": "object" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } } - } - } - }, - "responses": { - "201": { - "description": "is returned if `waitForSync` was `true` and operations were processed.\n" - }, - "202": { - "description": "is returned if `waitForSync` was `false` and operations were processed.\n" - }, - "400": { - "description": "is returned if the body does not contain a valid JSON representation\nof an array of documents. 
The response body contains\nan error document in this case.\n" - }, - "403": { - "description": "with the error code `1004` is returned if the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - }, - "404": { - "description": "is returned if the collection was not found.\n" - }, - "503": { - "description": "is returned if the system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + }, + "description": "A collection called `collection-name` could not be found.\n" } }, - "summary": "Update multiple documents", + "summary": "Rename a collection", "tags": [ - "Documents" + "Collections" ] - }, - "post": { - "description": "Creates a new document from the document given in the body, unless there\nis already a document with the `_key` given. 
If no `_key` is given, a\nnew unique `_key` is generated automatically. The `_id` is automatically\nset in both cases, derived from the collection name and `_key`.\n\n\u003e **INFO:**\nAn `_id` or `_rev` attribute specified in the body is ignored.\n\n\nIf the document was created successfully, then the `Location` header\ncontains the path to the newly created document. The `ETag` header field\ncontains the revision of the document. Both are only set in the single\ndocument case.\n\nUnless `silent` is set to `true`, the body of the response contains a\nJSON object with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n\nIf the collection parameter `waitForSync` is `false`, then the call\nreturns as soon as the document has been accepted. It does not wait\nuntil the documents have been synced to disk.\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document creation operation to disk even in\ncase that the `waitForSync` flag had been disabled for the entire\ncollection. Thus, the `waitForSync` query parameter can be used to\nforce synchronization of just this specific operations. To use this,\nset the `waitForSync` parameter to `true`. If the `waitForSync`\nparameter is not specified or set to `false`, then the collection's\ndefault `waitForSync` behavior is applied. 
The `waitForSync` query\nparameter cannot be used to disable synchronization for collections\nthat have a default `waitForSync` value of `true`.\n\nIf the query parameter `returnNew` is `true`, then, for each\ngenerated document, the complete new document is returned under\nthe `new` attribute in the result.\n", - "operationId": "createDocument", + } + }, + "/_db/{database-name}/_api/collection/{collection-name}/responsibleShard": { + "put": { + "description": "Returns the ID of the shard that is responsible for the given document\n(if the document exists) or that would be responsible if such document\nexisted.\n\nThe request must body must contain a JSON document with at least the\ncollection's shard key attributes set to some values.\n\nThe response is a JSON object with a `shardId` attribute, which will\ncontain the ID of the responsible shard.\n\n\u003e **INFO:**\nThis method is only available in cluster deployments on Coordinators.\n", + "operationId": "getResponsibleShard", "parameters": [ { - "description": "Name of the `collection` in which the document is to be created.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "Wait until document has been synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Additionally return the complete new document under the attribute `new`\nin the result.\n", - "in": "query", - "name": "returnNew", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Additionally return the complete old document under the attribute `old`\nin the result. 
Only available if the overwrite option is used.\n", - "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If set to `true`, an empty object is returned as response if the document operation\nsucceeds. No meta-data is returned for the created document. If the\noperation raises an error, an error object is returned.\n\nYou can use this option to save network traffic.\n", - "in": "query", - "name": "silent", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If set to `true`, the insert becomes a replace-insert. If a document with the\nsame `_key` already exists, the new document is not rejected with unique\nconstraint violation error but replaces the old document. Note that operations\nwith `overwrite` parameter require a `_key` attribute in the request payload,\ntherefore they can only be performed on collections sharded by `_key`.\n", - "in": "query", - "name": "overwrite", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "This option supersedes `overwrite` and offers the following modes:\n- `\"ignore\"`: if a document with the specified `_key` value exists already,\n nothing is done and no write operation is carried out. The\n insert operation returns success in this case. This mode does not\n support returning the old document version using `RETURN OLD`. When using\n `RETURN NEW`, `null` is returned in case the document already existed.\n- `\"replace\"`: if a document with the specified `_key` value exists already,\n it is overwritten with the specified document value. 
This mode is\n also used when no overwrite mode is specified but the `overwrite`\n flag is set to `true`.\n- `\"update\"`: if a document with the specified `_key` value exists already,\n it is patched (partially updated) with the specified document value.\n The overwrite mode can be further controlled via the `keepNull` and\n `mergeObjects` parameters.\n- `\"conflict\"`: if a document with the specified `_key` value exists already,\n return a unique constraint violation error so that the insert operation\n fails. This is also the default behavior in case the overwrite mode is\n not set, and the `overwrite` flag is `false` or not set either.\n", - "in": "query", - "name": "overwriteMode", - "required": false, + "description": "The name of the collection.\n", + "in": "path", + "name": "collection-name", + "required": true, "schema": { "type": "string" } - }, - { - "description": "If the intention is to delete existing attributes with the update-insert\ncommand, set the `keepNull` URL query parameter to `false`. This modifies the\nbehavior of the patch command to remove top-level attributes and sub-attributes\nfrom the existing document that are contained in the patch document with an\nattribute value of `null` (but not attributes of objects that are nested inside\nof arrays). This option controls the update-insert behavior only.\n", - "in": "query", - "name": "keepNull", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Controls whether objects (not arrays) are merged if present in both, the\nexisting and the update-insert document. If set to `false`, the value in the\npatch document overwrites the existing document's value. If set to `true`,\nobjects are merged. 
The default is `true`.\nThis option controls the update-insert behavior only.\n", - "in": "query", - "name": "mergeObjects", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Whether to add new entries to in-memory index caches if document insertions\naffect the edge index or cache-enabled persistent indexes.\n", - "in": "query", - "name": "refillIndexCaches", - "required": false, - "schema": { - "type": "boolean" - } } ], "requestBody": { @@ -8687,7 +8179,7 @@ "schema": { "properties": { "document": { - "description": "A JSON representation of a single document.\n", + "description": "The request body must be a JSON object with at least the shard key\nattributes set to some values, but it may also be a full document.\n", "type": "object" } }, @@ -8700,586 +8192,1011 @@ } }, "responses": { - "201": { - "description": "is returned if the documents were created successfully and\n`waitForSync` was `true`.\n" - }, - "202": { - "description": "is returned if the documents were created successfully and\n`waitForSync` was `false`.\n" - }, - "400": { - "description": "is returned if the body does not contain a valid JSON representation\nof one document. The response body contains\nan error document in this case.\n" - }, - "403": { - "description": "with the error code `1004` is returned if the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - }, - "404": { - "description": "is returned if the collection specified by `collection` is unknown.\nThe response body contains an error document in this case.\n" - }, - "409": { - "description": "There are two possible reasons for this error in the single document case:\n\n- A document with the same qualifiers in an indexed attribute conflicts with an\n already existing document and thus violates the unique constraint.\n The response body contains an error document with the `errorNum` set to\n `1210` (`ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED`) in this case.\n- Locking the document key or some unique index entry failed to due to another\n concurrent operation that operates on the same document. This is also referred\n to as a _write-write conflict_. The response body contains an error document\n with the `errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.\n" - }, - "503": { - "description": "is returned if the system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - } - }, - "summary": "Create a document", - "tags": [ - "Documents" - ] - }, - "put": { - "description": "Replaces multiple documents in the specified collection with the\nones in the body, the replaced documents are specified by the `_key`\nattributes in the body documents.\n\nThe values of the `_key`, `_id`, and `_rev` system attributes as well as\nattributes used as sharding keys cannot be changed.\n\nIf `ignoreRevs` is `false` and there is a `_rev` attribute in a\ndocument in the body and its value does not match the revision of\nthe corresponding document in the database, the precondition is\nviolated.\n\nCluster only: The replace documents _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. Should the shard keys\nvalues be incorrect ArangoDB may answer with a `not found` error.\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document replacement operation to disk even in case\nthat the `waitForSync` flag had been disabled for the entire collection.\nThus, the `waitForSync` query parameter can be used to force synchronization\nof just specific operations. To use this, set the `waitForSync` parameter\nto `true`. If the `waitForSync` parameter is not specified or set to\n`false`, then the collection's default `waitForSync` behavior is\napplied. The `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync` value\nof `true`.\n\nThe body of the response contains a JSON array of the same length\nas the input array with the information about the identifier and the\nrevision of the replaced documents. 
In each element has the following\nattributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the new document revision.\n\nIn case of an error or violated precondition, an error\nobject with the attribute `error` set to `true` and the attribute\n`errorCode` set to the error code is built.\n\nIf the query parameter `returnOld` is `true`, then, for each\ngenerated document, the complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nIf the query parameter `returnNew` is `true`, then, for each\ngenerated document, the complete new document is returned under\nthe `new` attribute in the result.\n\nNote that if any precondition is violated or an error occurred with\nsome of the documents, the return code is still 201 or 202, but the\n`X-Arango-Error-Codes` HTTP header is set. It contains a map of the\nerror codes and how often each kind of error occurred. For example,\n`1200:17,1205:10` means that in 17 cases the error 1200 (\"revision conflict\")\nhas happened, and in 10 cases the error 1205 (\"illegal document handle\").\n", - "operationId": "replaceDocuments", - "parameters": [ - { - "description": "This URL parameter is the name of the collection in which the\ndocuments are replaced.\n", - "in": "path", - "name": "collection", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "Wait until the new documents have been synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "By default, or if this is set to `true`, the `_rev` attributes in\nthe given documents are ignored. If this is set to `false`, then\nany `_rev` attribute given in a body document is taken as a\nprecondition. 
The document is only replaced if the current revision\nis the one specified.\n", - "in": "query", - "name": "ignoreRevs", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete previous revision of the changed\ndocuments under the attribute `old` in the result.\n", - "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete new documents under the attribute `new`\nin the result.\n", - "in": "query", - "name": "returnNew", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If set to `true`, an empty object is returned as response if all document operations\nsucceed. No meta-data is returned for the replaced documents. If at least one\noperation raises an error, an array with the error object(s) is returned.\n\nYou can use this option to save network traffic but you cannot map any errors\nto the inputs of your request.\n", - "in": "query", - "name": "silent", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Whether to update existing entries in in-memory index caches if documents\nreplacements affect the edge index or cache-enabled persistent indexes.\n", - "in": "query", - "name": "refillIndexCaches", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "documents": { - "description": "A JSON representation of an array of documents.\nEach element has to contain a `_key` attribute.\n", - "type": "json" - } - }, - "required": [ - "documents" - ], - "type": "object" + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + 
"example": false, + "type": "boolean" + }, + "shardId": { + "description": "The ID of the responsible shard\n", + "type": "string" + } + }, + "required": [ + "error", + "code", + "shardId" + ], + "type": "object" + } } - } - } - }, - "responses": { - "201": { - "description": "is returned if `waitForSync` was `true` and operations were processed.\n" - }, - "202": { - "description": "is returned if `waitForSync` was `false` and operations were processed.\n" + }, + "description": "Returns the ID of the responsible shard.\n" }, "400": { - "description": "is returned if the body does not contain a valid JSON representation\nof an array of documents. The response body contains\nan error document in this case.\n" - }, - "403": { - "description": "with the error code `1004` is returned if the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter is missing or not all of the\ncollection's shard key attributes are present in the input document.\n" }, "404": { - "description": "is returned if the collection was not found.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" }, - "503": { - "description": "is returned if the system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. 
This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + "501": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 501, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The method has been called on a single server.\n" } }, - "summary": "Replace multiple documents", + "summary": "Get the responsible shard for a document", "tags": [ - "Documents" + "Collections" ] } }, - "/_api/document/{collection}#get": { - "put": { - "description": "\u003e **WARNING:**\nThe endpoint for getting multiple documents is the same as for replacing\nmultiple documents but with an additional query parameter:\n`PUT /_api/document/{collection}?onlyget=true`. 
This is because a lot of\nsoftware does not support payload bodies in `GET` requests.\n\n\nReturns the documents identified by their `_key` in the body objects.\nThe body of the request _must_ contain a JSON array of either\nstrings (the `_key` values to lookup) or search documents.\n\nA search document _must_ contain at least a value for the `_key` field.\nA value for `_rev` _may_ be specified to verify whether the document\nhas the same revision value, unless _ignoreRevs_ is set to false.\n\nCluster only: The search document _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. Should the shard keys\nvalues be incorrect ArangoDB may answer with a *not found* error.\n\nThe returned array of documents contain three special attributes: \n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n", - "operationId": "getDocuments", + "/_db/{database-name}/_api/collection/{collection-name}/revision": { + "get": { + "description": "The response contains the collection's latest used revision ID.\nThe revision ID is a server-generated string that clients can use to\ncheck whether data in a collection has changed since the last revision check.\n", + "operationId": "getCollectionRevision", "parameters": [ { - "description": "Name of the `collection` from which the documents are to be read.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "This parameter is required to be `true`, otherwise a replace\noperation is executed!\n", - "in": "query", - "name": "onlyget", + "description": "The name of the collection.\n\n\u003e 
**WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", + "in": "path", + "name": "collection-name", "required": true, "schema": { - "type": "boolean" - } - }, - { - "description": "Should the value be `true` (the default):\nIf a search document contains a value for the `_rev` field,\nthen the document is only returned if it has the same revision value.\nOtherwise a precondition failed error is returned.\n", - "in": "query", - "name": "ignoreRevs", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. This may result in \"dirty reads\".\n\nThe header is ignored if this operation is part of a Stream Transaction\n(`x-arango-trx-id` header). The header set when creating the transaction decides\nabout dirty reads for the entire transaction, not the individual read operations.\n", - "in": "header", - "name": "x-arango-allow-dirty-read", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", - "in": "header", - "name": "x-arango-trx-id", - "required": false, - "schema": { - "type": "string" + "type": "string" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "documents": { - "description": "An array of documents to retrieve.\n", - "type": "json" - } - }, - "required": [ - "documents" - ], - "type": "object" - } - } - } - }, "responses": { "200": { - "description": "is returned if no error happened\n" + "content": { + "application/json": { + "schema": { + "properties": { + "cacheEnabled": { + "description": "Whether the in-memory hash cache for documents is enabled for this\ncollection.\n", + "type": 
"boolean" + }, + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "computedValues": { + "description": "A list of objects, each representing a computed value.\n", + "items": { + "properties": { + "computeOn": { + "description": "An array of strings that defines on which write operations the value is\ncomputed.\n", + "example": [ + "insert", + "update", + "replace" + ], + "items": { + "enum": [ + "insert", + "update", + "replace" + ], + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + "expression": { + "description": "An AQL `RETURN` operation with an expression that computes the desired value.\n", + "type": "string" + }, + "failOnWarning": { + "description": "Whether the write operation fails if the expression produces a warning.\n", + "type": "boolean" + }, + "keepNull": { + "description": "Whether the target attribute is set if the expression evaluates to `null`.\n", + "type": "boolean" + }, + "name": { + "description": "The name of the target attribute.\n", + "type": "string" + }, + "overwrite": { + "description": "Whether the computed value takes precedence over a user-provided or\nexisting attribute.\n", + "type": "boolean" + } + }, + "required": [ + "name", + "expression", + "overwrite" + ], + "type": "object" + }, + "type": "array" + }, + "distributeShardsLike": { + "description": "The name of another collection. This collection uses the `replicationFactor`,\n`numberOfShards` and `shardingStrategy` properties of the other collection and\nthe shards of this collection are distributed in the same way as the shards of\nthe other collection.\n", + "type": "string" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "globallyUniqueId": { + "description": "A unique identifier of the collection. 
This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isDisjoint": { + "description": "Whether the SmartGraph or EnterpriseGraph this collection belongs to is disjoint\n(Enterprise Edition only). This is an internal property. _(cluster only)_\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the collection is used in a SmartGraph or EnterpriseGraph (Enterprise Edition only).\nThis is an internal property. _(cluster only)_\n", + "type": "boolean" + }, + "isSystem": { + "description": "Whether the collection is a system collection. Collection names that starts with\nan underscore are usually system collections.\n", + "type": "boolean" + }, + "keyOptions": { + "description": "An object which contains key generation options.\n", + "properties": { + "allowUserKeys": { + "description": "If set to `true`, then you are allowed to supply\nown key values in the `_key` attribute of a document. If set to\n`false`, then the key generator is solely responsible for\ngenerating keys and an error is raised if you supply own key values in the\n`_key` attribute of documents.\n\n\u003e **WARNING:**\nYou should not use both user-specified and automatically generated document keys\nin the same collection in cluster deployments for collections with more than a\nsingle shard. 
Mixing the two can lead to conflicts because Coordinators that\nauto-generate keys in this case are not aware of all keys which are already used.\n", + "type": "boolean" + }, + "increment": { + "description": "The increment value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" + }, + "lastValue": { + "description": "The offset value of the `autoincrement` or `padded` key generator.\nThis is an internal property for restoring dumps properly.\n", + "type": "integer" + }, + "offset": { + "description": "The initial offset value for the `autoincrement` key generator.\nNot used by other key generator types.\n", + "type": "integer" + }, + "type": { + "description": "Specifies the type of the key generator.\n", + "enum": [ + "traditional", + "autoincrement", + "uuid", + "padded" + ], + "type": "string" + } + }, + "required": [ + "type", + "allowUserKeys" + ], + "type": "object" + }, + "name": { + "description": "The name of this collection.\n", + "type": "string" + }, + "numberOfShards": { + "description": "The number of shards of the collection. _(cluster only)_\n", + "type": "integer" + }, + "replicationFactor": { + "description": "Contains how many copies of each shard are kept on different DB-Servers.\nIt is an integer number in the range of 1-10 or the string `\"satellite\"`\nfor SatelliteCollections (Enterprise Edition only). _(cluster only)_\n", + "type": "integer" + }, + "revision": { + "description": "The collection revision ID as a string.\n", + "type": "string" + }, + "schema": { + "description": "An object that specifies the collection-level schema for documents.\n", + "type": "object" + }, + "shardKeys": { + "description": "Contains the names of document attributes that are used to\ndetermine the target shard for documents. _(cluster only)_\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "shardingStrategy": { + "description": "The sharding strategy selected for the collection. 
_(cluster only)_\n", + "enum": [ + "community-compat", + "enterprise-compat", + "enterprise-smart-edge-compat", + "hash", + "enterprise-hash-smart-edge", + "enterprise-hex-smart-vertex" + ], + "type": "string" + }, + "smartGraphAttribute": { + "description": "The attribute that is used for sharding: vertices with the same value of\nthis attribute are placed in the same shard. All vertices are required to\nhave this attribute set and it has to be a string. Edges derive the\nattribute from their connected vertices (Enterprise Edition only). _(cluster only)_\n", + "type": "string" + }, + "smartJoinAttribute": { + "description": "Determines an attribute of the collection that must contain the shard key value\nof the referred-to SmartJoin collection (Enterprise Edition only). _(cluster only)_\n", + "type": "string" + }, + "syncByRevision": { + "description": "Whether the newer revision-based replication protocol is\nenabled for this collection. This is an internal property.\n", + "type": "boolean" + }, + "type": { + "description": "The type of the collection:\n - `0`: \"unknown\"\n - `2`: regular document collection\n - `3`: edge collection\n", + "type": "integer" + }, + "waitForSync": { + "description": "If `true`, creating, changing, or removing\ndocuments waits until the data has been synchronized to disk.\n", + "type": "boolean" + }, + "writeConcern": { + "description": "Determines how many copies of each shard are required to be\nin-sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. 
The value of\n`writeConcern` cannot be greater than `replicationFactor`.\n\nIf `distributeShardsLike` is set, the default `writeConcern`\nis that of the prototype collection.\nFor SatelliteCollections, the `writeConcern` is automatically controlled to\nequal the number of DB-Servers and has a value of `0`.\nOtherwise, the default value is controlled by the current database's\ndefault `writeConcern`, which uses the `--cluster.write-concern`\nstartup option as default, which defaults to `1`. _(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "revision", + "error", + "code", + "name", + "type", + "status", + "statusString", + "isSystem", + "id", + "globallyUniqueId", + "waitForSync", + "keyOptions", + "schema", + "computedValues", + "cacheEnabled", + "syncByRevision" + ], + "type": "object" + } + } + }, + "description": "All collection properties but additionally the collection `revision`.\n" }, "400": { - "description": "is returned if the body does not contain a valid JSON representation\nof an array of documents. 
The response body contains\nan error document in this case.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter is missing.\n" }, "404": { - "description": "is returned if the collection was not found.\n" - } - }, - "summary": "Get multiple documents", - "tags": [ - "Documents" - ] + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" + } + }, + "summary": "Get the collection revision ID", + "tags": [ + "Collections" + ] } }, - "/_api/document/{collection}#multiple": { - "post": { - "description": "Creates new documents from the documents given in the body, unless there\nis already a document with the `_key` given. If no `_key` is given, a new\nunique `_key` is generated automatically. 
The `_id` is automatically\nset in both cases, derived from the collection name and `_key`.\n\nThe result body contains a JSON array of the\nsame length as the input array, and each entry contains the result\nof the operation for the corresponding input. In case of an error\nthe entry is a document with attributes `error` set to `true` and\nerrorCode set to the error code that has happened.\n\n\u003e **INFO:**\nAny `_id` or `_rev` attribute specified in the body is ignored.\n\n\nUnless `silent` is set to `true`, the body of the response contains an\narray of JSON objects with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n\nIf the collection parameter `waitForSync` is `false`, then the call\nreturns as soon as the documents have been accepted. It does not wait\nuntil the documents have been synced to disk.\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document creation operation to disk even in\ncase that the `waitForSync` flag had been disabled for the entire\ncollection. Thus, the `waitForSync` query parameter can be used to\nforce synchronization of just this specific operations. To use this,\nset the `waitForSync` parameter to `true`. If the `waitForSync`\nparameter is not specified or set to `false`, then the collection's\ndefault `waitForSync` behavior is applied. The `waitForSync` query\nparameter cannot be used to disable synchronization for collections\nthat have a default `waitForSync` value of `true`.\n\nIf the query parameter `returnNew` is `true`, then, for each\ngenerated document, the complete new document is returned under\nthe `new` attribute in the result.\n\nShould an error have occurred with some of the documents,\nthe `X-Arango-Error-Codes` HTTP header is set. 
It contains a map of the\nerror codes and how often each kind of error occurred. For example,\n`1200:17,1205:10` means that in 17 cases the error 1200 (\"revision conflict\")\nhas happened, and in 10 cases the error 1205 (\"illegal document handle\").\n", - "operationId": "createDocuments", + "/_db/{database-name}/_api/collection/{collection-name}/shards": { + "get": { + "description": "Returns a JSON array with the shard IDs of the collection.\n\nIf the `details` parameter is set to `true`, it returns a JSON object with the\nshard IDs as object attribute keys, and the responsible servers for each shard mapped to them.\nIn the detailed response, the leader shards come first in the arrays.\n\n\u003e **INFO:**\nThis method is only available in cluster deployments on Coordinators.\n", + "operationId": "getCollectionShards", "parameters": [ { - "description": "Name of the `collection` in which the documents are to be created.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "Wait until document has been synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, + "description": "The name of the collection.\n", + "in": "path", + "name": "collection-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "Additionally return the complete new document under the attribute `new`\nin the result.\n", + "description": "If set to true, the return value also contains the responsible servers for the collections' shards.\n", "in": "query", - "name": "returnNew", + "name": "details", "required": false, "schema": { + "default": false, "type": "boolean" } + } + ], + "responses": { + "200": { + "description": "Returns the collection's shards.\n" }, - { - "description": "Additionally return the complete old document under the attribute `old`\nin the result. 
Only available if the overwrite option is used.\n", - "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter is missing.\n" }, - { - "description": "If set to `true`, an empty object is returned as response if all document operations\nsucceed. No meta-data is returned for the created documents. 
If any of the\noperations raises an error, an array with the error object(s) is returned.\n\nYou can use this option to save network traffic but you cannot map any errors\nto the inputs of your request.\n", - "in": "query", - "name": "silent", - "required": false, - "schema": { - "type": "boolean" - } + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" }, + "501": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 501, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The method has been called on a single server.\n" + } + }, + "summary": "Get the shard IDs of a collection", + "tags": [ + "Collections" + ] + } + }, + "/_db/{database-name}/_api/collection/{collection-name}/truncate": { + "put": { + "description": "Removes all documents from the collection, but leaves the indexes intact.\n", + 
"operationId": "truncateCollection", + "parameters": [ { - "description": "If set to `true`, the insert becomes a replace-insert. If a document with the\nsame `_key` already exists, the new document is not rejected with a unique\nconstraint violation error but replaces the old document. Note that operations\nwith `overwrite` parameter require a `_key` attribute in the request payload,\ntherefore they can only be performed on collections sharded by `_key`.\n", - "in": "query", - "name": "overwrite", - "required": false, + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "This option supersedes `overwrite` and offers the following modes:\n- `\"ignore\"`: if a document with the specified `_key` value exists already,\n nothing is done and no write operation is carried out. The\n insert operation returns success in this case. This mode does not\n support returning the old document version using `RETURN OLD`. When using\n `RETURN NEW`, `null` is returned in case the document already existed.\n- `\"replace\"`: if a document with the specified `_key` value exists already,\n it is overwritten with the specified document value. This mode is\n also used when no overwrite mode is specified but the `overwrite`\n flag is set to `true`.\n- `\"update\"`: if a document with the specified `_key` value exists already,\n it is patched (partially updated) with the specified document value.\n The overwrite mode can be further controlled via the `keepNull` and\n `mergeObjects` parameters.\n- `\"conflict\"`: if a document with the specified `_key` value exists already,\n return a unique constraint violation error so that the insert operation\n fails. 
This is also the default behavior in case the overwrite mode is\n not set, and the `overwrite` flag is `false` or not set either.\n", - "in": "query", - "name": "overwriteMode", - "required": false, + "description": "The name of the collection.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", + "in": "path", + "name": "collection-name", + "required": true, "schema": { "type": "string" } }, { - "description": "If the intention is to delete existing attributes with the update-insert\ncommand, set the `keepNull` URL query parameter to `false`. This modifies the\nbehavior of the patch command to remove top-level attributes and sub-attributes\nfrom the existing document that are contained in the patch document with an\nattribute value of `null` (but not attributes of objects that are nested inside\nof arrays). This option controls the update-insert behavior only.\n", + "description": "If set to `true`, the data is synchronized to disk before returning from the\ntruncate operation.\n", "in": "query", - "name": "keepNull", + "name": "waitForSync", "required": false, "schema": { + "default": false, "type": "boolean" } }, { - "description": "Controls whether objects (not arrays) are merged if present in both, the\nexisting and the update-insert document. If set to `false`, the value in the\npatch document overwrites the existing document's value. If set to `true`,\nobjects are merged. The default is `true`.\nThis option controls the update-insert behavior only.\n", + "description": "If set to `true`, the storage engine is told to start a compaction\nin order to free up disk space. This can be resource intensive. 
If the only\nintention is to start over with an empty collection, specify `false`.\n", "in": "query", - "name": "mergeObjects", + "name": "compact", "required": false, "schema": { + "default": true, "type": "boolean" } }, { - "description": "Whether to add new entries to in-memory index caches if document insertions\naffect the edge index or cache-enabled persistent indexes.\n", - "in": "query", - "name": "refillIndexCaches", + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", "required": false, "schema": { - "type": "boolean" + "type": "string" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "documents": { - "description": "An array of documents to create.\n", - "type": "json" - } - }, - "required": [ - "documents" - ], - "type": "object" - } - } - } - }, "responses": { - "201": { - "description": "is returned if `waitForSync` was `true` and operations were processed.\n" - }, - "202": { - "description": "is returned if `waitForSync` was `false` and operations were processed.\n" - }, - "400": { - "description": "is returned if the body does not contain a valid JSON representation\nof an array of documents. The response body contains\nan error document in this case.\n" - }, - "403": { - "description": "with the error code `1004` is returned if the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - }, - "404": { - "description": "is returned if the collection specified by `collection` is unknown.\nThe response body contains an error document in this case.\n" - }, - "503": { - "description": "is returned if the system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + } + }, + "required": [ + "error", + "code", + "id" + ], + "type": "object" + } + } + }, + "description": "Truncating the collection was successful.\nReturns the basic information about the collection.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": 
"boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter is missing.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The collection cannot be found.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 410, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "This error occurs if you try to run this operation as part of a\nStream 
Transaction that has just been canceled or timed out.\n" } }, - "summary": "Create multiple documents", + "summary": "Truncate a collection", "tags": [ - "Documents" + "Collections" ] } }, - "/_api/document/{collection}/{key}": { - "delete": { - "description": "Unless `silent` is set to `true`, the body of the response contains a\nJSON object with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n\nIf the `waitForSync` parameter is not specified or set to `false`,\nthen the collection's default `waitForSync` behavior is applied.\nThe `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync`\nvalue of `true`.\n\nIf the query parameter `returnOld` is `true`, then\nthe complete previous revision of the document\nis returned under the `old` attribute in the result.\n", - "operationId": "deleteDocument", + "/_db/{database-name}/_api/collection/{collection-name}/unload": { + "put": { + "description": "\u003e **WARNING:**\nThe unload function is deprecated from version 3.8.0 onwards and is a no-op\nfrom version 3.9.0 onwards. It should no longer be used, as it may be removed\nin a future version of ArangoDB.\n\n\nSince ArangoDB version 3.9.0 this API does nothing. 
Previously it used to\nunload a collection from memory, while preserving all documents.\n", + "operationId": "unloadCollection", "parameters": [ { - "description": "Name of the `collection` in which the document is to be deleted.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "collection", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The document key.\n", + "description": "The name of the collection.\n\n\u003e **WARNING:**\nAccessing collections by their numeric ID is deprecated from version 3.4.0 on.\nYou should reference them via their names instead.\n", "in": "path", - "name": "key", + "name": "collection-name", "required": true, "schema": { "type": "string" } - }, - { - "description": "Wait until deletion operation has been synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete previous revision of the changed\ndocument under the attribute `old` in the result.\n", - "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If set to `true`, an empty object is returned as response if the document operation\nsucceeds. No meta-data is returned for the deleted document. 
If the\noperation raises an error, an error object is returned.\n\nYou can use this option to save network traffic.\n", - "in": "query", - "name": "silent", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Whether to delete existing entries from in-memory index caches and refill them\nif document removals affect the edge index or cache-enabled persistent indexes.\n", - "in": "query", - "name": "refillIndexCaches", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "You can conditionally remove a document based on a target revision id by\nusing the `if-match` HTTP header.\n", - "in": "header", - "name": "If-Match", - "required": false, - "schema": { - "type": "string" - } } ], "responses": { "200": { - "description": "is returned if the document was removed successfully and\n`waitForSync` was `true`.\n" - }, - "202": { - "description": "is returned if the document was removed successfully and\n`waitForSync` was `false`.\n" - }, - "403": { - "description": "with the error code `1004` is returned if the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - }, - "404": { - "description": "is returned if the collection or the document was not found.\nThe response body contains an error document in this case.\n" - }, - "409": { - "description": "is returned if locking the document key failed due to another\nconcurrent operation that operates on the same document.\nThis is also referred to as a _write-write conflict_.\nThe response body contains an error document with the\n`errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.\n" - }, - "412": { - "description": "is returned if a \"If-Match\" header or `rev` is given and the found\ndocument has a different version. The response also contain the found\ndocument's current revision in the `_rev` attribute. Additionally, the\nattributes `_id` and `_key` are returned.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "globallyUniqueId": { + "description": "A unique identifier of the collection. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the collection (deprecated).\n", + "type": "string" + }, + "isSystem": { + "description": "Whether the collection is a system collection. 
Collection names that starts with\nan underscore are usually system collections.\n", + "example": false, + "type": "boolean" + }, + "name": { + "description": "The name of the collection.\n", + "example": "coll", + "type": "string" + }, + "status": { + "description": "The status of the collection.\n- `3`: loaded\n- `5`: deleted\n\nEvery other status indicates a corrupted collection.\n", + "example": 3, + "type": "integer" + }, + "type": { + "description": "The type of the collection:\n- `0`: \"unknown\"\n- `2`: regular document collection\n- `3`: edge collection\n", + "example": 2, + "type": "integer" + } + }, + "required": [ + "error", + "code", + "name", + "type", + "isSystem", + "status", + "id", + "globallyUniqueId" + ], + "type": "object" + } + } + }, + "description": "Returns the basic collection properties for compatibility reasons.\n" }, - "503": { - "description": "is returned if the system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - } - }, - "summary": "Remove a document", - "tags": [ - "Documents" - ] - }, - "get": { - "description": "Returns the document identified by the collection name and document key.\nThe returned document contains three special attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n", - "operationId": "getDocument", - "parameters": [ - { - "description": "Name of the collection from which the document is to be read.\n", - "in": "path", - "name": "collection", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "The document key.\n", - "in": "path", - "name": "key", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "If the \"If-None-Match\" header is given, then it must contain exactly one\nETag. The document is returned, if it has a different revision than the\ngiven ETag. Otherwise an *HTTP 304* is returned.\n", - "in": "header", - "name": "If-None-Match", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "If the \"If-Match\" header is given, then it must contain exactly one\nETag. The document is returned, if it has the same revision as the\ngiven ETag. Otherwise a *HTTP 412* is returned.\n", - "in": "header", - "name": "If-Match", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. This may result in \"dirty reads\".\n\nThe header is ignored if this operation is part of a Stream Transaction\n(`x-arango-trx-id` header). 
The header set when creating the transaction decides\nabout dirty reads for the entire transaction, not the individual read operations.\n", - "in": "header", - "name": "x-arango-allow-dirty-read", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", - "in": "header", - "name": "x-arango-trx-id", - "required": false, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "is returned if the document was found\n" - }, - "304": { - "description": "is returned if the \"If-None-Match\" header is given and the document has\nthe same version\n" + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `collection-name` parameter or the `name` attribute is missing.\n" }, "404": { - "description": "is returned if the document or collection was not found\n" - }, - "412": { - "description": "is returned if an \"If-Match\" header is given and the found\ndocument has a different version. The response will also contain the found\ndocument's current revision in the `_rev` attribute. 
Additionally, the\nattributes `_id` and `_key` will be returned.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A collection called `collection-name` could not be found.\n" } }, - "summary": "Get a document", + "summary": "Unload a collection", "tags": [ - "Documents" + "Collections" ] - }, - "head": { - "description": "Like `GET`, but only returns the header fields and not the body. You\ncan use this call to get the current revision of a document or check if\nthe document was deleted.\n", - "operationId": "getDocumentHeader", + } + }, + "/_db/{database-name}/_api/cursor": { + "post": { + "description": "Submits an AQL query for execution in the current database. The server returns\na result batch and may indicate that further batches need to be fetched using\na cursor identifier.\n\nThe query details include the query string plus optional query options and\nbind parameters. 
These values need to be passed in a JSON representation in\nthe body of the POST request.\n", + "operationId": "createAqlQueryCursor", "parameters": [ { - "description": "Name of the `collection` from which the document is to be read.\n", - "in": "path", - "name": "collection", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "The document key.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "key", + "name": "database-name", "required": true, "schema": { "type": "string" } }, - { - "description": "If the \"If-None-Match\" header is given, then it must contain exactly one\nETag. If the current document revision is not equal to the specified ETag,\nan *HTTP 200* response is returned. If the current document revision is\nidentical to the specified ETag, then an *HTTP 304* is returned.\n", - "in": "header", - "name": "If-None-Match", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "If the \"If-Match\" header is given, then it must contain exactly one\nETag. The document is returned, if it has the same revision as the\ngiven ETag. Otherwise a *HTTP 412* is returned.\n", - "in": "header", - "name": "If-Match", - "required": false, - "schema": { - "type": "string" - } - }, { "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. This may result in \"dirty reads\".\n\nThe header is ignored if this operation is part of a Stream Transaction\n(`x-arango-trx-id` header). 
The header set when creating the transaction decides\nabout dirty reads for the entire transaction, not the individual read operations.\n", "in": "header", @@ -9299,141 +9216,142 @@ } } ], - "responses": { - "200": { - "description": "is returned if the document was found\n" - }, - "304": { - "description": "is returned if the \"If-None-Match\" header is given and the document has\nthe same version\n" - }, - "404": { - "description": "is returned if the document or collection was not found\n" - }, - "412": { - "description": "is returned if an \"If-Match\" header is given and the found\ndocument has a different version. The response will also contain the found\ndocument's current revision in the `ETag` header.\n" - } - }, - "summary": "Get a document header", - "tags": [ - "Documents" - ] - }, - "patch": { - "description": "Partially updates the document identified by the *document ID*.\nThe body of the request must contain a JSON document with the\nattributes to patch (the patch document). 
All attributes from the\npatch document are added to the existing document if they do not\nyet exist, and overwritten in the existing document if they do exist\nthere.\n\nThe values of the `_key`, `_id`, and `_rev` system attributes as well as\nattributes used as sharding keys cannot be changed.\n\nSetting an attribute value to `null` in the patch document causes a\nvalue of `null` to be saved for the attribute by default.\n\nIf the `If-Match` header is specified and the revision of the\ndocument in the database is unequal to the given revision, the\nprecondition is violated.\n\nIf `If-Match` is not given and `ignoreRevs` is `false` and there\nis a `_rev` attribute in the body and its value does not match\nthe revision of the document in the database, the precondition is\nviolated.\n\nIf a precondition is violated, an *HTTP 412* is returned.\n\nIf the document exists and can be updated, then an *HTTP 201* or\nan *HTTP 202* is returned (depending on `waitForSync`, see below),\nthe `ETag` header field contains the new revision of the document\n(in double quotes) and the `Location` header contains a complete URL\nunder which the document can be queried.\n\nCluster only: The patch document _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. Should the shard keys\nvalues be incorrect ArangoDB may answer with a `not found` error\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the updated document operation to disk even in case\nthat the `waitForSync` flag had been disabled for the entire collection.\nThus, the `waitForSync` query parameter can be used to force synchronization\nof just specific operations. To use this, set the `waitForSync` parameter\nto `true`. If the `waitForSync` parameter is not specified or set to\n`false`, then the collection's default `waitForSync` behavior is\napplied. 
The `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync` value\nof `true`.\n\nUnless `silent` is set to `true`, the body of the response contains a\nJSON object with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the new document revision.\n\nIf the query parameter `returnOld` is `true`, then\nthe complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nIf the query parameter `returnNew` is `true`, then\nthe complete new document is returned under\nthe `new` attribute in the result.\n\nIf the document does not exist, then a *HTTP 404* is returned and the\nbody of the response contains an error document.\n", - "operationId": "updateDocument", - "parameters": [ - { - "description": "Name of the `collection` in which the document is to be updated.\n", - "in": "path", - "name": "collection", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "The document key.\n", - "in": "path", - "name": "key", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "If the intention is to delete existing attributes with the patch\ncommand, set the `keepNull` URL query parameter to `false`. This modifies the\nbehavior of the patch command to remove top-level attributes and sub-attributes\nfrom the existing document that are contained in the patch document with an\nattribute value of `null` (but not attributes of objects that are nested inside\nof arrays).\n", - "in": "query", - "name": "keepNull", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Controls whether objects (not arrays) are merged if present in\nboth the existing and the patch document. 
If set to `false`, the\nvalue in the patch document overwrites the existing document's\nvalue. If set to `true`, objects are merged. The default is\n`true`.\n", - "in": "query", - "name": "mergeObjects", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Wait until document has been synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "By default, or if this is set to `true`, the `_rev` attributes in\nthe given document is ignored. If this is set to `false`, then\nthe `_rev` attribute given in the body document is taken as a\nprecondition. The document is only updated if the current revision\nis the one specified.\n", - "in": "query", - "name": "ignoreRevs", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete previous revision of the changed\ndocument under the attribute `old` in the result.\n", - "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete new document under the attribute `new`\nin the result.\n", - "in": "query", - "name": "returnNew", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If set to `true`, an empty object is returned as response if the document operation\nsucceeds. No meta-data is returned for the updated document. 
If the\noperation raises an error, an error object is returned.\n\nYou can use this option to save network traffic.\n", - "in": "query", - "name": "silent", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Whether to update existing entries in in-memory index caches if document updates\naffect the edge index or cache-enabled persistent indexes.\n", - "in": "query", - "name": "refillIndexCaches", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "You can conditionally update a document based on a target revision id by\nusing the `if-match` HTTP header.\n", - "in": "header", - "name": "If-Match", - "required": false, - "schema": { - "type": "string" - } - } - ], "requestBody": { "content": { "application/json": { "schema": { "properties": { - "document": { - "description": "A JSON representation of a document update as an object.\n", + "batchSize": { + "description": "maximum number of result documents to be transferred from\nthe server to the client in one roundtrip. If this attribute is\nnot set, a server-controlled default value will be used. A `batchSize` value of\n`0` is disallowed.\n", + "type": "integer" + }, + "bindVars": { + "description": "An object with key/value pairs representing the bind parameters.\nFor a bind variable `@var` in the query, specify the value using an attribute\nwith the name `var`. For a collection bind variable `@@coll`, use `@coll` as the\nattribute name. 
For example: `\"bindVars\": { \"var\": 42, \"@coll\": \"products\" }`.\n", + "type": "object" + }, + "count": { + "description": "indicates whether the number of documents in the result set should be returned in\nthe \"count\" attribute of the result.\nCalculating the \"count\" attribute might have a performance impact for some queries\nin the future so this option is turned off by default, and \"count\"\nis only returned when requested.\n", + "type": "boolean" + }, + "memoryLimit": { + "description": "the maximum amount of memory (measured in bytes) that the query is allowed to\nuse. If set, then the query will fail with error \"resource limit exceeded\" in\ncase it allocates too much memory. A value of `0` indicates that there is no\nmemory limit.\n", + "type": "integer" + }, + "options": { + "description": "key/value object with extra options for the query.\n", + "properties": { + "allowDirtyReads": { + "description": "If you set this option to `true` and execute the query against a cluster\ndeployment, then the Coordinator is allowed to read from any shard replica and\nnot only from the leader.\n\nYou may observe data inconsistencies (dirty reads) when reading from followers,\nnamely obsolete revisions of documents because changes have not yet been\nreplicated to the follower, as well as changes to documents before they are\nofficially committed on the leader.\n\nThis feature is only available in the Enterprise Edition.\n", + "type": "boolean" + }, + "allowRetry": { + "description": "Set this option to `true` to make it possible to retry\nfetching the latest batch from a cursor. The default is `false`.\n\nIf retrieving a result batch fails because of a connection issue, you can ask\nfor that batch again using the `POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e`\nendpoint. The first batch has an ID of `1` and the value is incremented by 1\nwith every batch. 
Every result response except the last one also includes a\n`nextBatchId` attribute, indicating the ID of the batch after the current.\nYou can remember and use this batch ID should retrieving the next batch fail.\n\nYou can only request the latest batch again (or the next batch).\nEarlier batches are not kept on the server-side.\nRequesting a batch again does not advance the cursor.\n\nYou can also call this endpoint with the next batch identifier, i.e. the value\nreturned in the `nextBatchId` attribute of a previous request. This advances the\ncursor and returns the results of the next batch. This is only supported if there\nare more results in the cursor (i.e. `hasMore` is `true` in the latest batch).\n\nFrom v3.11.1 onward, you may use the `POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e`\nendpoint even if the `allowRetry` attribute is `false` to fetch the next batch,\nbut you cannot request a batch again unless you set it to `true`.\n\nTo allow refetching of the very last batch of the query, the server cannot\nautomatically delete the cursor. After the first attempt of fetching the last\nbatch, the server would normally delete the cursor to free up resources. As you\nmight need to reattempt the fetch, it needs to keep the final batch when the\n`allowRetry` option is enabled. Once you successfully received the last batch,\nyou should call the `DELETE /_api/cursor/\u003ccursor-id\u003e` endpoint so that the\nserver doesn't unnecessarily keep the batch until the cursor times out\n(`ttl` query option).\n", + "type": "boolean" + }, + "cache": { + "description": "Whether the [AQL query results cache](https://docs.arangodb.com/3.12/aql/execution-and-performance/caching-query-results/)\nshall be used for adding as well as for retrieving results.\n\nIf the query cache mode is set to `demand` and you set the `cache` query option\nto `true` for a query, then its query result is cached if it's eligible for\ncaching. 
If the query cache mode is set to `on`, query results are automatically\ncached if they are eligible for caching unless you set the `cache` option to `false`.\n\nIf you set the `cache` option to `false`, then any query cache lookup is skipped\nfor the query. If you set it to `true`, the query cache is checked for a cached result\n**if** the query cache mode is either set to `on` or `demand`.\n", + "type": "boolean" + }, + "failOnWarning": { + "description": "When set to `true`, the query will throw an exception and abort instead of producing\na warning. This option should be used during development to catch potential issues\nearly. When the attribute is set to `false`, warnings will not be propagated to\nexceptions and will be returned with the query result.\nThere is also a server configuration option `--query.fail-on-warning` for setting the\ndefault value for `failOnWarning` so it does not need to be set on a per-query level.\n", + "type": "boolean" + }, + "fillBlockCache": { + "description": "if set to `true` or not specified, this will make the query store the data it\nreads via the RocksDB storage engine in the RocksDB block cache. This is usually\nthe desired behavior. The option can be set to `false` for queries that are\nknown to either read a lot of data which would thrash the block cache, or for queries\nthat read data which are known to be outside of the hot set. By setting the option\nto `false`, data read by the query will not make it into the RocksDB block cache if\nnot already in there, thus leaving more room for the actual hot set.\n", + "type": "boolean" + }, + "fullCount": { + "description": "if set to `true` and the query contains a `LIMIT` clause, then the\nresult will have an `extra` attribute with the sub-attributes `stats`\nand `fullCount`, `{ ... , \"extra\": { \"stats\": { \"fullCount\": 123 } } }`.\nThe `fullCount` attribute will contain the number of documents in the result before the\nlast top-level LIMIT in the query was applied. 
It can be used to count the number of\ndocuments that match certain filter criteria, but only return a subset of them, in one go.\nIt is thus similar to MySQL's *SQL_CALC_FOUND_ROWS* hint. Note that setting the option\nwill disable a few LIMIT optimizations and may lead to more documents being processed,\nand thus make queries run longer. Note that the `fullCount` attribute may only\nbe present in the result if the query has a top-level LIMIT clause and the LIMIT\nclause is actually used in the query.\n", + "type": "boolean" + }, + "intermediateCommitCount": { + "description": "The maximum number of operations after which an intermediate commit is performed\nautomatically.\n", + "type": "integer" + }, + "intermediateCommitSize": { + "description": "The maximum total size of operations after which an intermediate commit is performed\nautomatically.\n", + "type": "integer" + }, + "maxDNFConditionMembers": { + "description": "A threshold for the maximum number of `OR` sub-nodes in the internal\nrepresentation of an AQL `FILTER` condition.\n\nYou can use this option to limit the computation time and memory usage when\nconverting complex AQL `FILTER` conditions into the internal DNF\n(disjunctive normal form) format. `FILTER` conditions with a lot of logical\nbranches (`AND`, `OR`, `NOT`) can take a large amount of processing time and\nmemory. 
This query option limits the computation time and memory usage for\nsuch conditions.\n\nOnce the threshold value is reached during the DNF conversion of a `FILTER`\ncondition, the conversion is aborted, and the query continues with a simplified\ninternal representation of the condition, which **cannot be used for index lookups**.\n\nYou can set the threshold globally instead of per query with the\n`--query.max-dnf-condition-members` startup option.\n", + "type": "integer" + }, + "maxNodesPerCallstack": { + "description": "The number of execution nodes in the query plan after that stack splitting is\nperformed to avoid a potential stack overflow. Defaults to the configured value\nof the startup option `--query.max-nodes-per-callstack`.\n\nThis option is only useful for testing and debugging and normally does not need\nany adjustment.\n", + "type": "integer" + }, + "maxNumberOfPlans": { + "description": "Limits the maximum number of plans that are created by the AQL query optimizer.\n", + "type": "integer" + }, + "maxRuntime": { + "description": "The query has to be executed within the given runtime or it is killed.\nThe value is specified in seconds. The default value is `0.0` (no timeout).\n", + "type": "number" + }, + "maxTransactionSize": { + "description": "The transaction size limit in bytes.\n", + "type": "integer" + }, + "maxWarningCount": { + "description": "Limits the maximum number of warnings a query will return. The number of warnings\na query will return is limited to 10 by default, but that number can be increased\nor decreased by setting this attribute.\n", + "type": "integer" + }, + "optimizer": { + "description": "Options related to the query optimizer.\n", + "properties": { + "rules": { + "description": "A list of to-be-included or to-be-excluded optimizer rules can be put into this\nattribute, telling the optimizer to include or exclude specific rules. To disable\na rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. 
There is\nalso a pseudo-rule `all`, which matches all optimizer rules. `-all` disables all rules.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "profile": { + "description": "If set to `true` or `1`, then the additional query profiling information is returned\nin the `profile` sub-attribute of the `extra` return attribute, unless the query result\nis served from the query results cache. If set to `2`, the query includes execution stats\nper query plan node in `stats.nodes` sub-attribute of the `extra` return attribute.\nAdditionally, the query plan is returned in the `extra.plan` sub-attribute.\n", + "type": "integer" + }, + "satelliteSyncWait": { + "description": "This *Enterprise Edition* parameter allows you to configure how long a DB-Server has time\nto bring the SatelliteCollections involved in the query into sync.\nThe default value is `60.0` seconds. When the maximal time is reached, the query\nis stopped.\n", + "type": "number" + }, + "skipInaccessibleCollections": { + "description": "Let AQL queries (especially graph traversals) treat collections to which a user\nhas no access rights as if these collections are empty. Instead of returning a\nforbidden access error, your queries execute normally. This is intended to help\nwith certain use-cases: A graph contains several collections and different users\nexecute AQL queries on that graph. 
You can naturally limit the accessible\nresults by changing the access rights of users on collections.\n\nThis feature is only available in the Enterprise Edition.\n", + "type": "boolean" + }, + "spillOverThresholdMemoryUsage": { + "description": "This option allows queries to store intermediate and final results temporarily\non disk if the amount of memory used (in bytes) exceeds the specified value.\nThis is used for decreasing the memory usage during the query execution.\n\nThis option only has an effect on queries that use the `SORT` operation but\nwithout a `LIMIT`, and if you enable the spillover feature by setting a path\nfor the directory to store the temporary data in with the\n`--temp.intermediate-results-path` startup option.\n\nDefault value: 128MB.\n\n\u003e **INFO:**\nSpilling data from RAM onto disk is an experimental feature and is turned off\nby default. The query results are still built up entirely in RAM on Coordinators\nand single servers for non-streaming queries. To avoid the buildup of\nthe entire query result in RAM, use a streaming query (see the `stream` option).\n", + "type": "integer" + }, + "spillOverThresholdNumRows": { + "description": "This option allows queries to store intermediate and final results temporarily\non disk if the number of rows produced by the query exceeds the specified value.\nThis is used for decreasing the memory usage during the query execution. In a\nquery that iterates over a collection that contains documents, each row is a\ndocument, and in a query that iterates over temporary values\n(i.e. 
`FOR i IN 1..100`), each row is one of such temporary values.\n\nThis option only has an effect on queries that use the `SORT` operation but\nwithout a `LIMIT`, and if you enable the spillover feature by setting a path\nfor the directory to store the temporary data in with the\n`--temp.intermediate-results-path` startup option.\n\nDefault value: `5000000` rows.\n\n\u003e **INFO:**\nSpilling data from RAM onto disk is an experimental feature and is turned off\nby default. The query results are still built up entirely in RAM on Coordinators\nand single servers for non-streaming queries. To avoid the buildup of\nthe entire query result in RAM, use a streaming query (see the `stream` option).\n", + "type": "integer" + }, + "stream": { + "description": "Can be enabled to execute the query lazily. If set to `true`, then the query is\nexecuted as long as necessary to produce up to `batchSize` results. These\nresults are returned immediately and the query is suspended until the client\nasks for the next batch (if there are more results). Depending on the query\nthis can mean that the first results will be available much faster and that\nless memory is needed because the server only needs to store a subset of\nresults at a time. Read-only queries can benefit the most, unless `SORT`\nwithout index or `COLLECT` are involved that make it necessary to process all\ndocuments before a partial result can be returned. It is advisable to only use\nthis option for queries without exclusive locks.\n\nRemarks:\n- The query will hold resources until it ends (such as RocksDB snapshots, which\n prevents compaction to some degree). 
Writes will be in memory until the query\n is committed.\n- If existing documents are modified, then write locks are held on these\n documents and other queries trying to modify the same documents will fail\n because of this conflict.\n- A streaming query may fail late because of a conflict or for other reasons\n after some batches were already returned successfully, possibly rendering the\n results up to that point meaningless.\n- The query options `cache`, `count` and `fullCount` are not supported for\n streaming queries.\n- Query statistics, profiling data and warnings are delivered as part of the\n last batch.\n\nIf the `stream` option is `false` (default), then the complete result of the\nquery is calculated before any of it is returned to the client. The server\nstores the full result in memory (on the contacted Coordinator if in a cluster).\nAll other resources are freed immediately (locks, RocksDB snapshots). The query\nwill fail before it returns results in case of a conflict.\n", + "type": "boolean" + }, + "usePlanCache": { + "default": false, + "description": "Set this option to `true` to utilize a cached query plan or add the execution plan\nof this query to the cache if it's not in the cache yet. Otherwise, the plan cache\nis bypassed (introduced in v3.12.4).\n\nQuery plan caching can reduce the total time for processing queries by avoiding\nto parse, plan, and optimize queries over and over again that effectively have\nthe same execution plan with at most some changes to bind parameter values.\n\nAn error is raised if a query doesn't meet the requirements for plan caching.\nSee [Cache eligibility](https://docs.arangodb.com/3.12/aql/execution-and-performance/caching-query-plans/#cache-eligibility)\nfor details.\n", + "type": "boolean" + } + }, "type": "object" + }, + "query": { + "description": "contains the query string to be executed\n", + "type": "string" + }, + "ttl": { + "description": "The time-to-live for the cursor (in seconds). 
If the result set is small enough\n(less than or equal to `batchSize`) then results are returned right away.\nOtherwise they are stored in memory and will be accessible via the cursor with\nrespect to the `ttl`. The cursor will be removed on the server automatically\nafter the specified amount of time. This is useful to ensure garbage collection\nof cursors that are not fully fetched by clients. If not set, a server-defined\nvalue will be used (default: 30 seconds).\nThe time-to-live is renewed upon every access to the cursor.\n", + "type": "integer" } }, "required": [ - "document" + "query" ], "type": "object" } @@ -9442,464 +9360,4986 @@ }, "responses": { "201": { - "description": "is returned if the document was updated successfully and\n`waitForSync` was `true`.\n" - }, - "202": { - "description": "is returned if the document was updated successfully and\n`waitForSync` was `false`.\n" - }, - "400": { - "description": "is returned if the body does not contain a valid JSON representation\nof a document. The response body contains\nan error document in this case.\n" - }, - "403": { - "description": "with the error code `1004` is returned if the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - }, - "404": { - "description": "is returned if the collection or the document was not found.\n" - }, - "409": { - "description": "There are two possible reasons for this error:\n\n- The update causes a unique constraint violation in a secondary index.\n The response body contains an error document with the `errorNum` set to\n `1210` (`ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED`) in this case.\n- Locking the document key or some unique index entry failed due to another\n concurrent operation that operates on the same document. This is also referred\n to as a _write-write conflict_. The response body contains an error document\n with the `errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.\n" - }, - "412": { - "description": "is returned if the precondition was violated. The response also contains\nthe found documents' current revisions in the `_rev` attributes.\nAdditionally, the attributes `_id` and `_key` are returned.\n" - }, - "503": { - "description": "is returned if the system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - } - }, - "summary": "Update a document", - "tags": [ - "Documents" - ] - }, - "put": { - "description": "Replaces the specified document with the one in the body, provided there is\nsuch a document and no precondition is violated.\n\nThe values of the `_key`, `_id`, and `_rev` system attributes as well as\nattributes used as sharding keys cannot be changed.\n\nIf the `If-Match` header is specified and the revision of the\ndocument in the database is unequal to the given revision, the\nprecondition is violated.\n\nIf `If-Match` is not given and `ignoreRevs` is `false` and there\nis a `_rev` attribute in the body and its value does not match\nthe revision of the document in the database, the precondition is\nviolated.\n\nIf a precondition is violated, an *HTTP 412* is returned.\n\nIf the document exists and can be updated, then an *HTTP 201* or\nan *HTTP 202* is returned (depending on `waitForSync`, see below),\nthe `ETag` header field contains the new revision of the document\nand the `Location` header contains a complete URL under which the\ndocument can be queried.\n\nCluster only: The replace documents _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. Should the shard keys\nvalues be incorrect ArangoDB may answer with a *not found* error.\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document replacement operation to disk even in case\nthat the `waitForSync` flag had been disabled for the entire collection.\nThus, the `waitForSync` query parameter can be used to force synchronization\nof just specific operations. To use this, set the `waitForSync` parameter\nto `true`. If the `waitForSync` parameter is not specified or set to\n`false`, then the collection's default `waitForSync` behavior is\napplied. 
The `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync` value\nof `true`.\n\nUnless `silent` is set to `true`, the body of the response contains a\nJSON object with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the new document revision.\n\nIf the query parameter `returnOld` is `true`, then\nthe complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nIf the query parameter `returnNew` is `true`, then\nthe complete new document is returned under\nthe `new` attribute in the result.\n\nIf the document does not exist, then a *HTTP 404* is returned and the\nbody of the response contains an error document.\n", - "operationId": "replaceDocument", - "parameters": [ - { - "description": "Name of the `collection` in which the document is to be replaced.\n", - "in": "path", - "name": "collection", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "The document key.\n", - "in": "path", - "name": "key", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "Wait until document has been synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "By default, or if this is set to `true`, the `_rev` attributes in\nthe given document is ignored. If this is set to `false`, then\nthe `_rev` attribute given in the body document is taken as a\nprecondition. 
The document is only replaced if the current revision\nis the one specified.\n", - "in": "query", - "name": "ignoreRevs", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete previous revision of the changed\ndocument under the attribute `old` in the result.\n", - "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Return additionally the complete new document under the attribute `new`\nin the result.\n", - "in": "query", - "name": "returnNew", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If set to `true`, an empty object is returned as response if the document operation\nsucceeds. No meta-data is returned for the replaced document. If the\noperation raises an error, an error object is returned.\n\nYou can use this option to save network traffic.\n", - "in": "query", - "name": "silent", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Whether to update existing entries in in-memory index caches if documents\nreplacements affect the edge index or cache-enabled persistent indexes.\n", - "in": "query", - "name": "refillIndexCaches", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "You can conditionally replace a document based on a target revision id by\nusing the `if-match` HTTP header.\n", - "in": "header", - "name": "If-Match", - "required": false, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "document": { - "description": "A JSON representation of a single document.\n", - "type": "object" - } - }, - "required": [ - "document" - ], - "type": "object" - } - } - } - }, - "responses": { - "201": { - "description": "is returned if the document was replaced successfully and\n`waitForSync` was `true`.\n" - }, - "202": { - 
"description": "is returned if the document was replaced successfully and\n`waitForSync` was `false`.\n" - }, - "400": { - "description": "is returned if the body does not contain a valid JSON representation\nof a document. The response body contains\nan error document in this case.\n" - }, - "403": { - "description": "with the error code `1004` is returned if the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - }, - "404": { - "description": "is returned if the collection or the document was not found.\n" - }, - "409": { - "description": "There are two possible reasons for this error:\n\n- The replace operation causes a unique constraint violation in a secondary\n index. The response body contains an error document with the `errorNum` set to\n `1210` (`ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED`) in this case.\n- Locking the document key or some unique index entry failed due to another\n concurrent operation that operates on the same document. This is also referred\n to as a _write-write conflict_. The response body contains an error document\n with the `errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.\n" - }, - "412": { - "description": "is returned if the precondition is violated. The response also contains\nthe found documents' current revisions in the `_rev` attributes.\nAdditionally, the attributes `_id` and `_key` are returned.\n" - }, - "503": { - "description": "is returned if the system is temporarily not available. 
This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" - } - }, - "summary": "Replace a document", - "tags": [ - "Documents" - ] - } - }, - "/_api/edges/{collection-id}": { - "get": { - "description": "Returns an array of edges starting or ending in the vertex identified by\n`vertex`.\n", - "operationId": "getVertexEdges", - "parameters": [ - { - "description": "The id of the collection.\n", - "in": "path", - "name": "collection-id", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "The id of the start vertex.\n", - "in": "query", - "name": "vertex", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "Selects `in` or `out` direction for edges. If not set, any edges are\nreturned.\n", - "in": "query", - "name": "direction", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. 
This may result in \"dirty reads\".\n", - "in": "header", - "name": "x-arango-allow-dirty-read", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "200": { - "description": "is returned if the edge collection was found and edges were retrieved.\n" - }, - "400": { - "description": "is returned if the request contains invalid parameters.\n" - }, - "404": { - "description": "is returned if the edge collection was not found.\n" - } - }, - "summary": "Get inbound and outbound edges", - "tags": [ - "Graphs" - ] - } - }, - "/_api/endpoint": { - "get": { - "description": "\u003e **WARNING:**\nThis route should no longer be used.\nIt is considered as deprecated from version 3.4.0 on.\n\n\nReturns an array of all configured endpoints the server is listening on.\n\nThe result is a JSON array of JSON objects, each with `\"entrypoint\"` as\nthe only attribute, and with the value being a string describing the\nendpoint.\n\n\u003e **INFO:**\nRetrieving the array of all endpoints is allowed in the system database\nonly. 
Calling this action in any other database will make the server return\nan error.\n", - "operationId": "listEndpoints", - "responses": { - "200": { - "description": "is returned when the array of endpoints can be determined successfully.\n" - }, - "400": { - "description": "is returned if the action is not carried out in the system database.\n" - }, - "405": { - "description": "The server will respond with *HTTP 405* if an unsupported HTTP method is used.\n" - } - }, - "summary": "List the endpoints of a single server (deprecated)", - "tags": [ - "Administration" - ] - } - }, - "/_api/engine": { - "get": { - "description": "Returns the storage engine the server is configured to use.\nThe response is a JSON object with the following attributes:\n", - "operationId": "getEngine", - "responses": { - "200": { "content": { "application/json": { "schema": { "properties": { - "name": { - "description": "will be `rocksdb`\n", - "type": "string" - } - }, - "required": [ - "name" - ], - "type": "object" - } - } - }, - "description": "is returned in all cases.\n" - } - }, - "summary": "Get the storage engine type", - "tags": [ - "Administration" - ] - } - }, - "/_api/explain": { - "post": { - "description": "To explain how an AQL query would be executed on the server, the query string\ncan be sent to the server via an HTTP POST request. The server will then validate\nthe query and create an execution plan for it. The execution plan will be\nreturned, but the query will not be executed.\n\nThe execution plan that is returned by the server can be used to estimate the\nprobable performance of the query. Though the actual performance will depend\non many different factors, the execution plan normally can provide some rough\nestimates on the amount of work the server needs to do in order to actually run\nthe query.\n\nBy default, the explain operation will return the optimal plan as chosen by\nthe query optimizer The optimal plan is the plan with the lowest total estimated\ncost. 
The plan will be returned in the attribute `plan` of the response object.\nIf the option `allPlans` is specified in the request, the result will contain\nall plans created by the optimizer. The plans will then be returned in the\nattribute `plans`.\n\nThe result will also contain an attribute `warnings`, which is an array of\nwarnings that occurred during optimization or execution plan creation. Additionally,\na `stats` attribute is contained in the result with some optimizer statistics.\nIf `allPlans` is set to `false`, the result will contain an attribute `cacheable`\nthat states whether the query results can be cached on the server if the query\nresult cache were used. The `cacheable` attribute is not present when `allPlans`\nis set to `true`.\n\nEach plan in the result is a JSON object with the following attributes:\n- `nodes`: the array of execution nodes of the plan.\n\n- `estimatedCost`: the total estimated cost for the plan. If there are multiple\n plans, the optimizer will choose the plan with the lowest total cost.\n\n- `collections`: an array of collections used in the query\n\n- `rules`: an array of rules the optimizer applied.\n\n- `variables`: array of variables used in the query (note: this may contain\n internal variables created by the optimizer)\n", - "operationId": "explainAqlQuery", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "bindVars": { - "description": "An object with key/value pairs representing the bind parameters.\nFor a bind variable `@var` in the query, specify the value using an attribute\nwith the name `var`. For a collection bind variable `@@coll`, use `@coll` as the\nattribute name. 
For example: `\"bindVars\": { \"var\": 42, \"@coll\": \"products\" }`.\n", - "type": "object" - }, - "options": { - "description": "Options for the query\n", - "properties": { - "allPlans": { - "description": "if set to `true`, all possible execution plans will be returned.\nThe default is `false`, meaning only the optimal plan will be returned.\n", - "type": "boolean" - }, - "maxNumberOfPlans": { - "description": "an optional maximum number of plans that the optimizer is\nallowed to generate. Setting this attribute to a low value allows to put a\ncap on the amount of work the optimizer does.\n", - "type": "integer" - }, - "optimizer": { - "description": "Options related to the query optimizer.\n", - "properties": { - "rules": { - "description": "A list of to-be-included or to-be-excluded optimizer rules can be put into this\nattribute, telling the optimizer to include or exclude specific rules. To disable\na rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is\nalso a pseudo-rule `all`, which matches all optimizer rules. `-all` disables all rules.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - } + "cached": { + "description": "A boolean flag indicating whether the query result was served\nfrom the query results cache or not. If the query result is served from the query\ncache, the `extra` attribute in the response does not contain the `stats`\nand `profile` sub-attributes.\n", + "type": "boolean" }, - "type": "object" - }, - "query": { - "description": "the query which you want explained; If the query references any bind variables,\nthese must also be passed in the attribute `bindVars`. 
Additional\noptions for the query can be passed in the `options` attribute.\n", - "type": "string" - } - }, - "required": [ - "query" - ], - "type": "object" - } - } - } - }, - "responses": { - "200": { - "description": "If the query is valid, the server will respond with *HTTP 200* and\nreturn the optimal execution plan in the `plan` attribute of the response.\nIf option `allPlans` was set in the request, an array of plans will be returned\nin the `allPlans` attribute instead.\n" - }, - "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request,\nor if the query contains a parse error. The body of the response will\ncontain the error details embedded in a JSON object.\nOmitting bind variables if the query references any will also result\nin an *HTTP 400* error.\n" - }, - "404": { - "description": "The server will respond with *HTTP 404* in case a non-existing collection is\naccessed in the query.\n" - } - }, - "summary": "Explain an AQL query", - "tags": [ - "Queries" - ] - } - }, - "/_api/foxx": { - "get": { - "description": "Fetches a list of services installed in the current database.\n\nReturns a list of objects with the following attributes:\n\n- `mount`: the mount path of the service\n- `development`: `true` if the service is running in development mode\n- `legacy`: `true` if the service is running in 2.8 legacy compatibility mode\n- `provides`: the service manifest's `provides` value or an empty object\n\nAdditionally the object may contain the following attributes if they have been set on the manifest:\n\n- `name`: a string identifying the service type\n- `version`: a semver-compatible version string\n", - "operationId": "listFoxxServices", - "parameters": [ - { - "description": "Whether or not system services should be excluded from the result.\n", - "in": "query", - "name": "excludeSystem", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "200": { - "description": "Returned if 
the request was successful.\n" - } - }, - "summary": "List the installed services", - "tags": [ - "Foxx" - ] - }, - "post": { - "description": "Installs the given new service at the given mount path.\n\nThe request body can be any of the following formats:\n\n- `application/zip`: a raw zip bundle containing a service\n- `application/javascript`: a standalone JavaScript file\n- `application/json`: a service definition as JSON\n- `multipart/form-data`: a service definition as a multipart form\n\nA service definition is an object or form with the following properties or fields:\n\n- `configuration`: a JSON object describing configuration values\n- `dependencies`: a JSON object describing dependency settings\n- `source`: a fully qualified URL or an absolute path on the server's file system\n\nWhen using multipart data, the `source` field can also alternatively be a file field\ncontaining either a zip bundle or a standalone JavaScript file.\n\nWhen using a standalone JavaScript file the given file will be executed\nto define our service's HTTP endpoints. 
It is the same which would be defined\nin the field `main` of the service manifest.\n\nIf `source` is a URL, the URL must be reachable from the server.\nIf `source` is a file system path, the path will be resolved on the server.\nIn either case the path or URL is expected to resolve to a zip bundle,\nJavaScript file or (in case of a file system path) directory.\n\nNote that when using file system paths in a cluster with multiple Coordinators\nthe file system path must resolve to equivalent files on every Coordinator.\n", + "code": { + "description": "The HTTP status code.\n", + "type": "integer" + }, + "count": { + "description": "The total number of result documents available (only\navailable if the query was executed with the `count` attribute set).\n", + "type": "integer" + }, + "error": { + "description": "A flag to indicate that an error occurred (`false` in this case).\n", + "type": "boolean" + }, + "extra": { + "description": "An optional JSON object with extra information about the query result.\n\nOnly delivered as part of the first batch, or the last batch in case of a cursor\nwith the `stream` option enabled.\n", + "properties": { + "plan": { + "description": "The execution plan.\n", + "properties": { + "collections": { + "description": "A list of the collections involved in the query. 
The list only includes the\ncollections that can statically be determined at query compile time.\n", + "items": { + "properties": { + "name": { + "description": "The collection name.\n", + "type": "string" + }, + "type": { + "description": "How the collection is used.\n", + "enum": [ + "read", + "write", + "exclusive" + ], + "type": "string" + } + }, + "required": [ + "name", + "type" + ], + "type": "object" + }, + "type": "array" + }, + "estimatedCost": { + "description": "The estimated cost of the query.\n", + "type": "number" + }, + "estimatedNrItems": { + "description": "The estimated number of results.\n", + "type": "integer" + }, + "isModificationQuery": { + "description": "Whether the query contains write operations.\n", + "type": "boolean" + }, + "nodes": { + "description": "A nested list of the execution plan nodes.\n", + "items": { + "type": "object" + }, + "type": "array" + }, + "rules": { + "description": "A list with the names of the applied optimizer rules.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "variables": { + "description": "All of the query variables, including user-created and internal ones.\n", + "items": { + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "nodes", + "rules", + "collections", + "variables", + "estimatedCost", + "estimatedNrItems", + "isModificationQuery" + ], + "type": "object" + }, + "profile": { + "description": "The duration of the different query execution phases in seconds.\n", + "properties": { + "executing": { + "description": "", + "type": "number" + }, + "finalizing": { + "description": "", + "type": "number" + }, + "initializing": { + "description": "", + "type": "number" + }, + "instantiating executors": { + "description": "", + "type": "number" + }, + "instantiating plan": { + "description": "", + "type": "number" + }, + "loading collections": { + "description": "", + "type": "number" + }, + "optimizing ast": { + "description": "", + "type": "number" + }, + "optimizing 
plan": { + "description": "", + "type": "number" + }, + "parsing": { + "description": "", + "type": "number" + } + }, + "required": [ + "initializing", + "parsing", + "optimizing ast", + "loading collections", + "instantiating plan", + "optimizing plan", + "instantiating executors", + "executing", + "finalizing" + ], + "type": "object" + }, + "stats": { + "description": "An object with query statistics.\n", + "properties": { + "cacheHits": { + "description": "The total number of index entries read from in-memory caches for indexes\nof type edge or persistent. This value is only non-zero when reading from indexes\nthat have an in-memory cache enabled, and when the query allows using the in-memory\ncache (i.e. using equality lookups on all index attributes).\n", + "type": "integer" + }, + "cacheMisses": { + "description": "The total number of cache read attempts for index entries that could not\nbe served from in-memory caches for indexes of type edge or persistent. This value\nis only non-zero when reading from indexes that have an in-memory cache enabled, the\nquery allows using the in-memory cache (i.e. using equality lookups on all index attributes)\nand the looked up values are not present in the cache.\n", + "type": "integer" + }, + "cursorsCreated": { + "description": "The total number of cursor objects created during query execution. Cursor\nobjects are created for index lookups.\n", + "type": "integer" + }, + "cursorsRearmed": { + "description": "The total number of times an existing cursor object was repurposed.\nRepurposing an existing cursor object is normally more efficient compared to destroying an\nexisting cursor object and creating a new one from scratch.\n", + "type": "integer" + }, + "documentLookups": { + "description": "The number of real document lookups caused by late materialization\nas well as `IndexNode`s that had to load document attributes not covered\nby the index. 
This is how many documents had to be fetched from storage after\nan index scan that initially covered the attribute access for these documents.\n", + "type": "integer" + }, + "executionTime": { + "description": "The query execution time (wall-clock time) in seconds.\n", + "type": "number" + }, + "filtered": { + "description": "The total number of documents removed after executing a filter condition\nin a `FilterNode` or another node that post-filters data. Note that nodes of the\n`IndexNode` type can also filter documents by selecting only the required index range\nfrom a collection, and the `filtered` value only indicates how much filtering was done by a\npost filter in the `IndexNode` itself or following `FilterNode` nodes.\nNodes of the `EnumerateCollectionNode` and `TraversalNode` types can also apply\nfilter conditions and can report the number of filtered documents.\n", + "type": "integer" + }, + "fullCount": { + "description": "The total number of documents that matched the search condition if the query's\nfinal top-level `LIMIT` operation were not present.\nThis attribute may only be returned if the `fullCount` option was set when starting the\nquery and only contains a sensible value if the query contains a `LIMIT` operation on\nthe top level.\n", + "type": "integer" + }, + "httpRequests": { + "description": "The total number of cluster-internal HTTP requests performed.\n", + "type": "integer" + }, + "intermediateCommits": { + "description": "The number of intermediate commits performed by the query. This is only non-zero\nfor write queries, and only for queries that reached either the `intermediateCommitSize`\nor `intermediateCommitCount` thresholds. 
Note: in a cluster, intermediate commits can happen\non each participating DB-Server.\n", + "type": "integer" + }, + "nodes": { + "description": "When the query is executed with the `profile` option set to at least `2`,\nthen this attribute contains runtime statistics per query execution node.\nFor a human readable output, you can execute\n`db._profileQuery(\u003cquery\u003e, \u003cbind-vars\u003e)` in arangosh.\n", + "items": { + "properties": { + "calls": { + "description": "The number of calls to this node.\n", + "type": "integer" + }, + "id": { + "description": "The execution node ID to correlate the statistics with the `plan` returned in\nthe `extra` attribute.\n", + "type": "integer" + }, + "items": { + "description": "The number of items returned by this node. Items are the temporary results\nreturned at this stage.\n", + "type": "integer" + }, + "runtime": { + "description": "The execution time of this node in seconds.\n", + "type": "number" + } + }, + "required": [ + "id", + "calls", + "items", + "runtime" + ], + "type": "object" + }, + "type": "array" + }, + "peakMemoryUsage": { + "description": "The maximum memory usage of the query while it was running. In a cluster,\nthe memory accounting is done per shard, and the memory usage reported is the peak\nmemory usage value from the individual shards.\nNote that to keep things lightweight, the per-query memory usage is tracked on a relatively\nhigh level, not including any memory allocator overhead nor any memory used for temporary\nresults calculations (e.g. memory allocated/deallocated inside AQL expressions and function\ncalls).\n", + "type": "integer" + }, + "scannedFull": { + "description": "The total number of documents iterated over when scanning a collection\nwithout an index. 
Documents scanned by subqueries are included in the result, but\noperations triggered by built-in or user-defined AQL functions are not.\n", + "type": "integer" + }, + "scannedIndex": { + "description": "The total number of documents iterated over when scanning a collection using\nan index. Documents scanned by subqueries are included in the result, but operations\ntriggered by built-in or user-defined AQL functions are not.\n", + "type": "integer" + }, + "seeks": { + "description": "The number of seek calls done by RocksDB iterators for merge joins\n(`JoinNode` in the execution plan).\n", + "type": "integer" + }, + "writesExecuted": { + "description": "The total number of data-modification operations successfully executed.\n", + "type": "integer" + }, + "writesIgnored": { + "description": "The total number of data-modification operations that were unsuccessful,\nbut have been ignored because of the `ignoreErrors` query option.\n", + "type": "integer" + } + }, + "required": [ + "writesExecuted", + "writesIgnored", + "documentLookups", + "seeks", + "scannedFull", + "scannedIndex", + "cursorsCreated", + "cursorsRearmed", + "cacheHits", + "cacheMisses", + "filtered", + "httpRequests", + "executionTime", + "peakMemoryUsage", + "intermediateCommits" + ], + "type": "object" + }, + "warnings": { + "description": "A list of query warnings.\n", + "items": { + "properties": { + "code": { + "description": "An error code.\n", + "type": "integer" + }, + "message": { + "description": "A description of the problem.\n", + "type": "string" + } + }, + "required": [ + "code", + "message" + ], + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "warnings", + "stats" + ], + "type": "object" + }, + "hasMore": { + "description": "A boolean indicator whether there are more results\navailable for the cursor on the server.\n\nNote that even if `hasMore` returns `true`, the next call might still return no\ndocuments. 
Once `hasMore` is `false`, the cursor is exhausted and the client\ncan stop asking for more results.\n", + "type": "boolean" + }, + "id": { + "description": "The ID of the cursor for fetching more result batches.\n", + "type": "string" + }, + "nextBatchId": { + "description": "Only set if the `allowRetry` query option is enabled in v3.11.0.\nFrom v3.11.1 onward, this attribute is always set, except in the last batch.\n\nThe ID of the batch after the current one. The first batch has an ID of `1` and\nthe value is incremented by 1 with every batch. You can remember and use this\nbatch ID should retrieving the next batch fail. Use the\n`POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e` endpoint to ask for the batch again.\nYou can also request the next batch.\n", + "type": "string" + }, + "planCacheKey": { + "description": "The key of the plan cache entry. This attribute is only\npresent if a cached query execution plan has been used.\n", + "type": "string" + }, + "result": { + "description": "An array of result documents for the current batch\n(might be empty if the query has no results).\n", + "items": { + "type": "" + }, + "type": "array" + } + }, + "required": [ + "error", + "code", + "hasMore", + "cached" + ], + "type": "object" + } + } + }, + "description": "is returned if the result set can be created by the server.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "the HTTP status code\n", + "type": "integer" + }, + "error": { + "description": "boolean flag to indicate that an error occurred (`true` in this case)\n", + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n\nIf the query specification is complete, the server will process the query. 
If an\nerror occurs during query processing, the server will respond with *HTTP 400*.\nAgain, the body of the response will contain details about the error.\n", + "type": "string" + }, + "errorNum": { + "description": "the server error number\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The JSON representation is malformed, the query specification is\nmissing from the request, or the query is invalid.\n\nThe body of the response contains a JSON object with additional error\ndetails.\n" + }, + "404": { + "description": "A non-existing collection is accessed in the query.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "405": { + "description": "An unsupported HTTP method is used.\n" + }, + "410": { + "description": "A server which processes the query or the leader of a shard which is used\nin the query stops responding, but the connection has not been closed.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "503": { + "description": "A server which processes the query or the leader of a shard which is used\nin the query is down, either for going through a restart, a failure, or\nconnectivity issues.\n" + } + }, + "summary": "Create a cursor", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/cursor/{cursor-identifier}": { + "delete": { + "description": "Deletes the cursor and frees the resources associated with it.\n\nThe cursor will automatically be destroyed on the server when the client has\nretrieved all documents from it. The client can also explicitly destroy the\ncursor at any earlier time using an HTTP DELETE request. 
The cursor identifier must\nbe included as part of the URL.\n\nNote: the server will also destroy abandoned cursors automatically after a\ncertain server-controlled timeout to avoid resource leakage.\n", + "operationId": "deleteAqlQueryCursor", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The identifier of the cursor\n", + "in": "path", + "name": "cursor-identifier", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "202": { + "description": "The server is aware of the cursor.\n" + }, + "404": { + "description": "The server is not aware of the cursor. This is also\nreturned if a cursor is used after it has been destroyed.\n" + } + }, + "summary": "Delete a cursor", + "tags": [ + "Queries" + ] + }, + "post": { + "description": "If the cursor is still alive, returns an object with the next query result batch.\n\nIf the cursor is not fully consumed, the time-to-live for the cursor\nis renewed by this API call.\n", + "operationId": "getNextAqlQueryCursorBatch", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the cursor\n", + "in": "path", + "name": "cursor-identifier", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "cached": { + "description": "A boolean flag indicating whether the query result was served\nfrom the query results cache or not. 
If the query result is served from the query\ncache, the `extra` attribute in the response does not contain the `stats`\nand `profile` sub-attributes.\n", + "type": "boolean" + }, + "code": { + "description": "The HTTP status code.\n", + "type": "integer" + }, + "count": { + "description": "The total number of result documents available (only\navailable if the query was executed with the `count` attribute set).\n", + "type": "integer" + }, + "error": { + "description": "A flag to indicate that an error occurred (`false` in this case).\n", + "type": "boolean" + }, + "extra": { + "description": "An optional JSON object with extra information about the query result.\n\nOnly delivered as part of the first batch, or the last batch in case of a cursor\nwith the `stream` option enabled.\n", + "properties": { + "plan": { + "description": "The execution plan.\n", + "properties": { + "collections": { + "description": "A list of the collections involved in the query. The list only includes the\ncollections that can statically be determined at query compile time.\n", + "items": { + "properties": { + "name": { + "description": "The collection name.\n", + "type": "string" + }, + "type": { + "description": "How the collection is used.\n", + "enum": [ + "read", + "write", + "exclusive" + ], + "type": "string" + } + }, + "required": [ + "name", + "type" + ], + "type": "object" + }, + "type": "array" + }, + "estimatedCost": { + "description": "The estimated cost of the query.\n", + "type": "number" + }, + "estimatedNrItems": { + "description": "The estimated number of results.\n", + "type": "integer" + }, + "isModificationQuery": { + "description": "Whether the query contains write operations.\n", + "type": "boolean" + }, + "nodes": { + "description": "A nested list of the execution plan nodes.\n", + "items": { + "type": "object" + }, + "type": "array" + }, + "rules": { + "description": "A list with the names of the applied optimizer rules.\n", + "items": { + "type": "string" + }, + 
"type": "array" + }, + "variables": { + "description": "All of the query variables, including user-created and internal ones.\n", + "items": { + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "nodes", + "rules", + "collections", + "variables", + "estimatedCost", + "estimatedNrItems", + "isModificationQuery" + ], + "type": "object" + }, + "profile": { + "description": "The duration of the different query execution phases in seconds.\n", + "properties": { + "executing": { + "description": "", + "type": "number" + }, + "finalizing": { + "description": "", + "type": "number" + }, + "initializing": { + "description": "", + "type": "number" + }, + "instantiating executors": { + "description": "", + "type": "number" + }, + "instantiating plan": { + "description": "", + "type": "number" + }, + "loading collections": { + "description": "", + "type": "number" + }, + "optimizing ast": { + "description": "", + "type": "number" + }, + "optimizing plan": { + "description": "", + "type": "number" + }, + "parsing": { + "description": "", + "type": "number" + } + }, + "required": [ + "initializing", + "parsing", + "optimizing ast", + "loading collections", + "instantiating plan", + "optimizing plan", + "instantiating executors", + "executing", + "finalizing" + ], + "type": "object" + }, + "stats": { + "description": "An object with query statistics.\n", + "properties": { + "cacheHits": { + "description": "The total number of index entries read from in-memory caches for indexes\nof type edge or persistent. This value is only non-zero when reading from indexes\nthat have an in-memory cache enabled, and when the query allows using the in-memory\ncache (i.e. using equality lookups on all index attributes).\n", + "type": "integer" + }, + "cacheMisses": { + "description": "The total number of cache read attempts for index entries that could not\nbe served from in-memory caches for indexes of type edge or persistent. 
This value\nis only non-zero when reading from indexes that have an in-memory cache enabled, the\nquery allows using the in-memory cache (i.e. using equality lookups on all index attributes)\nand the looked up values are not present in the cache.\n", + "type": "integer" + }, + "cursorsCreated": { + "description": "The total number of cursor objects created during query execution. Cursor\nobjects are created for index lookups.\n", + "type": "integer" + }, + "cursorsRearmed": { + "description": "The total number of times an existing cursor object was repurposed.\nRepurposing an existing cursor object is normally more efficient compared to destroying an\nexisting cursor object and creating a new one from scratch.\n", + "type": "integer" + }, + "documentLookups": { + "description": "The number of real document lookups caused by late materialization\nas well as `IndexNode`s that had to load document attributes not covered\nby the index. This is how many documents had to be fetched from storage after\nan index scan that initially covered the attribute access for these documents.\n", + "type": "integer" + }, + "executionTime": { + "description": "The query execution time (wall-clock time) in seconds.\n", + "type": "number" + }, + "filtered": { + "description": "The total number of documents removed after executing a filter condition\nin a `FilterNode` or another node that post-filters data. 
Note that nodes of the\n`IndexNode` type can also filter documents by selecting only the required index range\nfrom a collection, and the `filtered` value only indicates how much filtering was done by a\npost filter in the `IndexNode` itself or following `FilterNode` nodes.\nNodes of the `EnumerateCollectionNode` and `TraversalNode` types can also apply\nfilter conditions and can report the number of filtered documents.\n", + "type": "integer" + }, + "fullCount": { + "description": "The total number of documents that matched the search condition if the query's\nfinal top-level `LIMIT` operation were not present.\nThis attribute may only be returned if the `fullCount` option was set when starting the\nquery and only contains a sensible value if the query contains a `LIMIT` operation on\nthe top level.\n", + "type": "integer" + }, + "httpRequests": { + "description": "The total number of cluster-internal HTTP requests performed.\n", + "type": "integer" + }, + "intermediateCommits": { + "description": "The number of intermediate commits performed by the query. This is only non-zero\nfor write queries, and only for queries that reached either the `intermediateCommitSize`\nor `intermediateCommitCount` thresholds. 
Note: in a cluster, intermediate commits can happen\non each participating DB-Server.\n", + "type": "integer" + }, + "nodes": { + "description": "When the query is executed with the `profile` option set to at least `2`,\nthen this attribute contains runtime statistics per query execution node.\nFor a human readable output, you can execute\n`db._profileQuery(\u003cquery\u003e, \u003cbind-vars\u003e)` in arangosh.\n", + "items": { + "properties": { + "calls": { + "description": "The number of calls to this node.\n", + "type": "integer" + }, + "id": { + "description": "The execution node ID to correlate the statistics with the `plan` returned in\nthe `extra` attribute.\n", + "type": "integer" + }, + "items": { + "description": "The number of items returned by this node. Items are the temporary results\nreturned at this stage.\n", + "type": "integer" + }, + "runtime": { + "description": "The execution time of this node in seconds.\n", + "type": "number" + } + }, + "required": [ + "id", + "calls", + "items", + "runtime" + ], + "type": "object" + }, + "type": "array" + }, + "peakMemoryUsage": { + "description": "The maximum memory usage of the query while it was running. In a cluster,\nthe memory accounting is done per shard, and the memory usage reported is the peak\nmemory usage value from the individual shards.\nNote that to keep things lightweight, the per-query memory usage is tracked on a relatively\nhigh level, not including any memory allocator overhead nor any memory used for temporary\nresults calculations (e.g. memory allocated/deallocated inside AQL expressions and function\ncalls).\n", + "type": "integer" + }, + "scannedFull": { + "description": "The total number of documents iterated over when scanning a collection\nwithout an index. 
Documents scanned by subqueries are included in the result, but\noperations triggered by built-in or user-defined AQL functions are not.\n", + "type": "integer" + }, + "scannedIndex": { + "description": "The total number of documents iterated over when scanning a collection using\nan index. Documents scanned by subqueries are included in the result, but operations\ntriggered by built-in or user-defined AQL functions are not.\n", + "type": "integer" + }, + "seeks": { + "description": "The number of seek calls done by RocksDB iterators for merge joins\n(`JoinNode` in the execution plan).\n", + "type": "integer" + }, + "writesExecuted": { + "description": "The total number of data-modification operations successfully executed.\n", + "type": "integer" + }, + "writesIgnored": { + "description": "The total number of data-modification operations that were unsuccessful,\nbut have been ignored because of the `ignoreErrors` query option.\n", + "type": "integer" + } + }, + "required": [ + "writesExecuted", + "writesIgnored", + "documentLookups", + "seeks", + "scannedFull", + "scannedIndex", + "cursorsCreated", + "cursorsRearmed", + "cacheHits", + "cacheMisses", + "filtered", + "httpRequests", + "executionTime", + "peakMemoryUsage", + "intermediateCommits" + ], + "type": "object" + }, + "warnings": { + "description": "A list of query warnings.\n", + "items": { + "properties": { + "code": { + "description": "An error code.\n", + "type": "integer" + }, + "message": { + "description": "A description of the problem.\n", + "type": "string" + } + }, + "required": [ + "code", + "message" + ], + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "warnings", + "stats" + ], + "type": "object" + }, + "hasMore": { + "description": "A boolean indicator whether there are more results\navailable for the cursor on the server.\n\nNote that even if `hasMore` returns `true`, the next call might still return no\ndocuments. 
Once `hasMore` is `false`, the cursor is exhausted and the client\ncan stop asking for more results.\n", + "type": "boolean" + }, + "id": { + "description": "The ID of the cursor for fetching more result batches.\n", + "type": "string" + }, + "nextBatchId": { + "description": "Only set if the `allowRetry` query option is enabled in v3.11.0.\nFrom v3.11.1 onward, this attribute is always set, except in the last batch.\n\nThe ID of the batch after the current one. The first batch has an ID of `1` and\nthe value is incremented by 1 with every batch. You can remember and use this\nbatch ID should retrieving the next batch fail. Use the\n`POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e` endpoint to ask for the batch again.\nYou can also request the next batch.\n", + "type": "string" + }, + "planCacheKey": { + "description": "The key of the plan cache entry. This attribute is only\npresent if a cached query execution plan has been used.\n", + "type": "string" + }, + "result": { + "description": "An array of result documents for the current batch\n(might be empty if the query has no results).\n", + "items": { + "type": "" + }, + "type": "array" + } + }, + "required": [ + "error", + "code", + "hasMore", + "cached" + ], + "type": "object" + } + } + }, + "description": "Successfully fetched the batch.\n" + }, + "400": { + "description": "The cursor identifier is missing.\n" + }, + "404": { + "description": "A cursor with the specified identifier cannot be found.\n" + }, + "410": { + "description": "A server which processes the query or the leader of a shard which is\nused in the query stops responding, but the connection has not been closed.\n" + }, + "503": { + "description": "A server which processes the query or the leader of a shard which is used\nin the query is down, either for going through a restart, a failure,\nor connectivity issues.\n" + } + }, + "summary": "Read the next batch from a cursor", + "tags": [ + "Queries" + ] + }, + "put": { + "description": 
"\u003e **WARNING:**\nThis endpoint is deprecated in favor of its functionally equivalent POST counterpart.\n\n\nIf the cursor is still alive, returns an object with the following\nattributes:\n\n- `id`: a `cursor-identifier`\n- `result`: a list of documents for the current batch\n- `hasMore`: `false` if this was the last batch\n- `count`: if present the total number of elements\n- `code`: an HTTP status code\n- `error`: a boolean flag to indicate whether an error occurred\n- `errorNum`: a server error number (if `error` is `true`)\n- `errorMessage`: a descriptive error message (if `error` is `true`)\n- `extra`: an object with additional information about the query result, with\n  the nested objects `stats` and `warnings`. Only delivered as part of the last\n  batch in case of a cursor with the `stream` option enabled.\n\nNote that even if `hasMore` returns `true`, the next call might\nstill return no documents. If, however, `hasMore` is `false`, then\nthe cursor is exhausted. Once the `hasMore` attribute has a value of\n`false`, the client can stop.\n\nIf the cursor is not fully consumed, the time-to-live for the cursor\nis renewed by this API call.\n", + "operationId": "getNextAqlQueryCursorBatchPut", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the cursor\n", + "in": "path", + "name": "cursor-identifier", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Successfully fetched the batch.\n" + }, + "400": { + "description": "The cursor identifier is missing.\n" + }, + "404": { + "description": "A cursor with the specified identifier cannot be found.\n" + }, + "410": { + "description": "A server which processes the query or the leader of a shard which is\nused in the query stops responding, but the connection has not been 
closed.\n" + }, + "503": { + "description": "A server which processes the query or the leader of a shard which is used\nin the query is down, either for going through a restart, a failure,\nor connectivity issues.\n" + } + }, + "summary": "Read the next batch from a cursor (deprecated)", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/cursor/{cursor-identifier}/{batch-identifier}": { + "post": { + "description": "You can use this endpoint to retry fetching the latest batch from a cursor.\nThe endpoint requires the `allowRetry` query option to be enabled for the cursor.\n\nCalling this endpoint with the last returned batch identifier returns the\nquery results for that same batch again. This does not advance the cursor.\nClient applications can use this to re-transfer a batch once more in case of\ntransfer errors.\n\nYou can also call this endpoint with the next batch identifier, i.e. the value\nreturned in the `nextBatchId` attribute of a previous request. This advances the\ncursor and returns the results of the next batch.\n\nFrom v3.11.1 onward, you may use this endpoint even if the `allowRetry`\nattribute is `false` to fetch the next batch, but you cannot request a batch\nagain unless you set it to `true`.\n\nNote that it is only supported to query the last returned batch identifier or\nthe directly following batch identifier. The latter is only supported if there\nare more results in the cursor (i.e. 
`hasMore` is `true` in the latest batch).\n\nNote that when the last batch has been consumed successfully by a client\napplication, it should explicitly delete the cursor to inform the server that it\nsuccessfully received and processed the batch so that the server can free up\nresources.\n\nThe time-to-live for the cursor is renewed by this API call.\n", + "operationId": "getPreviousAqlQueryCursorBatch", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The ID of the cursor.\n", + "in": "path", + "name": "cursor-identifier", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The ID of the batch. The first batch has an ID of `1` and the value is\nincremented by 1 with every batch. You can only request the latest batch again\n(or the next batch). Earlier batches are not kept on the server-side.\n", + "in": "path", + "name": "batch-identifier", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "cached": { + "description": "A boolean flag indicating whether the query result was served\nfrom the query results cache or not. 
If the query result is served from the query\ncache, the `extra` attribute in the response does not contain the `stats`\nand `profile` sub-attributes.\n", + "type": "boolean" + }, + "code": { + "description": "The HTTP status code.\n", + "type": "integer" + }, + "count": { + "description": "The total number of result documents available (only\navailable if the query was executed with the `count` attribute set).\n", + "type": "integer" + }, + "error": { + "description": "A flag to indicate that an error occurred (`false` in this case).\n", + "type": "boolean" + }, + "extra": { + "description": "An optional JSON object with extra information about the query result.\n\nOnly delivered as part of the first batch, or the last batch in case of a cursor\nwith the `stream` option enabled.\n", + "properties": { + "plan": { + "description": "The execution plan.\n", + "properties": { + "collections": { + "description": "A list of the collections involved in the query. The list only includes the\ncollections that can statically be determined at query compile time.\n", + "items": { + "properties": { + "name": { + "description": "The collection name.\n", + "type": "string" + }, + "type": { + "description": "How the collection is used.\n", + "enum": [ + "read", + "write", + "exclusive" + ], + "type": "string" + } + }, + "required": [ + "name", + "type" + ], + "type": "object" + }, + "type": "array" + }, + "estimatedCost": { + "description": "The estimated cost of the query.\n", + "type": "number" + }, + "estimatedNrItems": { + "description": "The estimated number of results.\n", + "type": "integer" + }, + "isModificationQuery": { + "description": "Whether the query contains write operations.\n", + "type": "boolean" + }, + "nodes": { + "description": "A nested list of the execution plan nodes.\n", + "items": { + "type": "object" + }, + "type": "array" + }, + "rules": { + "description": "A list with the names of the applied optimizer rules.\n", + "items": { + "type": "string" + }, + 
"type": "array" + }, + "variables": { + "description": "All of the query variables, including user-created and internal ones.\n", + "items": { + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "nodes", + "rules", + "collections", + "variables", + "estimatedCost", + "estimatedNrItems", + "isModificationQuery" + ], + "type": "object" + }, + "profile": { + "description": "The duration of the different query execution phases in seconds.\n", + "properties": { + "executing": { + "description": "", + "type": "number" + }, + "finalizing": { + "description": "", + "type": "number" + }, + "initializing": { + "description": "", + "type": "number" + }, + "instantiating executors": { + "description": "", + "type": "number" + }, + "instantiating plan": { + "description": "", + "type": "number" + }, + "loading collections": { + "description": "", + "type": "number" + }, + "optimizing ast": { + "description": "", + "type": "number" + }, + "optimizing plan": { + "description": "", + "type": "number" + }, + "parsing": { + "description": "", + "type": "number" + } + }, + "required": [ + "initializing", + "parsing", + "optimizing ast", + "loading collections", + "instantiating plan", + "optimizing plan", + "instantiating executors", + "executing", + "finalizing" + ], + "type": "object" + }, + "stats": { + "description": "An object with query statistics.\n", + "properties": { + "cacheHits": { + "description": "The total number of index entries read from in-memory caches for indexes\nof type edge or persistent. This value is only non-zero when reading from indexes\nthat have an in-memory cache enabled, and when the query allows using the in-memory\ncache (i.e. using equality lookups on all index attributes).\n", + "type": "integer" + }, + "cacheMisses": { + "description": "The total number of cache read attempts for index entries that could not\nbe served from in-memory caches for indexes of type edge or persistent. 
This value\nis only non-zero when reading from indexes that have an in-memory cache enabled, the\nquery allows using the in-memory cache (i.e. using equality lookups on all index attributes)\nand the looked up values are not present in the cache.\n", + "type": "integer" + }, + "cursorsCreated": { + "description": "The total number of cursor objects created during query execution. Cursor\nobjects are created for index lookups.\n", + "type": "integer" + }, + "cursorsRearmed": { + "description": "The total number of times an existing cursor object was repurposed.\nRepurposing an existing cursor object is normally more efficient compared to destroying an\nexisting cursor object and creating a new one from scratch.\n", + "type": "integer" + }, + "documentLookups": { + "description": "The number of real document lookups caused by late materialization\nas well as `IndexNode`s that had to load document attributes not covered\nby the index. This is how many documents had to be fetched from storage after\nan index scan that initially covered the attribute access for these documents.\n", + "type": "integer" + }, + "executionTime": { + "description": "The query execution time (wall-clock time) in seconds.\n", + "type": "number" + }, + "filtered": { + "description": "The total number of documents removed after executing a filter condition\nin a `FilterNode` or another node that post-filters data. 
Note that nodes of the\n`IndexNode` type can also filter documents by selecting only the required index range\nfrom a collection, and the `filtered` value only indicates how much filtering was done by a\npost filter in the `IndexNode` itself or following `FilterNode` nodes.\nNodes of the `EnumerateCollectionNode` and `TraversalNode` types can also apply\nfilter conditions and can report the number of filtered documents.\n", + "type": "integer" + }, + "fullCount": { + "description": "The total number of documents that matched the search condition if the query's\nfinal top-level `LIMIT` operation were not present.\nThis attribute may only be returned if the `fullCount` option was set when starting the\nquery and only contains a sensible value if the query contains a `LIMIT` operation on\nthe top level.\n", + "type": "integer" + }, + "httpRequests": { + "description": "The total number of cluster-internal HTTP requests performed.\n", + "type": "integer" + }, + "intermediateCommits": { + "description": "The number of intermediate commits performed by the query. This is only non-zero\nfor write queries, and only for queries that reached either the `intermediateCommitSize`\nor `intermediateCommitCount` thresholds. 
Note: in a cluster, intermediate commits can happen\non each participating DB-Server.\n", + "type": "integer" + }, + "nodes": { + "description": "When the query is executed with the `profile` option set to at least `2`,\nthen this attribute contains runtime statistics per query execution node.\nFor a human readable output, you can execute\n`db._profileQuery(\u003cquery\u003e, \u003cbind-vars\u003e)` in arangosh.\n", + "items": { + "properties": { + "calls": { + "description": "The number of calls to this node.\n", + "type": "integer" + }, + "id": { + "description": "The execution node ID to correlate the statistics with the `plan` returned in\nthe `extra` attribute.\n", + "type": "integer" + }, + "items": { + "description": "The number of items returned by this node. Items are the temporary results\nreturned at this stage.\n", + "type": "integer" + }, + "runtime": { + "description": "The execution time of this node in seconds.\n", + "type": "number" + } + }, + "required": [ + "id", + "calls", + "items", + "runtime" + ], + "type": "object" + }, + "type": "array" + }, + "peakMemoryUsage": { + "description": "The maximum memory usage of the query while it was running. In a cluster,\nthe memory accounting is done per shard, and the memory usage reported is the peak\nmemory usage value from the individual shards.\nNote that to keep things lightweight, the per-query memory usage is tracked on a relatively\nhigh level, not including any memory allocator overhead nor any memory used for temporary\nresults calculations (e.g. memory allocated/deallocated inside AQL expressions and function\ncalls).\n", + "type": "integer" + }, + "scannedFull": { + "description": "The total number of documents iterated over when scanning a collection\nwithout an index. 
Documents scanned by subqueries are included in the result, but\noperations triggered by built-in or user-defined AQL functions are not.\n", + "type": "integer" + }, + "scannedIndex": { + "description": "The total number of documents iterated over when scanning a collection using\nan index. Documents scanned by subqueries are included in the result, but operations\ntriggered by built-in or user-defined AQL functions are not.\n", + "type": "integer" + }, + "seeks": { + "description": "The number of seek calls done by RocksDB iterators for merge joins\n(`JoinNode` in the execution plan).\n", + "type": "integer" + }, + "writesExecuted": { + "description": "The total number of data-modification operations successfully executed.\n", + "type": "integer" + }, + "writesIgnored": { + "description": "The total number of data-modification operations that were unsuccessful,\nbut have been ignored because of the `ignoreErrors` query option.\n", + "type": "integer" + } + }, + "required": [ + "writesExecuted", + "writesIgnored", + "documentLookups", + "seeks", + "scannedFull", + "scannedIndex", + "cursorsCreated", + "cursorsRearmed", + "cacheHits", + "cacheMisses", + "filtered", + "httpRequests", + "executionTime", + "peakMemoryUsage", + "intermediateCommits" + ], + "type": "object" + }, + "warnings": { + "description": "A list of query warnings.\n", + "items": { + "properties": { + "code": { + "description": "An error code.\n", + "type": "integer" + }, + "message": { + "description": "A description of the problem.\n", + "type": "string" + } + }, + "required": [ + "code", + "message" + ], + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "warnings", + "stats" + ], + "type": "object" + }, + "hasMore": { + "description": "A boolean indicator whether there are more results\navailable for the cursor on the server.\n\nNote that even if `hasMore` returns `true`, the next call might still return no\ndocuments. 
Once `hasMore` is `false`, the cursor is exhausted and the client\ncan stop asking for more results.\n", + "type": "boolean" + }, + "id": { + "description": "The ID of the cursor for fetching more result batches.\n", + "type": "string" + }, + "nextBatchId": { + "description": "Only set if the `allowRetry` query option is enabled in v3.11.0.\nFrom v3.11.1 onward, this attribute is always set, except in the last batch.\n\nThe ID of the batch after the current one. The first batch has an ID of `1` and\nthe value is incremented by 1 with every batch. You can remember and use this\nbatch ID should retrieving the next batch fail. Use the\n`POST /_api/cursor/\u003ccursor-id\u003e/\u003cbatch-id\u003e` endpoint to ask for the batch again.\nYou can also request the next batch.\n", + "type": "string" + }, + "planCacheKey": { + "description": "The key of the plan cache entry. This attribute is only\npresent if a cached query execution plan has been used.\n", + "type": "string" + }, + "result": { + "description": "An array of result documents for the current batch\n(might be empty if the query has no results).\n", + "items": { + "type": "" + }, + "type": "array" + } + }, + "required": [ + "error", + "code", + "hasMore", + "cached" + ], + "type": "object" + } + } + }, + "description": "The server responds with *HTTP 200* in case of success.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP status code.\n", + "type": "integer" + }, + "error": { + "description": "A flag to indicate that an error occurred (`false` in this case).\n", + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message (if `error` is `true`).\n", + "type": "string" + }, + "errorNum": { + "description": "A server error number (if `error` is `true`).\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": 
"The cursor and the batch identifier are missing.\n" + }, + "404": { + "description": "A cursor with the specified identifier cannot be found, or the requested\nbatch isn't available.\n" + }, + "410": { + "description": "The server responds with *HTTP 410* if a server which processes the query\nor is the leader for a shard which is used in the query stops responding, but\nthe connection has not been closed.\n" + }, + "503": { + "description": "The server responds with *HTTP 503* if a server which processes the query\nor is the leader for a shard which is used in the query is down, either for\ngoing through a restart, a failure or connectivity issues.\n" + } + }, + "summary": "Read a batch from the cursor again", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/database/current": { + "get": { + "description": "Retrieves the properties of the current database\n\nThe response is a JSON object with the following attributes:\n\n- `name`: the name of the current database\n- `id`: the id of the current database\n- `path`: the filesystem path of the current database\n- `isSystem`: whether or not the current database is the `_system` database\n- `sharding`: the default sharding method for collections created in this database\n- `replicationFactor`: the default replication factor for collections in this database\n- `writeConcern`: the default write concern for collections in this database\n", + "operationId": "getCurrentDatabase", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "is returned if the information was retrieved successfully.\n" + }, + "400": { + "description": "is returned if the request is invalid.\n" + }, + "404": { + "description": "is returned if the database could not be found.\n" + } + }, + "summary": "Get information about the current 
database", + "tags": [ + "Databases" + ] + } + }, + "/_db/{database-name}/_api/database/user": { + "get": { + "description": "Retrieves the list of all databases the current user can access without\nspecifying a different username or password.\n", + "operationId": "listUserAccessibleDatabases", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "is returned if the list of database was compiled successfully.\n" + }, + "400": { + "description": "is returned if the request is invalid.\n" + } + }, + "summary": "List the accessible databases", + "tags": [ + "Databases" + ] + } + }, + "/_db/{database-name}/_api/document/{collection}": { + "delete": { + "description": "The body of the request is an array consisting of selectors for\ndocuments. A selector can either be a string with a key or a string\nwith a document identifier or an object with a `_key` attribute. This\nAPI call removes all specified documents from `collection`.\nIf the `ignoreRevs` query parameter is `false` and the\nselector is an object and has a `_rev` attribute, it is a\nprecondition that the actual revision of the removed document in the\ncollection is the specified one.\n\nThe body of the response is an array of the same length as the input\narray. For each input selector, the output contains a JSON object\nwith the information about the outcome of the operation. 
If no error\noccurred, then such an object has the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\nIn case of an error, the object has the `error` attribute set to `true`\nand `errorCode` set to the error code.\n\nIf the `waitForSync` parameter is not specified or set to `false`,\nthen the collection's default `waitForSync` behavior is applied.\nThe `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync`\nvalue of `true`.\n\nIf the query parameter `returnOld` is `true`, then\nthe complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nNote that if any precondition is violated or an error occurred with\nsome of the documents, the return code is still 200 or 202, but the\n`X-Arango-Error-Codes` HTTP header is set. It contains a map of the\nerror codes and how often each kind of error occurred. 
For example,\n`1200:17,1205:10` means that in 17 cases the error 1200 (\"revision conflict\")\nhas happened, and in 10 cases the error 1205 (\"illegal document handle\").\n", + "operationId": "deleteDocuments", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Collection from which documents are removed.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Wait until deletion operation has been synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete previous revision of the changed\ndocument under the attribute `old` in the result.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, an empty object is returned as response if all document operations\nsucceed. No meta-data is returned for the deleted documents. If at least one of\nthe operations raises an error, an array with the error object(s) is returned.\n\nYou can use this option to save network traffic but you cannot map any errors\nto the inputs of your request.\n", + "in": "query", + "name": "silent", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, ignore any `_rev` attribute in the selectors. No\nrevision check is performed. 
If set to `false` then revisions are checked.\nThe default is `true`.\n", + "in": "query", + "name": "ignoreRevs", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Whether to delete existing entries from in-memory index caches and refill them\nif document removals affect the edge index or cache-enabled persistent indexes.\n", + "in": "query", + "name": "refillIndexCaches", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "An array of document selectors. A selector can be a string\n(document key or identifier) or an object that has to contain a\n`_key` attribute with the document key.\n", + "type": "array" + } + } + } + }, + "responses": { + "200": { + "description": "The individual operations have been processed and `waitForSync` was `true`.\n" + }, + "202": { + "description": "The individual operations have been processed and `waitForSync` was `false`.\n" + }, + "403": { + "description": "If the error code is `1004`, the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + }, + "404": { + "description": "The collection cannot be found.\nThe response body contains an error document in this case.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "503": { + "description": "The system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + } + }, + "summary": "Remove multiple documents", + "tags": [ + "Documents" + ] + }, + "patch": { + "description": "Partially updates documents, the documents to update are specified\nby the `_key` attributes in the body objects. The body of the\nrequest must contain a JSON array of document updates with the\nattributes to patch (the patch documents). 
All attributes from the\npatch documents are added to the existing documents if they do\nnot yet exist, and overwritten in the existing documents if they do\nexist there.\n\nThe values of the `_key`, `_id`, and `_rev` system attributes as well as\nattributes used as sharding keys cannot be changed.\n\nSetting an attribute value to `null` in the patch documents causes a\nvalue of `null` to be saved for the attribute by default.\n\nIf `ignoreRevs` is `false` and there is a `_rev` attribute in a\ndocument in the body and its value does not match the revision of\nthe corresponding document in the database, the precondition is\nviolated.\n\nCluster only: The patch document _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. Should the shard keys\nvalues be incorrect ArangoDB may answer with a *not found* error\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document replacement operation to disk even in case\nthat the `waitForSync` flag had been disabled for the entire collection.\nThus, the `waitForSync` query parameter can be used to force synchronization\nof just specific operations. To use this, set the `waitForSync` parameter\nto `true`. If the `waitForSync` parameter is not specified or set to\n`false`, then the collection's default `waitForSync` behavior is\napplied. The `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync` value\nof `true`.\n\nThe body of the response contains a JSON array of the same length\nas the input array with the information about the identifier and the\nrevision of the updated documents. 
Each element has the following\nattributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the new document revision.\n\nIn case of an error or violated precondition, an error\nobject with the attribute `error` set to `true` and the attribute\n`errorCode` set to the error code is built.\n\nIf the query parameter `returnOld` is `true`, then, for each\ngenerated document, the complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nIf the query parameter `returnNew` is `true`, then, for each\ngenerated document, the complete new document is returned under\nthe `new` attribute in the result.\n\nNote that if any precondition is violated or an error occurred with\nsome of the documents, the return code is still 201 or 202, but the\n`X-Arango-Error-Codes` HTTP header is set. It contains a map of the\nerror codes and how often each kind of error occurred. For example,\n`1200:17,1205:10` means that in 17 cases the error 1200 (\"revision conflict\")\nhas happened, and in 10 cases the error 1205 (\"illegal document handle\").\n", + "operationId": "updateDocuments", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the `collection` in which the documents are to be updated.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "If the intention is to delete existing attributes with the patch\ncommand, set the `keepNull` URL query parameter to `false`. 
This modifies the\nbehavior of the patch command to remove top-level attributes and sub-attributes\nfrom the existing document that are contained in the patch document with an\nattribute value of `null` (but not attributes of objects that are nested inside\nof arrays).\n", + "in": "query", + "name": "keepNull", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Controls whether objects (not arrays) are merged if present in\nboth the existing and the patch document. If set to `false`, the\nvalue in the patch document overwrites the existing document's\nvalue. If set to `true`, objects are merged. The default is\n`true`.\n", + "in": "query", + "name": "mergeObjects", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Wait until the new documents have been synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "By default, or if this is set to `true`, the `_rev` attributes in\nthe given documents are ignored. If this is set to `false`, then\nany `_rev` attribute given in a body document is taken as a\nprecondition. The document is only updated if the current revision\nis the one specified.\n", + "in": "query", + "name": "ignoreRevs", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete previous revision of the changed\ndocuments under the attribute `old` in the result.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete new documents under the attribute `new`\nin the result.\n", + "in": "query", + "name": "returnNew", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, an empty object is returned as response if all document operations\nsucceed. 
No meta-data is returned for the updated documents. If at least one\noperation raises an error, an array with the error object(s) is returned.\n\nYou can use this option to save network traffic but you cannot map any errors\nto the inputs of your request.\n", + "in": "query", + "name": "silent", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Whether to update existing entries in in-memory index caches if document updates\naffect the edge index or cache-enabled persistent indexes.\n", + "in": "query", + "name": "refillIndexCaches", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "You can use the `versionAttribute` option for external versioning support.\nIf set, the attribute with the name specified by the option is looked up in the\nstored document and the attribute value is compared numerically to the value of\nthe versioning attribute in the supplied document that is supposed to update it.\n\nIf the version number in the new document is higher (rounded down to a whole number)\nthan in the document that already exists in the database, then the update\noperation is performed normally. This is also the case if the new versioning\nattribute has a non-numeric value, if it is a negative number, or if the\nattribute doesn't exist in the supplied or stored document.\n\nIf the version number in the new document is lower or equal to what exists in\nthe database, the operation is not performed and the existing document thus not\nchanged. 
No error is returned in this case.\n\nThe attribute can only be a top-level attribute.\n\nYou can check if `_oldRev` and `_rev` are different to determine if the\ndocument has been changed.\n", + "in": "query", + "name": "versionAttribute", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "An array of partial documents representing the desired updates.\nEach element has to contain a `_key` attribute. The existing\ndocuments with matching document keys are updated.\n", + "items": { + "type": "object" + }, + "type": "array" + } + } + } + }, + "responses": { + "201": { + "description": "The individual operations have been processed and `waitForSync` was `true`.\n" + }, + "202": { + "description": "The individual operations have been processed and `waitForSync` was `false`.\n" + }, + "400": { + "description": "The request body does not contain a valid JSON representation\nof an array of documents.\n" + }, + "403": { + "description": "If the error code is `1004`, the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + }, + "404": { + "description": "The collection cannot be found.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "503": { + "description": "The system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + } + }, + "summary": "Update multiple documents", + "tags": [ + "Documents" + ] + }, + "post": { + "description": "Creates a new document from the document given in the body, unless there\nis already a document with the `_key` given. If no `_key` is given, a\nnew unique `_key` is generated automatically. The `_id` is automatically\nset in both cases, derived from the collection name and `_key`.\n\n\u003e **INFO:**\nAn `_id` or `_rev` attribute specified in the body is ignored.\n\n\nIf the document was created successfully, then the `Location` header\ncontains the path to the newly created document. The `ETag` header field\ncontains the revision of the document. 
Both are only set in the single\ndocument case.\n\nUnless `silent` is set to `true`, the body of the response contains a\nJSON object with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n\nIf the collection parameter `waitForSync` is `false`, then the call\nreturns as soon as the document has been accepted. It does not wait\nuntil the documents have been synced to disk.\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document creation operation to disk even in\ncase that the `waitForSync` flag had been disabled for the entire\ncollection. Thus, the `waitForSync` query parameter can be used to\nforce synchronization of just this specific operations. To use this,\nset the `waitForSync` parameter to `true`. If the `waitForSync`\nparameter is not specified or set to `false`, then the collection's\ndefault `waitForSync` behavior is applied. 
The `waitForSync` query\nparameter cannot be used to disable synchronization for collections\nthat have a default `waitForSync` value of `true`.\n\nIf the query parameter `returnNew` is `true`, then, for each\ngenerated document, the complete new document is returned under\nthe `new` attribute in the result.\n", + "operationId": "createDocument", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the `collection` in which the document is to be created.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Wait until document has been synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Additionally return the complete new document under the attribute `new`\nin the result.\n", + "in": "query", + "name": "returnNew", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Additionally return the complete old document under the attribute `old`\nin the result. Only available if the overwrite option is used.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, an empty object is returned as response if the document operation\nsucceeds. No meta-data is returned for the created document. If the\noperation raises an error, an error object is returned.\n\nYou can use this option to save network traffic.\n", + "in": "query", + "name": "silent", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, the insert becomes a replace-insert. 
If a document with the\nsame `_key` already exists, the new document is not rejected with unique\nconstraint violation error but replaces the old document. Note that operations\nwith `overwrite` parameter require a `_key` attribute in the request payload,\ntherefore they can only be performed on collections sharded by `_key`.\n", + "in": "query", + "name": "overwrite", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "This option supersedes `overwrite` and offers the following modes:\n- `\"ignore\"`: if a document with the specified `_key` value exists already,\n nothing is done and no write operation is carried out. The\n insert operation returns success in this case. This mode does not\n support returning the old document version using `RETURN OLD`. When using\n `RETURN NEW`, `null` is returned in case the document already existed.\n- `\"replace\"`: if a document with the specified `_key` value exists already,\n it is overwritten with the specified document value. This mode is\n also used when no overwrite mode is specified but the `overwrite`\n flag is set to `true`.\n- `\"update\"`: if a document with the specified `_key` value exists already,\n it is patched (partially updated) with the specified document value.\n The overwrite mode can be further controlled via the `keepNull` and\n `mergeObjects` parameters.\n- `\"conflict\"`: if a document with the specified `_key` value exists already,\n return a unique constraint violation error so that the insert operation\n fails. This is also the default behavior in case the overwrite mode is\n not set, and the `overwrite` flag is `false` or not set either.\n", + "in": "query", + "name": "overwriteMode", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "If the intention is to delete existing attributes with the update-insert\ncommand, set the `keepNull` URL query parameter to `false`. 
This modifies the\nbehavior of the patch command to remove top-level attributes and sub-attributes\nfrom the existing document that are contained in the patch document with an\nattribute value of `null` (but not attributes of objects that are nested inside\nof arrays). This option controls the update-insert behavior only.\n", + "in": "query", + "name": "keepNull", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Controls whether objects (not arrays) are merged if present in both, the\nexisting and the update-insert document. If set to `false`, the value in the\npatch document overwrites the existing document's value. If set to `true`,\nobjects are merged. The default is `true`.\nThis option controls the update-insert behavior only.\n", + "in": "query", + "name": "mergeObjects", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Whether to add new entries to in-memory index caches if document insertions\naffect the edge index or cache-enabled persistent indexes.\n", + "in": "query", + "name": "refillIndexCaches", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Only applicable if `overwrite` is set to `true` or `overwriteMode`\nis set to `update` or `replace`.\n\nYou can use the `versionAttribute` option for external versioning support.\nIf set, the attribute with the name specified by the option is looked up in the\nstored document and the attribute value is compared numerically to the value of\nthe versioning attribute in the supplied document that is supposed to update/replace it.\n\nIf the version number in the new document is higher (rounded down to a whole number)\nthan in the document that already exists in the database, then the update/replace\noperation is performed normally. 
This is also the case if the new versioning\nattribute has a non-numeric value, if it is a negative number, or if the\nattribute doesn't exist in the supplied or stored document.\n\nIf the version number in the new document is lower or equal to what exists in\nthe database, the operation is not performed and the existing document thus not\nchanged. No error is returned in this case.\n\nThe attribute can only be a top-level attribute.\n\nYou can check if `_oldRev` (if present) and `_rev` are different to determine if the\ndocument has been changed.\n", + "in": "query", + "name": "versionAttribute", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "A JSON representation of a single document.\n", + "type": "object" + } + } + } + }, + "responses": { + "201": { + "description": "The document has been created successfully and\n`waitForSync` was `true`.\n" + }, + "202": { + "description": "The document has been created successfully and\n`waitForSync` was `false`.\n" + }, + "400": { + "description": "The request body does not contain a valid JSON representation\nof a document. The response body contains\nan error document in this case.\n" + }, + "403": { + "description": "If the error code is `1004`, the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. 
For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + }, + "404": { + "description": "The collection cannot be found.\nThe response body contains an error document in this case.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "409": { + "description": "There are two possible reasons for this error in the single document case:\n\n- A document with the same qualifiers in an indexed attribute conflicts with an\n already existing document and thus violates the unique constraint.\n The response body contains an error document with the `errorNum` set to\n `1210` (`ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED`) in this case.\n- Locking the document key or some unique index entry failed to due to another\n concurrent operation that operates on the same document. This is also referred\n to as a _write-write conflict_. The response body contains an error document\n with the `errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "503": { + "description": "The system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. 
For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + } + }, + "summary": "Create a document", + "tags": [ + "Documents" + ] + }, + "put": { + "description": "Replaces multiple documents in the specified collection with the\nones in the body, the replaced documents are specified by the `_key`\nattributes in the body documents.\n\nThe values of the `_key`, `_id`, and `_rev` system attributes as well as\nattributes used as sharding keys cannot be changed.\n\nIf `ignoreRevs` is `false` and there is a `_rev` attribute in a\ndocument in the body and its value does not match the revision of\nthe corresponding document in the database, the precondition is\nviolated.\n\nCluster only: The replace documents _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. Should the shard keys\nvalues be incorrect ArangoDB may answer with a `not found` error.\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document replacement operation to disk even in case\nthat the `waitForSync` flag had been disabled for the entire collection.\nThus, the `waitForSync` query parameter can be used to force synchronization\nof just specific operations. To use this, set the `waitForSync` parameter\nto `true`. If the `waitForSync` parameter is not specified or set to\n`false`, then the collection's default `waitForSync` behavior is\napplied. 
The `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync` value\nof `true`.\n\nThe body of the response contains a JSON array of the same length\nas the input array with the information about the identifier and the\nrevision of the replaced documents. In each element has the following\nattributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the new document revision.\n\nIn case of an error or violated precondition, an error\nobject with the attribute `error` set to `true` and the attribute\n`errorCode` set to the error code is built.\n\nIf the query parameter `returnOld` is `true`, then, for each\ngenerated document, the complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nIf the query parameter `returnNew` is `true`, then, for each\ngenerated document, the complete new document is returned under\nthe `new` attribute in the result.\n\nNote that if any precondition is violated or an error occurred with\nsome of the documents, the return code is still 201 or 202, but the\n`X-Arango-Error-Codes` HTTP header is set. It contains a map of the\nerror codes and how often each kind of error occurred. 
For example,\n`1200:17,1205:10` means that in 17 cases the error 1200 (\"revision conflict\")\nhas happened, and in 10 cases the error 1205 (\"illegal document handle\").\n", + "operationId": "replaceDocuments", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "This URL parameter is the name of the collection in which the\ndocuments are replaced.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Wait until the new documents have been synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "By default, or if this is set to `true`, the `_rev` attributes in\nthe given documents are ignored. If this is set to `false`, then\nany `_rev` attribute given in a body document is taken as a\nprecondition. The document is only replaced if the current revision\nis the one specified.\n", + "in": "query", + "name": "ignoreRevs", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete previous revision of the changed\ndocuments under the attribute `old` in the result.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete new documents under the attribute `new`\nin the result.\n", + "in": "query", + "name": "returnNew", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, an empty object is returned as response if all document operations\nsucceed. No meta-data is returned for the replaced documents. 
If at least one\noperation raises an error, an array with the error object(s) is returned.\n\nYou can use this option to save network traffic but you cannot map any errors\nto the inputs of your request.\n", + "in": "query", + "name": "silent", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Whether to update existing entries in in-memory index caches if documents\nreplacements affect the edge index or cache-enabled persistent indexes.\n", + "in": "query", + "name": "refillIndexCaches", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "You can use the `versionAttribute` option for external versioning support.\nIf set, the attribute with the name specified by the option is looked up in the\nstored document and the attribute value is compared numerically to the value of\nthe versioning attribute in the supplied document that is supposed to replace it.\n\nIf the version number in the new document is higher (rounded down to a whole number)\nthan in the document that already exists in the database, then the replace\noperation is performed normally. This is also the case if the new versioning\nattribute has a non-numeric value, if it is a negative number, or if the\nattribute doesn't exist in the supplied or stored document.\n\nIf the version number in the new document is lower or equal to what exists in\nthe database, the operation is not performed and the existing document thus not\nchanged. 
No error is returned in this case.\n\nThe attribute can only be a top-level attribute.\n\nYou can check if `_oldRev` and `_rev` are different to determine if the\ndocument has been changed.\n", + "in": "query", + "name": "versionAttribute", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "An array of documents. Each element has to contain a `_key` attribute.\nThe existing documents with matching document keys are replaced.\n", + "items": { + "type": "object" + }, + "type": "array" + } + } + } + }, + "responses": { + "201": { + "description": "The individual operations have been processed and `waitForSync` was `true`.\n" + }, + "202": { + "description": "The individual operations have been processed and `waitForSync` was `false`.\n" + }, + "400": { + "description": "The request body does not contain a valid JSON representation\nof an array of documents.\n" + }, + "403": { + "description": "If the error code is `1004`, the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + }, + "404": { + "description": "The collection cannot be found.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "503": { + "description": "The system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + } + }, + "summary": "Replace multiple documents", + "tags": [ + "Documents" + ] + } + }, + "/_db/{database-name}/_api/document/{collection}#get": { + "put": { + "description": "\u003e **WARNING:**\nThe endpoint for getting multiple documents is the same as for replacing\nmultiple documents but with an additional query parameter:\n`PUT /_api/document/{collection}?onlyget=true`. 
This is because a lot of\nsoftware does not support payload bodies in `GET` requests.\n\n\nReturns the documents identified by their `_key` in the body objects.\nThe body of the request _must_ contain a JSON array of either\nstrings (the `_key` values to lookup) or search documents.\n\nA search document _must_ contain at least a value for the `_key` field.\nA value for `_rev` _may_ be specified to verify whether the document\nhas the same revision value, unless _ignoreRevs_ is set to false.\n\nCluster only: The search document _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. Should the shard keys\nvalues be incorrect ArangoDB may answer with a *not found* error.\n\nThe returned array of documents contain three special attributes: \n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n", + "operationId": "getDocuments", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the `collection` from which the documents are to be read.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "This parameter is required to be `true`, otherwise a replace\noperation is executed!\n", + "example": true, + "in": "query", + "name": "onlyget", + "required": true, + "schema": { + "type": "boolean" + } + }, + { + "description": "Should the value be `true` (the default):\nIf a search document contains a value for the `_rev` field,\nthen the document is only returned if it has the same revision value.\nOtherwise a precondition failed error is 
returned.\n", + "in": "query", + "name": "ignoreRevs", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. This may result in \"dirty reads\".\n\nThe header is ignored if this operation is part of a Stream Transaction\n(`x-arango-trx-id` header). The header set when creating the transaction decides\nabout dirty reads for the entire transaction, not the individual read operations.\n", + "in": "header", + "name": "x-arango-allow-dirty-read", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "An array of documents to retrieve.\n", + "items": { + "type": "object" + }, + "type": "array" + } + } + } + }, + "responses": { + "200": { + "description": "No error occurred for the overall operation.\n" + }, + "400": { + "description": "The request body does not contain a valid JSON representation\nof an array of documents.\n" + }, + "404": { + "description": "The collection cannot be found.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + } + }, + "summary": "Get multiple documents", + "tags": [ + "Documents" + ] + } + }, + "/_db/{database-name}/_api/document/{collection}#multiple": { + "post": { + "description": "Creates new documents from the 
documents given in the body, unless there\nis already a document with the `_key` given. If no `_key` is given, a new\nunique `_key` is generated automatically. The `_id` is automatically\nset in both cases, derived from the collection name and `_key`.\n\nThe result body contains a JSON array of the\nsame length as the input array, and each entry contains the result\nof the operation for the corresponding input. In case of an error\nthe entry is a document with attributes `error` set to `true` and\nerrorCode set to the error code that has happened.\n\n\u003e **INFO:**\nAny `_id` or `_rev` attribute specified in the body is ignored.\n\n\nUnless `silent` is set to `true`, the body of the response contains an\narray of JSON objects with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n\nIf the collection parameter `waitForSync` is `false`, then the call\nreturns as soon as the documents have been accepted. It does not wait\nuntil the documents have been synced to disk.\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document creation operation to disk even in\ncase that the `waitForSync` flag had been disabled for the entire\ncollection. Thus, the `waitForSync` query parameter can be used to\nforce synchronization of just this specific operations. To use this,\nset the `waitForSync` parameter to `true`. If the `waitForSync`\nparameter is not specified or set to `false`, then the collection's\ndefault `waitForSync` behavior is applied. 
The `waitForSync` query\nparameter cannot be used to disable synchronization for collections\nthat have a default `waitForSync` value of `true`.\n\nIf the query parameter `returnNew` is `true`, then, for each\ngenerated document, the complete new document is returned under\nthe `new` attribute in the result.\n\nShould an error have occurred with some of the documents,\nthe `X-Arango-Error-Codes` HTTP header is set. It contains a map of the\nerror codes and how often each kind of error occurred. For example,\n`1200:17,1205:10` means that in 17 cases the error 1200 (\"revision conflict\")\nhas happened, and in 10 cases the error 1205 (\"illegal document handle\").\n", + "operationId": "createDocuments", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the `collection` in which the documents are to be created.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Wait until document has been synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Additionally return the complete new document under the attribute `new`\nin the result.\n", + "in": "query", + "name": "returnNew", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Additionally return the complete old document under the attribute `old`\nin the result. Only available if the overwrite option is used.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, an empty object is returned as response if all document operations\nsucceed. No meta-data is returned for the created documents. 
If any of the\noperations raises an error, an array with the error object(s) is returned.\n\nYou can use this option to save network traffic but you cannot map any errors\nto the inputs of your request.\n", + "in": "query", + "name": "silent", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, the insert becomes a replace-insert. If a document with the\nsame `_key` already exists, the new document is not rejected with a unique\nconstraint violation error but replaces the old document. Note that operations\nwith `overwrite` parameter require a `_key` attribute in the request payload,\ntherefore they can only be performed on collections sharded by `_key`.\n", + "in": "query", + "name": "overwrite", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "This option supersedes `overwrite` and offers the following modes:\n- `\"ignore\"`: if a document with the specified `_key` value exists already,\n nothing is done and no write operation is carried out. The\n insert operation returns success in this case. This mode does not\n support returning the old document version using `RETURN OLD`. When using\n `RETURN NEW`, `null` is returned in case the document already existed.\n- `\"replace\"`: if a document with the specified `_key` value exists already,\n it is overwritten with the specified document value. This mode is\n also used when no overwrite mode is specified but the `overwrite`\n flag is set to `true`.\n- `\"update\"`: if a document with the specified `_key` value exists already,\n it is patched (partially updated) with the specified document value.\n The overwrite mode can be further controlled via the `keepNull` and\n `mergeObjects` parameters.\n- `\"conflict\"`: if a document with the specified `_key` value exists already,\n return a unique constraint violation error so that the insert operation\n fails. 
This is also the default behavior in case the overwrite mode is\n not set, and the `overwrite` flag is `false` or not set either.\n", + "in": "query", + "name": "overwriteMode", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "If the intention is to delete existing attributes with the update-insert\ncommand, set the `keepNull` URL query parameter to `false`. This modifies the\nbehavior of the patch command to remove top-level attributes and sub-attributes\nfrom the existing document that are contained in the patch document with an\nattribute value of `null` (but not attributes of objects that are nested inside\nof arrays). This option controls the update-insert behavior only.\n", + "in": "query", + "name": "keepNull", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Controls whether objects (not arrays) are merged if present in both, the\nexisting and the update-insert document. If set to `false`, the value in the\npatch document overwrites the existing document's value. If set to `true`,\nobjects are merged. 
The default is `true`.\nThis option controls the update-insert behavior only.\n", + "in": "query", + "name": "mergeObjects", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Whether to add new entries to in-memory index caches if document insertions\naffect the edge index or cache-enabled persistent indexes.\n", + "in": "query", + "name": "refillIndexCaches", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Only applicable if `overwrite` is set to `true` or `overwriteMode`\nis set to `update` or `replace`.\n\nYou can use the `versionAttribute` option for external versioning support.\nIf set, the attribute with the name specified by the option is looked up in the\nstored document and the attribute value is compared numerically to the value of\nthe versioning attribute in the supplied document that is supposed to update/replace it.\n\nIf the version number in the new document is higher (rounded down to a whole number)\nthan in the document that already exists in the database, then the update/replace\noperation is performed normally. This is also the case if the new versioning\nattribute has a non-numeric value, if it is a negative number, or if the\nattribute doesn't exist in the supplied or stored document.\n\nIf the version number in the new document is lower or equal to what exists in\nthe database, the operation is not performed and the existing document thus not\nchanged. 
No error is returned in this case.\n\nThe attribute can only be a top-level attribute.\n\nYou can check if `_oldRev` (if present) and `_rev` are different to determine if the\ndocument has been changed.\n", + "in": "query", + "name": "versionAttribute", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "An array of documents to create.\n", + "items": { + "type": "object" + }, + "type": "array" + } + } + } + }, + "responses": { + "201": { + "description": "The individual operations have been processed and `waitForSync` was `true`.\n" + }, + "202": { + "description": "The individual operations have been processed and `waitForSync` was `false`.\n" + }, + "400": { + "description": "The request body does not contain a valid JSON representation\nof an array of documents.\n" + }, + "403": { + "description": "If the error code is `1004`, the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + }, + "404": { + "description": "The document or collection cannot be found.\nThe response body contains an error document in this case.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "503": { + "description": "The system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + } + }, + "summary": "Create multiple documents", + "tags": [ + "Documents" + ] + } + }, + "/_db/{database-name}/_api/document/{collection}/{key}": { + "delete": { + "description": "Unless `silent` is set to `true`, the body of the response contains a\nJSON object with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n\nIf the `waitForSync` parameter is not specified or set to `false`,\nthen the collection's default `waitForSync` behavior is applied.\nThe `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync`\nvalue of `true`.\n\nIf the query parameter `returnOld` is `true`, then\nthe complete previous revision of the document\nis returned under the `old` attribute in the result.\n", + "operationId": "deleteDocument", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the `collection` in which the document is to be deleted.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The document key.\n", + "in": "path", + "name": "key", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Wait until deletion operation has been synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete previous revision of the changed\ndocument under the attribute `old` in the 
result.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, an empty object is returned as response if the document operation\nsucceeds. No meta-data is returned for the deleted document. If the\noperation raises an error, an error object is returned.\n\nYou can use this option to save network traffic.\n", + "in": "query", + "name": "silent", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Whether to delete existing entries from in-memory index caches and refill them\nif document removals affect the edge index or cache-enabled persistent indexes.\n", + "in": "query", + "name": "refillIndexCaches", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "You can conditionally remove a document based on a target revision id by\nusing the `if-match` HTTP header.\n", + "in": "header", + "name": "If-Match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The document has been removed successfully and\n`waitForSync` was `true`.\n" + }, + "202": { + "description": "The document has been removed successfully and\n`waitForSync` was `false`.\n" + }, + "403": { + "description": "If the error code is `1004`, the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. 
For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + }, + "404": { + "description": "The document or collection cannot be found.\nThe response body contains an error document in this case.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "409": { + "description": "Locking the document key failed due to another\nconcurrent operation that operates on the same document.\nThis is also referred to as a _write-write conflict_.\nThe response body contains an error document with the\n`errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "412": { + "description": "An `If-Match` header is specified and the found\ndocument has a different version. The response includes the found\ndocument's current revision in the `_rev` attribute. Additionally, the\nattributes `_id` and `_key` are returned.\n" + }, + "503": { + "description": "The system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. 
For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + } + }, + "summary": "Remove a document", + "tags": [ + "Documents" + ] + }, + "get": { + "description": "Returns the document identified by the collection name and document key.\nThe returned document contains three special attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the document revision.\n", + "operationId": "getDocument", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the collection from which the document is to be read.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The document key.\n", + "in": "path", + "name": "key", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one\nETag. The document is returned, if it has a different revision than the\ngiven ETag. Otherwise an *HTTP 304* is returned.\n", + "in": "header", + "name": "If-None-Match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one\nETag. The document is returned, if it has the same revision as the\ngiven ETag. 
Otherwise an *HTTP 412* is returned.\n", + "in": "header", + "name": "If-Match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. This may result in \"dirty reads\".\n\nThe header is ignored if this operation is part of a Stream Transaction\n(`x-arango-trx-id` header). The header set when creating the transaction decides\nabout dirty reads for the entire transaction, not the individual read operations.\n", + "in": "header", + "name": "x-arango-allow-dirty-read", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The document has been found.\n" + }, + "304": { + "description": "The `If-None-Match` header is specified and the document\nhas the same revision.\n" + }, + "404": { + "description": "The document or collection cannot be found.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "412": { + "description": "An `If-Match` header is specified and the found\ndocument has a different revision. The response includes the found\ndocument's current revision in the `_rev` attribute. 
Additionally, the\n`_id` and `_key` attributes are returned.\n" + } + }, + "summary": "Get a document", + "tags": [ + "Documents" + ] + }, + "head": { + "description": "Like `GET`, but only returns the header fields and not the body. You\ncan use this call to get the current revision of a document or check if\nthe document was deleted.\n", + "operationId": "getDocumentHeader", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the `collection` from which the document is to be read.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The document key.\n", + "in": "path", + "name": "key", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one\nETag. If the current document revision is not equal to the specified ETag,\nan *HTTP 200* response is returned. If the current document revision is\nidentical to the specified ETag, then an *HTTP 304* is returned.\n", + "in": "header", + "name": "If-None-Match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one\nETag. The document is returned, if it has the same revision as the\ngiven ETag. Otherwise a *HTTP 412* is returned.\n", + "in": "header", + "name": "If-Match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. This may result in \"dirty reads\".\n\nThe header is ignored if this operation is part of a Stream Transaction\n(`x-arango-trx-id` header). 
The header set when creating the transaction decides\nabout dirty reads for the entire transaction, not the individual read operations.\n", + "in": "header", + "name": "x-arango-allow-dirty-read", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The document has been found.\n" + }, + "304": { + "description": "An `If-None-Match` header is specified and the document has\nthe same version.\n" + }, + "404": { + "description": "The document or collection cannot be found.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "412": { + "description": "An `If-Match` header is given and the found\ndocument has a different version. The response includes the found\ndocument's current revision in the `ETag` header.\n" + } + }, + "summary": "Get a document header", + "tags": [ + "Documents" + ] + }, + "patch": { + "description": "Partially updates the document identified by the *document ID*.\nThe body of the request must contain a JSON document with the\nattributes to patch (the patch document). 
All attributes from the\npatch document are added to the existing document if they do not\nyet exist, and overwritten in the existing document if they do exist\nthere.\n\nThe values of the `_key`, `_id`, and `_rev` system attributes as well as\nattributes used as sharding keys cannot be changed.\n\nSetting an attribute value to `null` in the patch document causes a\nvalue of `null` to be saved for the attribute by default.\n\nIf the `If-Match` header is specified and the revision of the\ndocument in the database is unequal to the given revision, the\nprecondition is violated.\n\nIf `If-Match` is not given and `ignoreRevs` is `false` and there\nis a `_rev` attribute in the body and its value does not match\nthe revision of the document in the database, the precondition is\nviolated.\n\nIf a precondition is violated, an *HTTP 412* is returned.\n\nIf the document exists and can be updated, then an *HTTP 201* or\nan *HTTP 202* is returned (depending on `waitForSync`, see below),\nthe `ETag` header field contains the new revision of the document\n(in double quotes) and the `Location` header contains a complete URL\nunder which the document can be queried.\n\nCluster only: The patch document _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. Should the shard keys\nvalues be incorrect ArangoDB may answer with a `not found` error.\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the updated document operation to disk even in case\nthat the `waitForSync` flag had been disabled for the entire collection.\nThus, the `waitForSync` query parameter can be used to force synchronization\nof just specific operations. To use this, set the `waitForSync` parameter\nto `true`. If the `waitForSync` parameter is not specified or set to\n`false`, then the collection's default `waitForSync` behavior is\napplied. 
The `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync` value\nof `true`.\n\nUnless `silent` is set to `true`, the body of the response contains a\nJSON object with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the new document revision.\n\nIf the query parameter `returnOld` is `true`, then\nthe complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nIf the query parameter `returnNew` is `true`, then\nthe complete new document is returned under\nthe `new` attribute in the result.\n\nIf the document does not exist, then a *HTTP 404* is returned and the\nbody of the response contains an error document.\n", + "operationId": "updateDocument", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the `collection` in which the document is to be updated.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The document key.\n", + "in": "path", + "name": "key", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "If the intention is to delete existing attributes with the patch\ncommand, set the `keepNull` URL query parameter to `false`. 
This modifies the\nbehavior of the patch command to remove top-level attributes and sub-attributes\nfrom the existing document that are contained in the patch document with an\nattribute value of `null` (but not attributes of objects that are nested inside\nof arrays).\n", + "in": "query", + "name": "keepNull", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Controls whether objects (not arrays) are merged if present in\nboth the existing and the patch document. If set to `false`, the\nvalue in the patch document overwrites the existing document's\nvalue. If set to `true`, objects are merged. The default is\n`true`.\n", + "in": "query", + "name": "mergeObjects", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Wait until document has been synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "By default, or if this is set to `true`, the `_rev` attributes in\nthe given document is ignored. If this is set to `false`, then\nthe `_rev` attribute given in the body document is taken as a\nprecondition. The document is only updated if the current revision\nis the one specified.\n", + "in": "query", + "name": "ignoreRevs", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete previous revision of the changed\ndocument under the attribute `old` in the result.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete new document under the attribute `new`\nin the result.\n", + "in": "query", + "name": "returnNew", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, an empty object is returned as response if the document operation\nsucceeds. No meta-data is returned for the updated document. 
If the\noperation raises an error, an error object is returned.\n\nYou can use this option to save network traffic.\n", + "in": "query", + "name": "silent", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Whether to update existing entries in in-memory index caches if document updates\naffect the edge index or cache-enabled persistent indexes.\n", + "in": "query", + "name": "refillIndexCaches", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "You can use the `versionAttribute` option for external versioning support.\nIf set, the attribute with the name specified by the option is looked up in the\nstored document and the attribute value is compared numerically to the value of\nthe versioning attribute in the supplied document that is supposed to update it.\n\nIf the version number in the new document is higher (rounded down to a whole number)\nthan in the document that already exists in the database, then the update\noperation is performed normally. This is also the case if the new versioning\nattribute has a non-numeric value, if it is a negative number, or if the\nattribute doesn't exist in the supplied or stored document.\n\nIf the version number in the new document is lower or equal to what exists in\nthe database, the operation is not performed and the existing document thus not\nchanged. 
No error is returned in this case.\n\nThe attribute can only be a top-level attribute.\n\nYou can check if `_oldRev` and `_rev` are different to determine if the\ndocument has been changed.\n", + "in": "query", + "name": "versionAttribute", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "You can conditionally update a document based on a target revision id by\nusing the `if-match` HTTP header.\n", + "in": "header", + "name": "If-Match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "A JSON representation of a (partial) document.\n", + "type": "object" + } + } + } + }, + "responses": { + "201": { + "description": "The document has been updated successfully and\n`waitForSync` was `true`.\n" + }, + "202": { + "description": "The document has been updated successfully and\n`waitForSync` was `false`.\n" + }, + "400": { + "description": "The request body does not contain a valid JSON representation\nof a document. The response body contains\nan error document in this case.\n" + }, + "403": { + "description": "If the error code is `1004`, the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + }, + "404": { + "description": "The document or collection cannot be found.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "409": { + "description": "There are two possible reasons for this error:\n\n- The update causes a unique constraint violation in a secondary index.\n The response body contains an error document with the `errorNum` set to\n `1210` (`ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED`) in this case.\n- Locking the document key or some unique index entry failed due to another\n concurrent operation that operates on the same document. This is also referred\n to as a _write-write conflict_. The response body contains an error document\n with the `errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "412": { + "description": "The precondition was violated. The response also contains\nthe found documents' current revisions in the `_rev` attributes.\nAdditionally, the attributes `_id` and `_key` are returned.\n" + }, + "503": { + "description": "The system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. 
For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + } + }, + "summary": "Update a document", + "tags": [ + "Documents" + ] + }, + "put": { + "description": "Replaces the specified document with the one in the body, provided there is\nsuch a document and no precondition is violated.\n\nThe values of the `_key`, `_id`, and `_rev` system attributes as well as\nattributes used as sharding keys cannot be changed.\n\nIf the `If-Match` header is specified and the revision of the\ndocument in the database is unequal to the given revision, the\nprecondition is violated.\n\nIf `If-Match` is not given and `ignoreRevs` is `false` and there\nis a `_rev` attribute in the body and its value does not match\nthe revision of the document in the database, the precondition is\nviolated.\n\nIf a precondition is violated, an *HTTP 412* is returned.\n\nIf the document exists and can be updated, then an *HTTP 201* or\nan *HTTP 202* is returned (depending on `waitForSync`, see below),\nthe `ETag` header field contains the new revision of the document\nand the `Location` header contains a complete URL under which the\ndocument can be queried.\n\nCluster only: The replace documents _may_ contain\nvalues for the collection's pre-defined shard keys. Values for the shard keys\nare treated as hints to improve performance. 
Should the shard keys\nvalues be incorrect ArangoDB may answer with a *not found* error.\n\nOptionally, the query parameter `waitForSync` can be used to force\nsynchronization of the document replacement operation to disk even in case\nthat the `waitForSync` flag had been disabled for the entire collection.\nThus, the `waitForSync` query parameter can be used to force synchronization\nof just specific operations. To use this, set the `waitForSync` parameter\nto `true`. If the `waitForSync` parameter is not specified or set to\n`false`, then the collection's default `waitForSync` behavior is\napplied. The `waitForSync` query parameter cannot be used to disable\nsynchronization for collections that have a default `waitForSync` value\nof `true`.\n\nUnless `silent` is set to `true`, the body of the response contains a\nJSON object with the following attributes:\n- `_id`, containing the document identifier with the format `\u003ccollection-name\u003e/\u003cdocument-key\u003e`.\n- `_key`, containing the document key that uniquely identifies a document within the collection.\n- `_rev`, containing the new document revision.\n\nIf the query parameter `returnOld` is `true`, then\nthe complete previous revision of the document\nis returned under the `old` attribute in the result.\n\nIf the query parameter `returnNew` is `true`, then\nthe complete new document is returned under\nthe `new` attribute in the result.\n\nIf the document does not exist, then a *HTTP 404* is returned and the\nbody of the response contains an error document.\n", + "operationId": "replaceDocument", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the `collection` in which the document is to be replaced.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + 
"description": "The document key.\n", + "in": "path", + "name": "key", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Wait until document has been synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "By default, or if this is set to `true`, the `_rev` attributes in\nthe given document is ignored. If this is set to `false`, then\nthe `_rev` attribute given in the body document is taken as a\nprecondition. The document is only replaced if the current revision\nis the one specified.\n", + "in": "query", + "name": "ignoreRevs", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete previous revision of the changed\ndocument under the attribute `old` in the result.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Return additionally the complete new document under the attribute `new`\nin the result.\n", + "in": "query", + "name": "returnNew", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, an empty object is returned as response if the document operation\nsucceeds. No meta-data is returned for the replaced document. 
If the\noperation raises an error, an error object is returned.\n\nYou can use this option to save network traffic.\n", + "in": "query", + "name": "silent", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Whether to update existing entries in in-memory index caches if document\nreplacements affect the edge index or cache-enabled persistent indexes.\n", + "in": "query", + "name": "refillIndexCaches", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "You can use the `versionAttribute` option for external versioning support.\nIf set, the attribute with the name specified by the option is looked up in the\nstored document and the attribute value is compared numerically to the value of\nthe versioning attribute in the supplied document that is supposed to replace it.\n\nIf the version number in the new document is higher (rounded down to a whole number)\nthan in the document that already exists in the database, then the replace\noperation is performed normally. This is also the case if the new versioning\nattribute has a non-numeric value, if it is a negative number, or if the\nattribute doesn't exist in the supplied or stored document.\n\nIf the version number in the new document is lower or equal to what exists in\nthe database, the operation is not performed and the existing document thus not\nchanged. 
No error is returned in this case.\n\nThe attribute can only be a top-level attribute.\n\nYou can check if `_oldRev` and `_rev` are different to determine if the\ndocument has been changed.\n", + "in": "query", + "name": "versionAttribute", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "You can conditionally replace a document based on a target revision id by\nusing the `if-match` HTTP header.\n", + "in": "header", + "name": "If-Match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "A JSON representation of a single document.\n", + "type": "object" + } + } + } + }, + "responses": { + "201": { + "description": "The document has been replaced successfully and\n`waitForSync` was `true`.\n" + }, + "202": { + "description": "The document has been replaced successfully and\n`waitForSync` was `false`.\n" + }, + "400": { + "description": "The request body does not contain a valid JSON representation\nof a document. The response body contains\nan error document in this case.\n" + }, + "403": { + "description": "If the error code is `1004`, the specified write concern for the\ncollection cannot be fulfilled. This can happen if less than the number of\nspecified replicas for a shard are currently in-sync with the leader. For example,\nif the write concern is `2` and the replication factor is `3`, then the\nwrite concern is not fulfilled if two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. 
It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + }, + "404": { + "description": "The collection or the document was not found.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "409": { + "description": "There are two possible reasons for this error:\n\n- The replace operation causes a unique constraint violation in a secondary\n index. The response body contains an error document with the `errorNum` set to\n `1210` (`ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED`) in this case.\n- Locking the document key or some unique index entry failed due to another\n concurrent operation that operates on the same document. This is also referred\n to as a _write-write conflict_. The response body contains an error document\n with the `errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.\n" + }, + "410": { + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "412": { + "description": "The precondition is violated. The response includes\nthe found documents' current revisions in the `_rev` attributes.\nAdditionally, the attributes `_id` and `_key` are returned.\n" + }, + "503": { + "description": "The system is temporarily not available. This can be a system\noverload or temporary failure. In this case it makes sense to retry the request\nlater.\n\nIf the error code is `1429`, then the write concern for the collection cannot be\nfulfilled. This can happen if less than the number of specified replicas for\na shard are currently in-sync with the leader. 
For example, if the write concern\nis `2` and the replication factor is `3`, then the write concern is not fulfilled\nif two replicas are not in-sync.\n\nNote that the HTTP status code is configurable via the\n`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`\nbut can be changed to `503` to signal client applications that it is a\ntemporary error.\n" + } + }, + "summary": "Replace a document", + "tags": [ + "Documents" + ] + } + }, + "/_db/{database-name}/_api/edges/{collection-id}": { + "get": { + "description": "Returns an array of edges starting or ending in the vertex identified by\n`vertex`.\n", + "operationId": "getVertexEdges", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The id of the collection.\n", + "in": "path", + "name": "collection-id", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The id of the start vertex.\n", + "in": "query", + "name": "vertex", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Selects `in` or `out` direction for edges. If not set, any edges are\nreturned.\n", + "in": "query", + "name": "direction", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. 
This may result in \"dirty reads\".\n", + "in": "header", + "name": "x-arango-allow-dirty-read", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "is returned if the edge collection was found and edges were retrieved.\n" + }, + "400": { + "description": "is returned if the request contains invalid parameters.\n" + }, + "404": { + "description": "is returned if the edge collection was not found.\n" + } + }, + "summary": "Get inbound and outbound edges", + "tags": [ + "Graphs" + ] + } + }, + "/_db/{database-name}/_api/engine": { + "get": { + "description": "Returns the storage engine the server is configured to use.\nThe response is a JSON object with the following attributes:\n", + "operationId": "getEngine", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "name": { + "description": "will be `rocksdb`\n", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + } + } + }, + "description": "is returned in all cases.\n" + } + }, + "summary": "Get the storage engine type", + "tags": [ + "Administration" + ] + } + }, + "/_db/{database-name}/_api/explain": { + "post": { + "description": "To explain how an AQL query would be executed on the server, the query string\ncan be sent to the server via an HTTP POST request. The server will then validate\nthe query and create an execution plan for it. The execution plan will be\nreturned, but the query will not be executed.\n\nThe execution plan that is returned by the server can be used to estimate the\nprobable performance of the query. 
Though the actual performance will depend\non many different factors, the execution plan normally can provide some rough\nestimates on the amount of work the server needs to do in order to actually run\nthe query.\n\nBy default, the explain operation will return the optimal plan as chosen by\nthe query optimizer The optimal plan is the plan with the lowest total estimated\ncost. The plan will be returned in the attribute `plan` of the response object.\nIf the option `allPlans` is specified in the request, the result will contain\nall plans created by the optimizer. The plans will then be returned in the\nattribute `plans`.\n\nThe result will also contain an attribute `warnings`, which is an array of\nwarnings that occurred during optimization or execution plan creation. Additionally,\na `stats` attribute is contained in the result with some optimizer statistics.\nIf `allPlans` is set to `false`, the result will contain an attribute `cacheable`\nthat states whether the query results can be cached on the server if the query\nresult cache were used. The `cacheable` attribute is not present when `allPlans`\nis set to `true`.\n\nEach plan in the result is a JSON object with the following attributes:\n- `nodes`: the array of execution nodes of the plan.\n\n- `estimatedCost`: the total estimated cost for the plan. 
If there are multiple\n plans, the optimizer will choose the plan with the lowest total cost.\n\n- `collections`: an array of collections used in the query\n\n- `rules`: an array of rules the optimizer applied.\n\n- `variables`: array of variables used in the query (note: this may contain\n internal variables created by the optimizer)\n", + "operationId": "explainAqlQuery", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "bindVars": { + "description": "An object with key/value pairs representing the bind parameters.\nFor a bind variable `@var` in the query, specify the value using an attribute\nwith the name `var`. For a collection bind variable `@@coll`, use `@coll` as the\nattribute name. For example: `\"bindVars\": { \"var\": 42, \"@coll\": \"products\" }`.\n", + "type": "object" + }, + "options": { + "description": "Options for the query\n", + "properties": { + "allPlans": { + "description": "if set to `true`, all possible execution plans will be returned.\nThe default is `false`, meaning only the optimal plan will be returned.\n", + "type": "boolean" + }, + "failOnWarning": { + "description": "If set to `true`, the query throws an exception and aborts instead of producing\na warning. You should use this option during development to catch potential issues\nearly. 
When the attribute is set to `false`, warnings are not propagated to\nexceptions and are returned with the query result.\n\nYou can use the `--query.fail-on-warning` startup option to adjust the\ndefault value for `failOnWarning` so you don't need to set it on a per-query basis.\n", + "type": "boolean" + }, + "fullCount": { + "description": "Whether to calculate the total number of documents matching the\nfilter conditions as if the query's final top-level `LIMIT` operation\nwere not applied. This option generally leads to different\nexecution plans.\n", + "type": "boolean" + }, + "maxNodesPerCallstack": { + "description": "The number of execution nodes in the query plan after that stack splitting is\nperformed to avoid a potential stack overflow. Defaults to the configured value\nof the startup option `--query.max-nodes-per-callstack`.\n\nThis option is only useful for testing and debugging and normally does not need\nany adjustment.\n", + "type": "integer" + }, + "maxNumberOfPlans": { + "description": "The maximum number of plans that the optimizer is allowed to\ngenerate. Setting this attribute to a low value allows to put a\ncap on the amount of work the optimizer does.\n", + "type": "integer" + }, + "maxWarningCount": { + "description": "Limits the number of warnings a query can return. The maximum number of warnings\nis `10` by default but you can increase or decrease the limit.\n", + "type": "integer" + }, + "optimizer": { + "description": "Options related to the query optimizer.\n", + "properties": { + "rules": { + "description": "A list of to-be-included or to-be-excluded optimizer rules can be put into this\nattribute, telling the optimizer to include or exclude specific rules. To disable\na rule, prefix its name with a `-`, to enable a rule, prefix it with a `+`. There is\nalso a pseudo-rule `all`, which matches all optimizer rules. 
`-all` disables all rules.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "profile": { + "description": "Whether to include additional query profiling information.\nIf set to `2`, the response includes the time it took to process\neach optimizer rule under `stats.rules`.\n", + "type": "integer" + }, + "usePlanCache": { + "default": false, + "description": "Set this option to `true` to utilize a cached query plan or add the execution plan\nof this query to the cache if it's not in the cache yet. Otherwise, the plan cache\nis bypassed (introduced in v3.12.4).\n\nQuery plan caching can reduce the total time for processing queries by avoiding\nto parse, plan, and optimize queries over and over again that effectively have\nthe same execution plan with at most some changes to bind parameter values.\n\nAn error is raised if a query doesn't meet the requirements for plan caching.\nSee [Cache eligibility](https://docs.arangodb.com/3.12/aql/execution-and-performance/caching-query-plans/#cache-eligibility)\nfor details.\n", + "type": "boolean" + } + }, + "type": "object" + }, + "query": { + "description": "the query which you want explained; If the query references any bind variables,\nthese must also be passed in the attribute `bindVars`. 
Additional\noptions for the query can be passed in the `options` attribute.\n", + "type": "string" + } + }, + "required": [ + "query" + ], + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "If the query is valid, the server will respond with *HTTP 200* and\nreturn the optimal execution plan in the `plan` attribute of the response.\nIf option `allPlans` was set in the request, an array of plans will be returned\nin the `allPlans` attribute instead.\n" + }, + "400": { + "description": "The request is malformed or the query contains a parse error.\nThe body of the response contains the error details embedded in a JSON object.\nOmitting bind variables if the query references any also results\nan error.\n" + }, + "404": { + "description": "A non-existing collection is accessed in the query.\n" + } + }, + "summary": "Explain an AQL query", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/foxx": { + "get": { + "description": "Fetches a list of services installed in the current database.\n\nReturns a list of objects with the following attributes:\n\n- `mount`: the mount path of the service\n- `development`: `true` if the service is running in development mode\n- `legacy`: `true` if the service is running in 2.8 legacy compatibility mode\n- `provides`: the service manifest's `provides` value or an empty object\n\nAdditionally the object may contain the following attributes if they have been set on the manifest:\n\n- `name`: a string identifying the service type\n- `version`: a semver-compatible version string\n", + "operationId": "listFoxxServices", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Whether or not system services should be excluded from the result.\n", + "in": "query", + "name": "excludeSystem", + "required": false, + "schema": { + "type": 
"boolean" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "List the installed services", + "tags": [ + "Foxx" + ] + }, + "post": { + "description": "Installs the given new service at the given mount path.\n\nThe request body can be any of the following formats:\n\n- `application/zip`: a raw zip bundle containing a service\n- `application/javascript`: a standalone JavaScript file\n- `application/json`: a service definition as JSON\n- `multipart/form-data`: a service definition as a multipart form\n\nA service definition is an object or form with the following properties or fields:\n\n- `configuration`: a JSON object describing configuration values\n- `dependencies`: a JSON object describing dependency settings\n- `source`: a fully qualified URL or an absolute path on the server's file system\n\nWhen using multipart data, the `source` field can also alternatively be a file field\ncontaining either a zip bundle or a standalone JavaScript file.\n\nWhen using a standalone JavaScript file the given file will be executed\nto define our service's HTTP endpoints. 
It is the same which would be defined\nin the field `main` of the service manifest.\n\nIf `source` is a URL, the URL must be reachable from the server.\nIf `source` is a file system path, the path will be resolved on the server.\nIn either case the path or URL is expected to resolve to a zip bundle,\nJavaScript file or (in case of a file system path) directory.\n\nNote that when using file system paths in a cluster with multiple Coordinators\nthe file system path must resolve to equivalent files on every Coordinator.\n", "operationId": "createFoxxService", "parameters": [ { - "description": "Mount path the service should be installed at.\n", - "in": "query", - "name": "mount", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path the service should be installed at.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Set to `true` to enable development mode.\n", + "in": "query", + "name": "development", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Set to `false` to not run the service's setup script.\n", + "in": "query", + "name": "setup", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Set to `true` to install the service in 2.8 legacy compatibility mode.\n", + "in": "query", + "name": "legacy", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "201": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Install a new service", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/commit": { + "post": { + "description": "Commits the local service state of the Coordinator to the database.\n\nThis can be used to resolve service conflicts between Coordinators that cannot be fixed 
automatically due to missing data.\n", + "operationId": "commitFoxxServiceState", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Overwrite existing service files in database even if they already exist.\n", + "in": "query", + "name": "replace", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "204": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Commit the local service state", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/configuration": { + "get": { + "description": "Fetches the current configuration for the service at the given mount path.\n\nReturns an object mapping the configuration option names to their definitions\nincluding a human-friendly `title` and the `current` value (if any).\n", + "operationId": "getFoxxConfiguration", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Get the configuration options", + "tags": [ + "Foxx" + ] + }, + "patch": { + "description": "Replaces the given service's configuration.\n\nReturns an object mapping all configuration option names to their new values.\n", + "operationId": "updateFoxxConfiguration", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the 
installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "options": { + "description": "A JSON object mapping configuration option names to their new values.\nAny omitted options will be ignored.\n", + "type": "object" + } + }, + "required": [ + "options" + ], + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Update the configuration options", + "tags": [ + "Foxx" + ] + }, + "put": { + "description": "Replaces the given service's configuration completely.\n\nReturns an object mapping all configuration option names to their new values.\n", + "operationId": "replaceFoxxConfiguration", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "options": { + "description": "A JSON object mapping configuration option names to their new values.\nAny omitted options will be reset to their default values or marked as unconfigured.\n", + "type": "object" + } + }, + "required": [ + "options" + ], + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Replace the configuration options", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/dependencies": { + "get": { + "description": "Fetches the current dependencies for service at the given mount path.\n\nReturns an object mapping the dependency names to their 
definitions\nincluding a human-friendly `title` and the `current` mount path (if any).\n", + "operationId": "getFoxxDependencies", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Get the dependency options", + "tags": [ + "Foxx" + ] + }, + "patch": { + "description": "Replaces the given service's dependencies.\n\nReturns an object mapping all dependency names to their new mount paths.\n", + "operationId": "updateFoxxDependencies", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "options": { + "description": "A JSON object mapping dependency names to their new mount paths.\nAny omitted dependencies will be ignored.\n", + "type": "object" + } + }, + "required": [ + "options" + ], + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Update the dependency options", + "tags": [ + "Foxx" + ] + }, + "put": { + "description": "Replaces the given service's dependencies completely.\n\nReturns an object mapping all dependency names to their new mount paths.\n", + "operationId": "replaceFoxxDependencies", + "parameters": [ + { + "description": "The name of the 
database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "options": { + "description": "A JSON object mapping dependency names to their new mount paths.\nAny omitted dependencies will be disabled.\n", + "type": "object" + } + }, + "required": [ + "options" + ], + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Replace the dependency options", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/development": { + "delete": { + "description": "Puts the service at the given mount path into production mode.\n\nWhen running ArangoDB in a cluster with multiple Coordinators this will\nreplace the service on all other Coordinators with the version on this\nCoordinator.\n", + "operationId": "disableFoxxDevelopmentMode", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Disable the development mode", + "tags": [ + "Foxx" + ] + }, + "post": { + "description": "Puts the service into development mode.\n\nWhile the service is running in development mode the service will be reloaded\nfrom the filesystem and its setup script (if any) will be re-executed every\ntime the service handles a request.\n\nWhen running 
ArangoDB in a cluster with multiple Coordinators note that changes\nto the filesystem on one Coordinator will not be reflected across the other\nCoordinators. This means you should treat your Coordinators as inconsistent\nas long as any service is running in development mode.\n", + "operationId": "enableFoxxDevelopmentMode", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Enable the development mode", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/download": { + "post": { + "description": "Downloads a zip bundle of the service directory.\n\nWhen development mode is enabled, this always creates a new bundle.\n\nOtherwise the bundle will represent the version of a service that\nis installed on that ArangoDB instance.\n", + "operationId": "downloadFoxxService", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + }, + "400": { + "description": "Returned if the mount path is unknown.\n" + } + }, + "summary": "Download a service bundle", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/readme": { + "get": { + "description": "Fetches the service's README or README.md file's contents if any.\n", + "operationId": 
"getFoxxReadme", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + }, + "204": { + "description": "Returned if no README file was found.\n" + } + }, + "summary": "Get the service README", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/scripts": { + "get": { + "description": "Fetches a list of the scripts defined by the service.\n\nReturns an object mapping the raw script names to human-friendly names.\n", + "operationId": "listFoxxScripts", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "List the service scripts", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/scripts/{name}": { + "post": { + "description": "Runs the given script for the service at the given mount path.\n\nReturns the exports of the script, if any.\n", + "operationId": "runFoxxScript", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Name of the script to run.\n", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string" + } + }, + { + 
"description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "data": { + "description": "An arbitrary JSON value that will be parsed and passed to the\nscript as its first argument.\n", + "type": "json" + } + }, + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Run a service script", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/service": { + "delete": { + "description": "Removes the service at the given mount path from the database and file system.\n\nReturns an empty response on success.\n", + "operationId": "deleteFoxxService", + "parameters": [ + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Set to `false` to not run the service's teardown script.\n", + "in": "query", + "name": "teardown", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "204": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Uninstall a service", + "tags": [ + "Foxx" + ] + }, + "get": { + "description": "Fetches detailed information for the service at the given mount path.\n\nReturns an object with the following attributes:\n\n- `mount`: the mount path of the service\n- `path`: the local file system path of the service\n- `development`: `true` if the service is running in development mode\n- `legacy`: `true` if the service is running in 2.8 legacy compatibility mode\n- `manifest`: the normalized JSON manifest of the service\n\nAdditionally the object may contain the following attributes if they have been set on the manifest:\n\n- `name`: a string identifying the service type\n- 
`version`: a semver-compatible version string\n", + "operationId": "getFoxxServiceDescription", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + }, + "400": { + "description": "Returned if the mount path is unknown.\n" + } + }, + "summary": "Get the service description", + "tags": [ + "Foxx" + ] + }, + "patch": { + "description": "Installs the given new service on top of the service currently installed at the given mount path.\nThis is only recommended for switching between different versions of the same service.\n\nUnlike replacing a service, upgrading a service retains the old service's configuration\nand dependencies (if any) and should therefore only be used to migrate an existing service\nto a newer or equivalent service.\n\nThe request body can be any of the following formats:\n\n- `application/zip`: a raw zip bundle containing a service\n- `application/javascript`: a standalone JavaScript file\n- `application/json`: a service definition as JSON\n- `multipart/form-data`: a service definition as a multipart form\n\nA service definition is an object or form with the following properties or fields:\n\n- `configuration`: a JSON object describing configuration values\n- `dependencies`: a JSON object describing dependency settings\n- `source`: a fully qualified URL or an absolute path on the server's file system\n\nWhen using multipart data, the `source` field can also alternatively be a file field\ncontaining either a zip bundle or a standalone JavaScript file.\n\nWhen using a standalone JavaScript file the given file will be executed\nto define our service's HTTP 
endpoints. It is the same which would be defined\nin the field `main` of the service manifest.\n\nIf `source` is a URL, the URL must be reachable from the server.\nIf `source` is a file system path, the path will be resolved on the server.\nIn either case the path or URL is expected to resolve to a zip bundle,\nJavaScript file or (in case of a file system path) directory.\n\nNote that when using file system paths in a cluster with multiple Coordinators\nthe file system path must resolve to equivalent files on every Coordinator.\n", + "operationId": "upgradeFoxxService", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Set to `true` to run the old service's teardown script.\n", + "in": "query", + "name": "teardown", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Set to `false` to not run the new service's setup script.\n", + "in": "query", + "name": "setup", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Set to `true` to install the new service in 2.8 legacy compatibility mode.\n", + "in": "query", + "name": "legacy", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Set to `true` to force service install even if no service is installed under given mount.\n", + "in": "query", + "name": "force", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Upgrade a service", + "tags": [ + "Foxx" + ] + }, + "put": { + "description": "Removes the service at the given mount path from the database and file 
system.\nThen installs the given new service at the same mount path.\n\nThis is a slightly safer equivalent to performing an uninstall of the old service\nfollowed by installing the new service. The new service's main and script files\n(if any) will be checked for basic syntax errors before the old service is removed.\n\nThe request body can be any of the following formats:\n\n- `application/zip`: a raw zip bundle containing a service\n- `application/javascript`: a standalone JavaScript file\n- `application/json`: a service definition as JSON\n- `multipart/form-data`: a service definition as a multipart form\n\nA service definition is an object or form with the following properties or fields:\n\n- `configuration`: a JSON object describing configuration values\n- `dependencies`: a JSON object describing dependency settings\n- `source`: a fully qualified URL or an absolute path on the server's file system\n\nWhen using multipart data, the `source` field can also alternatively be a file field\ncontaining either a zip bundle or a standalone JavaScript file.\n\nWhen using a standalone JavaScript file the given file will be executed\nto define our service's HTTP endpoints. 
It is the same which would be defined\nin the field `main` of the service manifest.\n\nIf `source` is a URL, the URL must be reachable from the server.\nIf `source` is a file system path, the path will be resolved on the server.\nIn either case the path or URL is expected to resolve to a zip bundle,\nJavaScript file or (in case of a file system path) directory.\n\nNote that when using file system paths in a cluster with multiple Coordinators\nthe file system path must resolve to equivalent files on every Coordinator.\n", + "operationId": "replaceFoxxService", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Set to `false` to not run the old service's teardown script.\n", + "in": "query", + "name": "teardown", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Set to `false` to not run the new service's setup script.\n", + "in": "query", + "name": "setup", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Set to `true` to install the new service in 2.8 legacy compatibility mode.\n", + "in": "query", + "name": "legacy", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Set to `true` to force service install even if no service is installed under given mount.\n", + "in": "query", + "name": "force", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Replace a service", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/swagger": { + "get": { + "description": "Fetches the Swagger API description 
for the service at the given mount path.\n\nThe response body will be an OpenAPI 2.0 compatible JSON description of the service API.\n", + "operationId": "getFoxxSwaggerDescription", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Get the Swagger description", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/foxx/tests": { + "post": { + "description": "Runs the tests for the service at the given mount path and returns the results.\n\nSupported test reporters are:\n\n- `default`: a simple list of test cases\n- `suite`: an object of test cases nested in suites\n- `stream`: a raw stream of test results\n- `xunit`: an XUnit/JUnit compatible structure\n- `tap`: a raw TAP compatible stream\n\nThe `Accept` request header can be used to further control the response format:\n\nWhen using the `stream` reporter `application/x-ldjson` will result\nin the response body being formatted as a newline-delimited JSON stream.\n\nWhen using the `tap` reporter `text/plain` or `text/*` will result\nin the response body being formatted as a plain text TAP report.\n\nWhen using the `xunit` reporter `application/xml` or `text/xml` will result\nin the response body being formatted as XML instead of JSONML.\n\nOtherwise the response body will be formatted as non-prettyprinted JSON.\n", + "operationId": "runFoxxTests", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Mount path of the 
installed service.\n", + "in": "query", + "name": "mount", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Test reporter to use.\n", + "in": "query", + "name": "reporter", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "Use the matching format for the reporter, regardless of the `Accept` header.\n", + "in": "query", + "name": "idiomatic", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Only run tests where the full name (including full test suites and test case)\nmatches this string.\n", + "in": "query", + "name": "filter", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "Returned if the request was successful.\n" + } + }, + "summary": "Run the service tests", + "tags": [ + "Foxx" + ] + } + }, + "/_db/{database-name}/_api/gharial": { + "get": { + "description": "Lists all graphs stored in this database.\n", + "operationId": "listGraphs", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "graphs": { + "description": "A list of all named graphs.\n", + "items": { + "properties": { + "graph": { + "description": "The properties of the named graph.\n", + "properties": { + "_id": { + "description": "The internal id value of this graph.\n", + "type": "string" + }, + "_rev": { + "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "type": "string" + }, + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "The name of the graph.\n", + "type": "string" + }, + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "replicationFactor": { + "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" + }, + 
"smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", + "_id", + "_rev", + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" + ], + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "error", + "code", + "graphs" + ], + "type": "object" + } + } + }, + "description": "Is returned if the module is available and the graphs can be listed.\n" + } + }, + "summary": "List all graphs", + "tags": [ + "Graphs" + ] + }, + "post": { + "description": "The creation of a graph requires the name of the graph and a\ndefinition of its edges.\n", + "operationId": "createGraph", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Define if the request should wait until everything is synced to disk.\nChanges the success HTTP response status code.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + 
"edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether to create a Disjoint SmartGraph instead of a regular SmartGraph\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "isSmart": { + "description": "Define if the created graph should be smart (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "Name of the graph.\n", + "type": "string" + }, + "options": { + "description": "a JSON object to define options for creating collections within this graph.\nIt can contain the following attributes:\n", + "properties": { + "numberOfShards": { + "description": "The number of shards that is used for every collection within this graph.\nCannot be modified later.\n", + "type": "integer" + }, + "replicationFactor": { + "description": "The replication factor used when initially creating collections for this graph.\nCan be set to `\"satellite\"` to create a SatelliteGraph, which then ignores\n`numberOfShards`, `minReplicationFactor`, and `writeConcern`\n(Enterprise Edition only).\n", + "type": "integer" + }, + "satellites": { + "description": "An array of collection names that is used to create SatelliteCollections\nfor a (Disjoint) 
SmartGraph using SatelliteCollections (Enterprise Edition only).\nEach array element must be a string and a valid collection name.\nThe collection type cannot be modified later.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "smartGraphAttribute": { + "description": "Only has effect in Enterprise Edition and it is required if isSmart is true.\nThe attribute name that is used to smartly shard the vertices of a graph.\nEvery vertex in this SmartGraph has to have this attribute.\nCannot be modified later.\n", + "type": "string" + }, + "writeConcern": { + "description": "Write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "numberOfShards", + "replicationFactor" + ], + "type": "object" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "name" + ], + "type": "object" + } + } + } + }, + "responses": { + "201": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 201, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "graph": { + "description": "The information about the newly created graph.\n", + "properties": { + "_id": { + "description": "The internal id value of this graph.\n", + "type": "string" + }, + "_rev": { + "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "type": "string" + }, + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "The name of the graph.\n", + "type": "string" + }, + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "replicationFactor": { + "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" + }, + 
"smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", + "_id", + "_rev", + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" + ], + "type": "object" + } + }, + "required": [ + "error", + "code", + "graph" + ], + "type": "object" + } + } + }, + "description": "Is returned if the graph can be created and `waitForSync` is enabled\nfor the `_graphs` collection, or given in the request.\nThe response body contains the graph configuration that has been stored.\n" + }, + "202": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 202, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "graph": { + "description": "The information about the newly created graph.\n", + "properties": { + "_id": { + "description": "The internal id value of this graph.\n", + "type": "string" + }, + "_rev": { + "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "type": "string" + }, + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "The name of the graph.\n", + "type": "string" + }, + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "replicationFactor": { + "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" + }, + 
"smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", + "_id", + "_rev", + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" + ], + "type": "object" + } + }, + "required": [ + "error", + "code", + "graph" + ], + "type": "object" + } + } + }, + "description": "Is returned if the graph can be created and `waitForSync` is disabled\nfor the `_graphs` collection and not given in the request.\nThe response body contains the graph configuration that has been stored.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if the request is in a wrong 
format.\n" + }, + "403": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 403, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if your user has insufficient rights.\nIn order to create a graph, you need to have at least the following privileges:\n- `Administrate` access on the database.\n- `Read Only` access on every collection used within this graph.\n" + }, + "409": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 409, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if there is a conflict storing the graph. 
This can occur\neither if a graph with this name already exists, or if there is an\nedge definition with the same edge collection but different `from`\nand `to` vertex collections in any other graph.\n" + } + }, + "summary": "Create a graph", + "tags": [ + "Graphs" + ] + } + }, + "/_db/{database-name}/_api/gharial/{graph}": { + "delete": { + "description": "Drops an existing graph object by name.\nOptionally all collections not used by other graphs\ncan be dropped as well.\n", + "operationId": "deleteGraph", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "Set to `true` to enable development mode.\n", - "in": "query", - "name": "development", - "required": false, + "description": "The name of the graph.\n", + "in": "path", + "name": "graph", + "required": true, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "Set to `false` to not run the service's setup script.\n", + "description": "Drop the collections of this graph as well. 
Collections are only\ndropped if they are not used in other graphs.\n", "in": "query", - "name": "setup", + "name": "dropCollections", "required": false, "schema": { "type": "boolean" } + } + ], + "responses": { + "202": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 202, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "removed": { + "description": "Always `true`.\n", + "example": true, + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "removed" + ], + "type": "object" + } + } + }, + "description": "Is returned if the graph can be dropped.\n" + }, + "403": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 403, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if your user has insufficient rights.\nIn order to drop a graph, you need to have at least the following privileges:\n- `Administrate` access on the database.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": 
"string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if no graph with this name can be found.\n" + } + }, + "summary": "Drop a graph", + "tags": [ + "Graphs" + ] + }, + "get": { + "description": "Selects information for a given graph.\nReturns the edge definitions as well as the orphan collections,\nor returns an error if the graph does not exist.\n", + "operationId": "getGraph", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the graph.\n", + "in": "path", + "name": "graph", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "graph": { + "description": "The information about the newly created graph\n", + "properties": { + "_id": { + "description": "The internal id value of this graph.\n", + "type": "string" + }, + "_rev": { + "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "type": "string" + }, + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "The name of the graph.\n", + "type": "string" + }, + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "replicationFactor": { + "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" + }, + 
"smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", + "_id", + "_rev", + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" + ], + "type": "object" + } + }, + "required": [ + "error", + "code", + "graph" + ], + "type": "object" + } + } + }, + "description": "Returns the graph if it can be found.\nThe result has the following format:\n" }, - { - "description": "Set to `true` to install the service in 2.8 legacy compatibility mode.\n", - "in": "query", - "name": "legacy", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "201": { - "description": "Returned if the request was successful.\n" + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + 
"required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if no graph with this name can be found.\n" } }, - "summary": "Install a new service", + "summary": "Get a graph", "tags": [ - "Foxx" + "Graphs" ] } }, - "/_api/foxx/commit": { - "post": { - "description": "Commits the local service state of the Coordinator to the database.\n\nThis can be used to resolve service conflicts between Coordinators that cannot be fixed automatically due to missing data.\n", - "operationId": "commitFoxxServiceState", + "/_db/{database-name}/_api/gharial/{graph}/edge": { + "get": { + "description": "Lists all edge collections within this graph.\n", + "operationId": "listEdgeCollections", "parameters": [ { - "description": "Overwrite existing service files in database even if they already exist.\n", - "in": "query", - "name": "replace", - "required": false, + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } - } - ], - "responses": { - "204": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Commit the local service state", - "tags": [ - "Foxx" - ] - } - }, - "/_api/foxx/configuration": { - "get": { - "description": "Fetches the current configuration for the service at the given mount path.\n\nReturns an object mapping the configuration option names to their definitions\nincluding a human-friendly `title` and the `current` value (if any).\n", - "operationId": "getFoxxConfiguration", - "parameters": [ + }, { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", + "description": "The name of the graph.\n", + "in": "path", + "name": "graph", "required": true, "schema": { "type": "string" @@ -9908,22 +14348,99 @@ ], "responses": { "200": { - "description": "Returned if the request was successful.\n" + 
"content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "collections": { + "description": "A list of all edge collections used in the edge definitions\nof this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "collections" + ], + "type": "object" + } + } + }, + "description": "Is returned if the edge definitions can be listed.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if no graph with this name can be found.\n" } }, - "summary": "Get the configuration options", + "summary": "List edge collections", "tags": [ - "Foxx" + "Graphs" ] }, - "patch": { - "description": "Replaces the given service's configuration.\n\nReturns an object mapping all configuration option names to their new values.\n", - "operationId": "updateFoxxConfiguration", + "post": { + "description": "Adds an additional edge definition to the graph.\n\nThis edge definition has to contain a `collection` and an array of\neach `from` and `to` vertex collections. 
An edge definition can only\nbe added if this definition is either not used in any other graph, or\nit is used with exactly the same definition. For example, it is not\npossible to store a definition \"e\" from \"v1\" to \"v2\" in one graph, and\n\"e\" from \"v2\" to \"v1\" in another graph, but both can have \"e\" from\n\"v1\" to \"v2\".\n\nAdditionally, collection creation options can be set.\n", + "operationId": "createEdgeDefinition", "parameters": [ { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the graph.\n", + "in": "path", + "name": "graph", "required": true, "schema": { "type": "string" @@ -9935,783 +14452,1260 @@ "application/json": { "schema": { "properties": { + "collection": { + "description": "The name of the edge collection to be used.\n", + "type": "string" + }, + "from": { + "description": "One or many vertex collections that can contain source vertices.\n", + "items": { + "type": "string" + }, + "type": "array" + }, "options": { - "description": "A JSON object mapping configuration option names to their new values.\nAny omitted options will be ignored.\n", + "description": "A JSON object to set options for creating collections within this\nedge definition.\n", + "properties": { + "satellites": { + "description": "An array of collection names that is used to create SatelliteCollections\nfor a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only).\nEach array element must be a string and a valid collection name.\nThe collection type cannot be modified later.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, "type": "object" + }, + "to": { + "description": "One or many vertex collections that can contain target vertices.\n", + "items": { + "type": "string" 
+ }, + "type": "array" } }, "required": [ - "options" + "collection", + "from", + "to" ], "type": "object" } - } - } - }, - "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Update the configuration options", - "tags": [ - "Foxx" - ] - }, - "put": { - "description": "Replaces the given service's configuration completely.\n\nReturns an object mapping all configuration option names to their new values.\n", - "operationId": "replaceFoxxConfiguration", - "parameters": [ - { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "options": { - "description": "A JSON object mapping configuration option names to their new values.\nAny omitted options will be reset to their default values or marked as unconfigured.\n", - "type": "object" - } - }, - "required": [ - "options" - ], - "type": "object" + } + } + }, + "responses": { + "201": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 201, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "graph": { + "description": "The information about the modified graph.\n", + "properties": { + "_id": { + "description": "The internal id value of this graph.\n", + "type": "string" + }, + "_rev": { + "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "type": "string" + }, + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "The name of the graph.\n", + "type": "string" + }, + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "replicationFactor": { + "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" + }, + 
"smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", + "_id", + "_rev", + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" + ], + "type": "object" + } + }, + "required": [ + "error", + "code", + "graph" + ], + "type": "object" + } + } + }, + "description": "Returned if the definition can be added successfully and\n`waitForSync` is enabled for the `_graphs` collection.\nThe response body contains the graph configuration that has been stored.\n" + }, + "202": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 202, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "graph": { + "description": "The information about the modified graph.\n", + "properties": { + "_id": { + "description": "The internal id value of this graph.\n", + "type": "string" + }, + "_rev": { + "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "type": "string" + }, + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "The name of the graph.\n", + "type": "string" + }, + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "replicationFactor": { + "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" + }, + 
"smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", + "_id", + "_rev", + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" + ], + "type": "object" + } + }, + "required": [ + "error", + "code", + "graph" + ], + "type": "object" + } + } + }, + "description": "Returned if the definition can be added successfully and\n`waitForSync` is disabled for the `_graphs` collection.\nThe response body contains the graph configuration that has been stored.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } } - } - } - }, - "responses": { - "200": { - "description": "Returned if the request 
was successful.\n" - } - }, - "summary": "Replace the configuration options", - "tags": [ - "Foxx" - ] - } - }, - "/_api/foxx/dependencies": { - "get": { - "description": "Fetches the current dependencies for service at the given mount path.\n\nReturns an object mapping the dependency names to their definitions\nincluding a human-friendly `title` and the `current` mount path (if any).\n", - "operationId": "getFoxxDependencies", - "parameters": [ - { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Get the dependency options", - "tags": [ - "Foxx" - ] - }, - "patch": { - "description": "Replaces the given service's dependencies.\n\nReturns an object mapping all dependency names to their new mount paths.\n", - "operationId": "updateFoxxDependencies", - "parameters": [ - { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "options": { - "description": "A JSON object mapping dependency names to their new mount paths.\nAny omitted dependencies will be ignored.\n", - "type": "object" - } - }, - "required": [ - "options" - ], - "type": "object" + }, + "description": "Returned if the edge definition can not be added.\nThis can be because it is ill-formed, or if there is an\nedge definition with the same edge collection but different `from`\nand `to` vertex collections in any other graph.\n" + }, + "403": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 403, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error 
occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } } - } - } - }, - "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Update the dependency options", - "tags": [ - "Foxx" - ] - }, - "put": { - "description": "Replaces the given service's dependencies completely.\n\nReturns an object mapping all dependency names to their new mount paths.\n", - "operationId": "replaceFoxxDependencies", - "parameters": [ - { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "options": { - "description": "A JSON object mapping dependency names to their new mount paths.\nAny omitted dependencies will be disabled.\n", - "type": "object" - } - }, - "required": [ - "options" - ], - "type": "object" + }, + "description": "Returned if your user has insufficient rights.\nIn order to modify a graph, you need to have at least the following privileges:\n- `Administrate` access on the database.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + 
"required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } } - } - } - }, - "responses": { - "200": { - "description": "Returned if the request was successful.\n" + }, + "description": "Returned if no graph with this name can be found.\n" } }, - "summary": "Replace the dependency options", + "summary": "Add an edge definition", "tags": [ - "Foxx" + "Graphs" ] } }, - "/_api/foxx/development": { + "/_db/{database-name}/_api/gharial/{graph}/edge/{collection}": { "delete": { - "description": "Puts the service at the given mount path into production mode.\n\nWhen running ArangoDB in a cluster with multiple Coordinators this will\nreplace the service on all other Coordinators with the version on this\nCoordinator.\n", - "operationId": "disableFoxxDevelopmentMode", + "description": "Remove one edge definition from the graph. This only removes the\nedge collection from the graph definition. The vertex collections of the\nedge definition become orphan collections but otherwise remain untouched\nand can still be used in your queries.\n", + "operationId": "deleteEdgeDefinition", "parameters": [ { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Disable the development mode", - "tags": [ - "Foxx" - ] - }, - "post": { - "description": "Puts the service into development mode.\n\nWhile the service is running in development mode the service will be reloaded\nfrom the filesystem and its setup script (if any) will be re-executed every\ntime the service handles a request.\n\nWhen running ArangoDB in a cluster with multiple Coordinators note that changes\nto the filesystem on one Coordinator will not be reflected across the 
other\nCoordinators. This means you should treat your Coordinators as inconsistent\nas long as any service is running in development mode.\n", - "operationId": "enableFoxxDevelopmentMode", - "parameters": [ + }, { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", + "description": "The name of the graph.\n", + "in": "path", + "name": "graph", "required": true, "schema": { "type": "string" } - } - ], - "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Enable the development mode", - "tags": [ - "Foxx" - ] - } - }, - "/_api/foxx/download": { - "post": { - "description": "Downloads a zip bundle of the service directory.\n\nWhen development mode is enabled, this always creates a new bundle.\n\nOtherwise the bundle will represent the version of a service that\nis installed on that ArangoDB instance.\n", - "operationId": "downloadFoxxService", - "parameters": [ + }, { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", + "description": "The name of the edge collection used in the edge definition.\n", + "in": "path", + "name": "collection", "required": true, "schema": { "type": "string" } - } - ], - "responses": { - "200": { - "description": "Returned if the request was successful.\n" }, - "400": { - "description": "Returned if the mount path is unknown.\n" - } - }, - "summary": "Download a service bundle", - "tags": [ - "Foxx" - ] - } - }, - "/_api/foxx/readme": { - "get": { - "description": "Fetches the service's README or README.md file's contents if any.\n", - "operationId": "getFoxxReadme", - "parameters": [ { - "description": "Mount path of the installed service.\n", + "description": "Define if the request should wait until synced to disk.\n", "in": "query", - "name": "mount", - "required": true, + "name": "waitForSync", + "required": false, "schema": { - "type": "string" + "type": "boolean" } - } - ], - "responses": { - 
"200": { - "description": "Returned if the request was successful.\n" }, - "204": { - "description": "Returned if no README file was found.\n" - } - }, - "summary": "Get the service README", - "tags": [ - "Foxx" - ] - } - }, - "/_api/foxx/scripts": { - "get": { - "description": "Fetches a list of the scripts defined by the service.\n\nReturns an object mapping the raw script names to human-friendly names.\n", - "operationId": "listFoxxScripts", - "parameters": [ { - "description": "Mount path of the installed service.\n", + "description": "Drop the edge collection in addition to removing it from the graph.\nThe collection is only dropped if it is not used in other graphs.\n", "in": "query", - "name": "mount", - "required": true, + "name": "dropCollections", + "required": false, "schema": { - "type": "string" + "type": "boolean" } } ], "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "List the service scripts", - "tags": [ - "Foxx" - ] - } - }, - "/_api/foxx/scripts/{name}": { - "post": { - "description": "Runs the given script for the service at the given mount path.\n\nReturns the exports of the script, if any.\n", - "operationId": "runFoxxScript", - "parameters": [ - { - "description": "Name of the script to run.\n", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string" - } + "201": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 201, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "graph": { + "description": "The information about the modified graph.\n", + "properties": { + "_id": { + "description": "The internal id value of this graph.\n", + "type": "string" + }, + "_rev": { + "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "type": "string" + }, + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "The name of the graph.\n", + "type": "string" + }, + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "replicationFactor": { + "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" + }, + 
"smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", + "_id", + "_rev", + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" + ], + "type": "object" + } + }, + "required": [ + "error", + "code", + "graph" + ], + "type": "object" + } + } + }, + "description": "Returned if the edge definition can be removed from the graph\nand `waitForSync` is `true`.\n" }, - { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "data": { - "description": "An arbitrary JSON value that will be parsed and passed to the\nscript as its first argument.\n", - "type": "json" - } - }, - "type": "object" + "202": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 202, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "graph": { + "description": "The information about the modified 
graph.\n", + "properties": { + "_id": { + "description": "The internal id value of this graph.\n", + "type": "string" + }, + "_rev": { + "description": "The revision of this graph. Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "type": "string" + }, + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "The name of the graph.\n", + "type": "string" + }, + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "replicationFactor": { + "description": "The 
replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" + }, + "smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", + "type": "integer" + } + }, + "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", + "_id", + "_rev", + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" + ], + "type": "object" + } + }, + "required": [ + "error", + "code", + "graph" + ], + "type": "object" + } } - } - } - }, - "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Run a service script", - "tags": [ - "Foxx" - ] - } - }, - "/_api/foxx/service": { - "delete": { - "description": "Removes the service at the given mount path from the database and file system.\n\nReturns an empty response on success.\n", - "operationId": "deleteFoxxService", - "parameters": [ - { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", - "required": true, - "schema": { - "type": "string" - } + }, + "description": "Returned if the edge definition can be removed from the graph and\n`waitForSync` is `false`.\n" }, - { - "description": "Set to `false` to not run the 
service's teardown script.\n", - "in": "query", - "name": "teardown", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "204": { - "description": "Returned if the request was successful.\n" + "403": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 403, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if your user has insufficient rights.\nIn order to drop a vertex, you need to have at least the following privileges:\n- `Administrate` access on the database.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if no graph with this name can be found,\nor if no edge definition with this name is found in the graph.\n" } }, - "summary": "Uninstall a service", + "summary": "Remove an edge definition", "tags": [ - "Foxx" + "Graphs" ] }, - "get": { - "description": "Fetches detailed information 
for the service at the given mount path.\n\nReturns an object with the following attributes:\n\n- `mount`: the mount path of the service\n- `path`: the local file system path of the service\n- `development`: `true` if the service is running in development mode\n- `legacy`: `true` if the service is running in 2.8 legacy compatibility mode\n- `manifest`: the normalized JSON manifest of the service\n\nAdditionally the object may contain the following attributes if they have been set on the manifest:\n\n- `name`: a string identifying the service type\n- `version`: a semver-compatible version string\n", - "operationId": "getFoxxServiceDescription", + "post": { + "description": "Creates a new edge in the specified collection.\nWithin the body the edge has to contain a `_from` and `_to` value referencing to valid vertices in the graph.\nFurthermore, the edge has to be valid according to the edge definitions.\n", + "operationId": "createEdge", "parameters": [ { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "responses": { - "200": { - "description": "Returned if the request was successful.\n" }, - "400": { - "description": "Returned if the mount path is unknown.\n" - } - }, - "summary": "Get the service description", - "tags": [ - "Foxx" - ] - }, - "patch": { - "description": "Installs the given new service on top of the service currently installed at the given mount path.\nThis is only recommended for switching between different versions of the same service.\n\nUnlike replacing a service, upgrading a service retains the old service's configuration\nand dependencies (if any) and should therefore only be used to migrate an existing service\nto a newer or equivalent service.\n\nThe request body can be any of the following formats:\n\n- `application/zip`: a 
raw zip bundle containing a service\n- `application/javascript`: a standalone JavaScript file\n- `application/json`: a service definition as JSON\n- `multipart/form-data`: a service definition as a multipart form\n\nA service definition is an object or form with the following properties or fields:\n\n- `configuration`: a JSON object describing configuration values\n- `dependencies`: a JSON object describing dependency settings\n- `source`: a fully qualified URL or an absolute path on the server's file system\n\nWhen using multipart data, the `source` field can also alternatively be a file field\ncontaining either a zip bundle or a standalone JavaScript file.\n\nWhen using a standalone JavaScript file the given file will be executed\nto define our service's HTTP endpoints. It is the same which would be defined\nin the field `main` of the service manifest.\n\nIf `source` is a URL, the URL must be reachable from the server.\nIf `source` is a file system path, the path will be resolved on the server.\nIn either case the path or URL is expected to resolve to a zip bundle,\nJavaScript file or (in case of a file system path) directory.\n\nNote that when using file system paths in a cluster with multiple Coordinators\nthe file system path must resolve to equivalent files on every Coordinator.\n", - "operationId": "upgradeFoxxService", - "parameters": [ { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", + "description": "The name of the graph.\n", + "in": "path", + "name": "graph", "required": true, "schema": { "type": "string" } }, { - "description": "Set to `true` to run the old service's teardown script.\n", - "in": "query", - "name": "teardown", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Set to `false` to not run the new service's setup script.\n", - "in": "query", - "name": "setup", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Set to `true` 
to install the new service in 2.8 legacy compatibility mode.\n", - "in": "query", - "name": "legacy", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Set to `true` to force service install even if no service is installed under given mount.\n", - "in": "query", - "name": "force", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Upgrade a service", - "tags": [ - "Foxx" - ] - }, - "put": { - "description": "Removes the service at the given mount path from the database and file system.\nThen installs the given new service at the same mount path.\n\nThis is a slightly safer equivalent to performing an uninstall of the old service\nfollowed by installing the new service. The new service's main and script files\n(if any) will be checked for basic syntax errors before the old service is removed.\n\nThe request body can be any of the following formats:\n\n- `application/zip`: a raw zip bundle containing a service\n- `application/javascript`: a standalone JavaScript file\n- `application/json`: a service definition as JSON\n- `multipart/form-data`: a service definition as a multipart form\n\nA service definition is an object or form with the following properties or fields:\n\n- `configuration`: a JSON object describing configuration values\n- `dependencies`: a JSON object describing dependency settings\n- `source`: a fully qualified URL or an absolute path on the server's file system\n\nWhen using multipart data, the `source` field can also alternatively be a file field\ncontaining either a zip bundle or a standalone JavaScript file.\n\nWhen using a standalone JavaScript file the given file will be executed\nto define our service's HTTP endpoints. 
It is the same which would be defined\nin the field `main` of the service manifest.\n\nIf `source` is a URL, the URL must be reachable from the server.\nIf `source` is a file system path, the path will be resolved on the server.\nIn either case the path or URL is expected to resolve to a zip bundle,\nJavaScript file or (in case of a file system path) directory.\n\nNote that when using file system paths in a cluster with multiple Coordinators\nthe file system path must resolve to equivalent files on every Coordinator.\n", - "operationId": "replaceFoxxService", - "parameters": [ - { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", + "description": "The name of the edge collection the edge belongs to.\n", + "in": "path", + "name": "collection", "required": true, "schema": { "type": "string" } }, { - "description": "Set to `false` to not run the old service's teardown script.\n", - "in": "query", - "name": "teardown", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Set to `false` to not run the new service's setup script.\n", + "description": "Define if the request should wait until synced to disk.\n", "in": "query", - "name": "setup", + "name": "waitForSync", "required": false, "schema": { "type": "boolean" } }, { - "description": "Set to `true` to install the new service in 2.8 legacy compatibility mode.\n", + "description": "Define if the response should contain the complete\nnew version of the document.\n", "in": "query", - "name": "legacy", + "name": "returnNew", "required": false, "schema": { "type": "boolean" } }, { - "description": "Set to `true` to force service install even if no service is installed under given mount.\n", - "in": "query", - "name": "force", + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", 
"required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Replace a service", - "tags": [ - "Foxx" - ] - } - }, - "/_api/foxx/swagger": { - "get": { - "description": "Fetches the Swagger API description for the service at the given mount path.\n\nThe response body will be an OpenAPI 2.0 compatible JSON description of the service API.\n", - "operationId": "getFoxxSwaggerDescription", - "parameters": [ - { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", - "required": true, "schema": { "type": "string" } } ], - "responses": { - "200": { - "description": "Returned if the request was successful.\n" + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "_from": { + "description": "The source vertex of this edge. Has to be valid within\nthe used edge definition.\n", + "type": "string" + }, + "_to": { + "description": "The target vertex of this edge. 
Has to be valid within\nthe used edge definition.\n", + "type": "string" + } + }, + "required": [ + "_from", + "_to" + ], + "type": "object" + } + } } }, - "summary": "Get the Swagger description", - "tags": [ - "Foxx" - ] - } - }, - "/_api/foxx/tests": { - "post": { - "description": "Runs the tests for the service at the given mount path and returns the results.\n\nSupported test reporters are:\n\n- `default`: a simple list of test cases\n- `suite`: an object of test cases nested in suites\n- `stream`: a raw stream of test results\n- `xunit`: an XUnit/JUnit compatible structure\n- `tap`: a raw TAP compatible stream\n\nThe `Accept` request header can be used to further control the response format:\n\nWhen using the `stream` reporter `application/x-ldjson` will result\nin the response body being formatted as a newline-delimited JSON stream.\n\nWhen using the `tap` reporter `text/plain` or `text/*` will result\nin the response body being formatted as a plain text TAP report.\n\nWhen using the `xunit` reporter `application/xml` or `text/xml` will result\nin the response body being formatted as XML instead of JSONML.\n\nOtherwise the response body will be formatted as non-prettyprinted JSON.\n", - "operationId": "runFoxxTests", - "parameters": [ - { - "description": "Mount path of the installed service.\n", - "in": "query", - "name": "mount", - "required": true, - "schema": { - "type": "string" - } + "responses": { + "201": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 201, + "type": "integer" + }, + "edge": { + "description": "The internal attributes for the edge.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + 
"_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + }, + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "new": { + "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + }, + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + } + }, + "required": [ + "error", + "code", + "edge" + ], + "type": "object" + } + } + }, + "description": "Returned if the edge can be created and `waitForSync` is `true`.\n" }, - { - "description": "Test reporter to use.\n", - "in": "query", - "name": "reporter", - "required": false, - "schema": { - "type": "string" - } + "202": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 202, + "type": "integer" + }, + "edge": { + "description": "The internal attributes for the edge.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + 
"description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + }, + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "new": { + "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + }, + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + } + }, + "required": [ + "error", + "code", + "edge" + ], + "type": "object" + } + } + }, + "description": "Returned if the request was successful but `waitForSync` is `false`.\n" }, - { - "description": "Use the matching format for the reporter, regardless of the `Accept` header.\n", - "in": "query", - "name": "idiomatic", - "required": false, - "schema": { - "type": "boolean" - } + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + 
"description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if the input document is invalid.\nThis can for instance be the case if the `_from` or `_to` attribute is missing\nor malformed.\n" }, - { - "description": "Only run tests where the full name (including full test suites and test case)\nmatches this string.\n", - "in": "query", - "name": "filter", - "required": false, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "Returned if the request was successful.\n" - } - }, - "summary": "Run the service tests", - "tags": [ - "Foxx" - ] - } - }, - "/_api/gharial": { - "get": { - "description": "Lists all graphs stored in this database.\n", - "operationId": "listGraphs", - "responses": { - "200": { + "403": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 403, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if your user has insufficient rights.\nIn order to insert edges into the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + 
"description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned in any of the following cases:\n- The graph cannot be found.\n- The edge collection is not part of the graph.\n- The vertex collection referenced in the `_from` or `_to` attribute is not part of the graph.\n- The vertex collection is part of the graph, but does not exist.\n- `_from` or `_to` vertex does not exist.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 200, + "example": 410, "type": "integer" }, "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, + "description": "A flag indicating that an error occurred.\n", + "example": true, "type": "boolean" }, - "graphs": { - "description": "A list of all named graphs.\n", - "items": { - "properties": { - "graph": { - "description": "The properties of the named graph.\n", - "properties": { - "_id": { - "description": "The internal id value of this graph.\n", - "type": "string" - }, - "_rev": { - "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", - "type": "string" - }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" - }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "name": { - "description": "The name of the graph.\n", - "type": "string" - }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" - }, - "orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" - }, - 
"smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", - "type": "string" - }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", - "type": "integer" - } - }, - "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", - "_id", - "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" - ], - "type": "object" - } - }, - "type": "object" - }, - "type": "array" + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" } }, "required": [ "error", "code", - "graphs" + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "Is returned if the module is available and the graphs can be listed.\n" + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" } }, - "summary": "List all graphs", + "summary": "Create an edge", "tags": [ "Graphs" ] }, - "post": { - "description": "The creation of a graph requires the name of the graph and a\ndefinition of its edges.\n", - "operationId": "createGraph", + "put": { + "description": "Change the vertex collections of one specific edge definition.\nThis modifies all occurrences of this definition in all graphs known to your 
database.\n", + "operationId": "replaceEdgeDefinition", "parameters": [ { - "description": "Define if the request should wait until everything is synced to disk.\nChanges the success HTTP response status code.\n", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the graph.\n", + "in": "path", + "name": "graph", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the edge collection used in the edge definition.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Define if the request should wait until synced to disk.\n", "in": "query", "name": "waitForSync", "required": false, "schema": { "type": "boolean" } + }, + { + "description": "Drop the edge collection in addition to removing it from the graph.\nThe collection is only dropped if it is not used in other graphs.\n", + "in": "query", + "name": "dropCollections", + "required": false, + "schema": { + "type": "boolean" + } } ], "requestBody": { @@ -10719,85 +15713,32 @@ "application/json": { "schema": { "properties": { - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "collection": { + "description": "The name of the edge collection to modify.\n", + "type": "string" + }, + "from": { + "description": "One or many vertex collections that can contain source vertices.\n", "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - 
"description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" + "type": "string" }, "type": "array" }, - "isDisjoint": { - "description": "Whether to create a Disjoint SmartGraph instead of a regular SmartGraph\n(Enterprise Edition only).\n", - "type": "boolean" - }, - "isSmart": { - "description": "Define if the created graph should be smart (Enterprise Edition only).\n", - "type": "boolean" - }, - "name": { - "description": "Name of the graph.\n", - "type": "string" - }, "options": { - "description": "a JSON object to define options for creating collections within this graph.\nIt can contain the following attributes:\n", + "description": "A JSON object to set options for modifying collections within this\nedge definition.\n", "properties": { - "numberOfShards": { - "description": "The number of shards that is used for every collection within this graph.\nCannot be modified later.\n", - "type": "integer" - }, - "replicationFactor": { - "description": "The replication factor used when initially creating collections for this graph.\nCan be set to `\"satellite\"` to create a SatelliteGraph, which then ignores\n`numberOfShards`, `minReplicationFactor`, and `writeConcern`\n(Enterprise Edition only).\n", - "type": "integer" - }, "satellites": { "description": "An array of collection names that is used to create SatelliteCollections\nfor a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only).\nEach array element must be a string and a valid collection name.\nThe collection type cannot be modified later.\n", "items": { "type": "string" - }, - "type": "array" - }, - "smartGraphAttribute": { - "description": "Only has effect in Enterprise Edition and it is required if isSmart is true.\nThe attribute name that is used to smartly shard 
the vertices of a graph.\nEvery vertex in this SmartGraph has to have this attribute.\nCannot be modified later.\n", - "type": "string" - }, - "writeConcern": { - "description": "Write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", - "type": "integer" + }, + "type": "array" } }, - "required": [ - "numberOfShards", - "replicationFactor" - ], "type": "object" }, - "orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "to": { + "description": "One or many vertex collections that can contain target vertices.\n", "items": { "type": "string" }, @@ -10805,7 +15746,9 @@ } }, "required": [ - "name" + "collection", + "from", + "to" ], "type": "object" } @@ -10829,7 +15772,7 @@ "type": "boolean" }, "graph": { - "description": "The information about the newly created graph.\n", + "description": "The information about the modified graph.\n", "properties": { "_id": { "description": "The internal id value of this graph.\n", @@ -10935,7 +15878,7 @@ } } }, - "description": "Is returned if the graph can be created and `waitForSync` is enabled\nfor the `_graphs` collection, or given in the request.\nThe response body contains the graph configuration that has been stored.\n" + "description": "Returned if the request was successful and `waitForSync` is `true`.\n" }, "202": { "content": { @@ -10953,7 +15896,7 @@ "type": "boolean" }, "graph": { - "description": "The information about the newly 
created graph.\n", + "description": "The information about the modified graph.\n", "properties": { "_id": { "description": "The internal id value of this graph.\n", @@ -11059,7 +16002,7 @@ } } }, - "description": "Is returned if the graph can be created and `waitForSync` is disabled\nfor the `_graphs` collection and not given in the request.\nThe response body contains the graph configuration that has been stored.\n" + "description": "Returned if the request was successful but `waitForSync` is `false`.\n" }, "400": { "content": { @@ -11081,7 +16024,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -11095,7 +16038,7 @@ } } }, - "description": "Returned if the request is in a wrong format.\n" + "description": "Returned if the new edge definition is ill-formed and cannot be used.\n" }, "403": { "content": { @@ -11117,7 +16060,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -11131,16 +16074,16 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to create a graph, you need to have at least the following privileges:\n- `Administrate` access on the database.\n- `Read Only` access on every collection used within this graph.\n" + "description": "Returned if your user has insufficient rights.\nIn order to drop a vertex, you need to have at least the following privileges:\n- `Administrate` access on the database.\n" }, - "409": { + "404": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 409, + "example": 404, "type": "integer" }, "error": { @@ -11153,7 +16096,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error 
number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -11167,20 +16110,30 @@ } } }, - "description": "Returned if there is a conflict storing the graph. This can occur\neither if a graph with this name already exists, or if there is an\nedge definition with the same edge collection but different `from`\nand `to` vertex collections in any other graph.\n" + "description": "Returned if no graph with this name can be found, or if no edge definition\nwith this name is found in the graph.\n" } }, - "summary": "Create a graph", + "summary": "Replace an edge definition", "tags": [ "Graphs" ] } }, - "/_api/gharial/{graph}": { + "/_db/{database-name}/_api/gharial/{graph}/edge/{collection}/{edge}": { "delete": { - "description": "Drops an existing graph object by name.\nOptionally all collections not used by other graphs\ncan be dropped as well.\n", - "operationId": "deleteGraph", + "description": "Removes an edge from the collection.\n", + "operationId": "deleteEdge", "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, { "description": "The name of the graph.\n", "in": "path", @@ -11190,57 +16143,270 @@ "type": "string" } }, - { - "description": "Drop the collections of this graph as well. 
Collections are only\ndropped if they are not used in other graphs.\n", - "in": "query", - "name": "dropCollections", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "202": { + { + "description": "The name of the edge collection the edge belongs to.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The `_key` attribute of the edge.\n", + "in": "path", + "name": "edge", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Define if the request should wait until synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Define if a presentation of the deleted document should\nbe returned within the response object.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. 
As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", + "in": "header", + "name": "if-match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "old": { + "description": "The complete deleted edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + }, + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, + "removed": { + "description": "Is set to true if the remove was successful.\n", + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "removed" + ], + "type": "object" + } + } + }, + "description": "Returned if the edge can be removed.\n" + }, + "202": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 202, + "type": "integer" + }, + 
"error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "old": { + "description": "The complete deleted edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + }, + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, + "removed": { + "description": "Is set to true if the remove was successful.\n", + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "removed" + ], + "type": "object" + } + } + }, + "description": "Returned if the request was successful but `waitForSync` is `false`.\n" + }, + "403": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 403, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if your user has insufficient rights.\nIn order to delete vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` 
access on the given collection.\n" + }, + "404": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 202, + "example": 404, "type": "integer" }, "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "removed": { - "description": "Always `true`.\n", + "description": "A flag indicating that an error occurred.\n", "example": true, "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" } }, "required": [ "error", "code", - "removed" + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "Is returned if the graph can be dropped.\n" + "description": "Returned in the following cases:\n- The graph cannot be found.\n- The collection is not part of the graph.\n- The edge to remove does not exist.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" }, - "403": { + "410": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 403, + "example": 410, "type": "integer" }, "error": { @@ -11253,7 +16419,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -11267,16 +16433,16 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to drop a graph, you need to have at least the following privileges:\n- `Administrate` access on the database.\n" + "description": "This error occurs if you try to run this operation as part of a\nStream 
Transaction that has just been canceled or timed out.\n" }, - "404": { + "412": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 404, + "example": 412, "type": "integer" }, "error": { @@ -11289,7 +16455,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -11303,18 +16469,28 @@ } } }, - "description": "Returned if no graph with this name can be found.\n" + "description": "Returned if if-match header is given, but the stored documents revision is different.\n" } }, - "summary": "Drop a graph", + "summary": "Remove an edge", "tags": [ "Graphs" ] }, "get": { - "description": "Selects information for a given graph.\nReturns the edge definitions as well as the orphan collections,\nor returns an error if the graph does not exist.\n", - "operationId": "getGraph", + "description": "Gets an edge from the given collection.\n", + "operationId": "getEdge", "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, { "description": "The name of the graph.\n", "in": "path", @@ -11323,6 +16499,60 @@ "schema": { "type": "string" } + }, + { + "description": "The name of the edge collection the edge belongs to.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The `_key` attribute of the edge.\n", + "in": "path", + "name": "edge", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Must contain a revision.\nIf this is set a document is only returned if\nit has exactly this revision.\nAlso see if-match header as an alternative to this.\n", + "in": "query", + "name": "rev", + "required": false, 
+ "schema": { + "type": "string" + } + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is returned,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", + "in": "header", + "name": "if-match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one ETag. The document is returned,\nonly if it has a different revision as the given ETag. Otherwise a HTTP 304 is returned.\n", + "in": "header", + "name": "if-none-match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } } ], "responses": { @@ -11336,119 +16566,127 @@ "example": 200, "type": "integer" }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "graph": { - "description": "The information about the newly created graph\n", + "edge": { + "description": "The complete edge.\n", "properties": { - "_id": { - "description": "The internal id value of this graph.\n", + "_from": { + "description": "The _from value of the stored data.\n", "type": "string" }, - "_rev": { - "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "_id": { + "description": "The _id value of the stored data.\n", "type": "string" }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" - }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "name": { - "description": "The name of the graph.\n", + "_key": { + "description": "The _key value of the stored data.\n", "type": "string" }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" - }, - "orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the 
graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" - }, - "smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", - "type": "integer" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" } }, "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", "_id", + "_key", "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" + "_from", + "_to" ], "type": "object" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "edge" + ], + "type": "object" + } + } + }, + "description": "Returned if the edge can be found.\n" + }, + "304": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 304, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB 
error number for the error that occurred.\n", + "type": "integer" } }, "required": [ "error", "code", - "graph" + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "Returns the graph if it can be found.\nThe result has the following format:\n" + "description": "Returned if the if-none-match header is given and the\ncurrently stored edge still has this revision value.\nSo there was no update between the last time the edge\nwas fetched by the caller.\n" + }, + "403": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 403, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if your user has insufficient rights.\nIn order to update vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Read Only` access on the given collection.\n" }, "404": { "content": { @@ -11470,7 +16708,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -11484,73 +16722,52 @@ } } }, - "description": "Returned if no graph with this name can be found.\n" - } - }, - "summary": "Get a graph", - "tags": [ - "Graphs" - ] - } - }, - "/_api/gharial/{graph}/edge": { - "get": { - "description": "Lists all edge collections within this graph.\n", - "operationId": "listEdgeCollections", - "parameters": [ - { - "description": "The 
name of the graph.\n", - "in": "path", - "name": "graph", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { + "description": "Returned in the following cases:\n- The graph cannot be found.\n- The collection is not part of the graph.\n- The edge does not exist.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 200, + "example": 410, "type": "integer" }, - "collections": { - "description": "A list of all edge collections used in the edge definitions\nof this graph.\n", - "items": { - "type": "string" - }, - "type": "array" - }, "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, + "description": "A flag indicating that an error occurred.\n", + "example": true, "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" } }, "required": [ "error", "code", - "collections" + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "Is returned if the edge definitions can be listed.\n" + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" }, - "404": { + "412": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 404, + "example": 412, "type": "integer" }, "error": { @@ -11563,7 +16780,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for 
the error that occurred.\n", "type": "integer" } }, @@ -11577,18 +16794,28 @@ } } }, - "description": "Returned if no graph with this name can be found.\n" + "description": "Returned if if-match header is given, but the stored documents revision is different.\n" } }, - "summary": "List edge collections", + "summary": "Get an edge", "tags": [ "Graphs" ] }, - "post": { - "description": "Adds an additional edge definition to the graph.\n\nThis edge definition has to contain a `collection` and an array of\neach `from` and `to` vertex collections. An edge definition can only\nbe added if this definition is either not used in any other graph, or\nit is used with exactly the same definition. For example, it is not\npossible to store a definition \"e\" from \"v1\" to \"v2\" in one graph, and\n\"e\" from \"v2\" to \"v1\" in another graph, but both can have \"e\" from\n\"v1\" to \"v2\".\n\nAdditionally, collection creation options can be set.\n", - "operationId": "createEdgeDefinition", + "patch": { + "description": "Partially modify the data of the specific edge in the collection.\n", + "operationId": "updateEdge", "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, { "description": "The name of the graph.\n", "in": "path", @@ -11597,6 +16824,78 @@ "schema": { "type": "string" } + }, + { + "description": "The name of the edge collection the edge belongs to.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The `_key` attribute of the vertex.\n", + "in": "path", + "name": "edge", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Define if the request should wait until synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Define if 
values set to `null` should be stored.\nBy default (`true`), the given documents attribute(s) are set to `null`.\nIf this parameter is set to `false`, top-level attribute and sub-attributes with\na `null` value in the request are removed from the document (but not attributes\nof objects that are nested inside of arrays).\n", + "in": "query", + "name": "keepNull", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Define if a presentation of the deleted document should\nbe returned within the response object.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Define if a presentation of the new document should\nbe returned within the response object.\n", + "in": "query", + "name": "returnNew", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. 
As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", + "in": "header", + "name": "if-match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } } ], "requestBody": { @@ -11604,42 +16903,13 @@ "application/json": { "schema": { "properties": { - "collection": { - "description": "The name of the edge collection to be used.\n", - "type": "string" - }, - "from": { - "description": "One or many vertex collections that can contain source vertices.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "options": { - "description": "A JSON object to set options for creating collections within this\nedge definition.\n", - "properties": { - "satellites": { - "description": "An array of collection names that is used to create SatelliteCollections\nfor a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only).\nEach array element must be a string and a valid collection name.\nThe collection type cannot be modified later.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, + "edge": { + "description": "The body has to contain a JSON object containing exactly the attributes that should be overwritten, all other attributes remain unchanged.\n", "type": "object" - }, - "to": { - "description": "One or many vertex collections that can contain target vertices.\n", - "items": { - "type": "string" - }, - "type": "array" } }, "required": [ - "collection", - "from", - "to" + "edge" ], "type": "object" } @@ -11647,115 +16917,117 @@ } }, "responses": { - "201": { + "200": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 201, + "example": 200, 
"type": "integer" }, + "edge": { + "description": "The internal attributes for the edge.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + }, + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, "error": { "description": "A flag indicating that no error occurred.\n", "example": false, "type": "boolean" }, - "graph": { - "description": "The information about the modified graph.\n", + "new": { + "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", "properties": { - "_id": { - "description": "The internal id value of this graph.\n", + "_from": { + "description": "The _from value of the stored data.\n", "type": "string" }, - "_rev": { - "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "_id": { + "description": "The _id value of the stored data.\n", "type": "string" }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" - }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" }, - "name": { - "description": "The name of the graph.\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, + "old": { + "description": "The complete overwritten 
edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" }, - "orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", - "items": { - "type": "string" - }, - "type": "array" + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" }, - "smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", - "type": "integer" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" } }, "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", "_id", + "_key", "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" + "_from", + "_to" ], "type": "object" } @@ -11763,13 +17035,13 @@ "required": [ "error", "code", - "graph" + "edge" ], "type": "object" } } }, - "description": "Returned if the definition can be added successfully and\n`waitForSync` is enabled for the `_graphs` collection.\nThe response body contains the graph configuration that has been stored.\n" + "description": "Returned if the edge can be updated, and `waitForSync` is `false`.\n" }, "202": { "content": { @@ -11781,105 +17053,107 @@ "example": 202, "type": "integer" }, + "edge": { + "description": "The internal attributes for the edge.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + }, + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, "error": { "description": "A flag indicating that no error occurred.\n", "example": false, "type": "boolean" }, - "graph": { - "description": "The information about the modified graph.\n", + "new": { + "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", "properties": { - "_id": { - "description": "The internal id value of this graph.\n", + "_from": 
{ + "description": "The _from value of the stored data.\n", "type": "string" }, - "_rev": { - "description": "The revision of this graph. Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "_id": { + "description": "The _id value of the stored data.\n", "type": "string" }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" - }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" }, - "name": { - "description": "The name of the graph.\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + 
"required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, + "old": { + "description": "The complete overwritten edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" }, - "orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", - "items": { - "type": "string" - }, - "type": "array" + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" }, - "smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", - "type": "integer" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" } }, "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", "_id", + "_key", "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" + "_from", + "_to" ], "type": "object" } @@ -11887,22 +17161,22 @@ "required": [ "error", "code", - "graph" + "edge" ], "type": "object" } } }, - "description": "Returned if the definition can be added successfully and\n`waitForSync` is disabled for the `_graphs` collection.\nThe response body contains the graph configuration that has been stored.\n" + "description": "Returned if the request was successful but `waitForSync` is `false`.\n" }, - "400": { + "403": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 400, + "example": 403, "type": "integer" }, "error": { @@ -11915,7 +17189,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -11929,16 +17203,16 @@ } } }, - "description": "Returned if the edge definition can not be added.\nThis can be because it is ill-formed, or if there is an\nedge definition with the same edge collection but different `from`\nand `to` vertex collections in any other graph.\n" + "description": "Returned if your user has insufficient rights.\nIn order to update edges in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" }, - "403": { + "404": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 403, + "example": 404, "type": "integer" }, "error": { @@ -11951,7 +17225,7 @@ "type": "string" }, "errorNum": { 
- "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -11965,16 +17239,16 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to modify a graph, you need to have at least the following privileges:\n- `Administrate` access on the database.\n" + "description": "Returned in the following cases:\n- The graph cannot be found.\n- The collection is not part of the graph.\n- The edge to update does not exist.\n- Either `_from` or `_to` vertex does not exist (if updated).\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" }, - "404": { + "410": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 404, + "example": 410, "type": "integer" }, "error": { @@ -11987,7 +17261,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -12001,20 +17275,64 @@ } } }, - "description": "Returned if no graph with this name can be found.\n" + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "412": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 412, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that 
occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if if-match header is given, but the stored documents revision is different.\n" } }, - "summary": "Add an edge definition", + "summary": "Update an edge", "tags": [ "Graphs" ] - } - }, - "/_api/gharial/{graph}/edge/{collection}": { - "delete": { - "description": "Remove one edge definition from the graph. This only removes the\nedge collection from the graph definition. The vertex collections of the\nedge definition become orphan collections but otherwise remain untouched\nand can still be used in your queries.\n", - "operationId": "deleteEdgeDefinition", + }, + "put": { + "description": "Replaces the data of an edge in the collection.\n", + "operationId": "replaceEdge", "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, { "description": "The name of the graph.\n", "in": "path", @@ -12025,7 +17343,7 @@ } }, { - "description": "The name of the edge collection used in the edge definition.\n", + "description": "The name of the edge collection the edge belongs to.\n", "in": "path", "name": "collection", "required": true, @@ -12033,6 +17351,15 @@ "type": "string" } }, + { + "description": "The `_key` attribute of the vertex.\n", + "in": "path", + "name": "edge", + "required": true, + "schema": { + "type": "string" + } + }, { "description": "Define if the request should wait until synced to disk.\n", "in": "query", @@ -12043,15 +17370,74 @@ } }, { - "description": "Drop the collection as well.\nThe collection is only dropped if it is not used in other graphs.\n", + "description": "Define if values set to `null` should be stored.\nBy default (`true`), the given documents attribute(s) are set to `null`.\nIf this parameter is set to `false`, top-level 
attribute and sub-attributes with\na `null` value in the request are removed from the document (but not attributes\nof objects that are nested inside of arrays).\n", "in": "query", - "name": "dropCollections", + "name": "keepNull", "required": false, "schema": { "type": "boolean" } + }, + { + "description": "Define if a presentation of the deleted document should\nbe returned within the response object.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Define if a presentation of the new document should\nbe returned within the response object.\n", + "in": "query", + "name": "returnNew", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", + "in": "header", + "name": "if-match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "_from": { + "description": "The source vertex of this edge. Has to be valid within\nthe used edge definition.\n", + "type": "string" + }, + "_to": { + "description": "The target vertex of this edge. 
Has to be valid within\nthe used edge definition.\n", + "type": "string" + } + }, + "required": [ + "_from", + "_to" + ], + "type": "object" + } + } + } + }, "responses": { "201": { "content": { @@ -12063,105 +17449,107 @@ "example": 201, "type": "integer" }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "graph": { - "description": "The information about the modified graph.\n", + "edge": { + "description": "The internal attributes for the edge\n", "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, "_id": { - "description": "The internal id value of this graph.\n", + "description": "The _id value of the stored data.\n", "type": "string" }, - "_rev": { - "description": "The revision of this graph. Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "_key": { + "description": "The _key value of the stored data.\n", "type": "string" }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint 
SmartGraph (Enterprise Edition only).\n", - "type": "boolean" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "new": { + "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" }, - "name": { - "description": "The name of the graph.\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, + "old": { + "description": "The complete overwritten edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" }, - "orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not 
have edges within this graph.\n", - "items": { - "type": "string" - }, - "type": "array" + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" }, - "smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", - "type": "integer" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" } }, "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", "_id", + "_key", "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" + "_from", + "_to" ], "type": "object" } @@ -12169,13 +17557,13 @@ "required": [ "error", "code", - "graph" + "edge" ], "type": "object" } } }, - "description": "Returned if the edge definition can be removed from the graph\nand `waitForSync` is `true`.\n" + "description": "Returned if the request was successful but `waitForSync` is `true`.\n" }, "202": { "content": { @@ -12187,105 +17575,107 @@ "example": 202, "type": "integer" }, + "edge": { + "description": "The internal attributes for the edge\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" + }, + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + }, + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, "error": { "description": "A flag indicating that no error occurred.\n", "example": false, "type": "boolean" }, - "graph": { - "description": "The information about the modified graph.\n", + "new": { + "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", "properties": { - "_id": { - "description": "The internal id value of this graph.\n", + "_from": { + "description": "The _from value of the stored data.\n", "type": "string" }, - "_rev": { - 
"description": "The revision of this graph. Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "_id": { + "description": "The _id value of the stored data.\n", "type": "string" }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" - }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" }, - "name": { - "description": "The name of the graph.\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev", + "_from", + "_to" + ], + "type": "object" + }, + "old": { 
+ "description": "The complete overwritten edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "properties": { + "_from": { + "description": "The _from value of the stored data.\n", + "type": "string" }, - "orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", - "items": { - "type": "string" - }, - "type": "array" + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" }, - "smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", - "type": "integer" + "_to": { + "description": "The _to value of the stored data.\n", + "type": "string" } }, "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", "_id", + "_key", "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" + "_from", + "_to" ], "type": "object" } @@ -12293,22 +17683,58 @@ "required": [ "error", "code", - "graph" + "edge" + ], + "type": "object" + } + } + }, + "description": "Returned if the request was successful but `waitForSync` is `false`.\n" + }, + "403": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 403, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "Returned if the edge definition can be removed from the graph and\n`waitForSync` is `false`.\n" + "description": "Returned if your user has insufficient rights.\nIn order to replace edges in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" }, - "403": { + "404": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 403, + "example": 404, "type": "integer" }, "error": { @@ -12321,7 +17747,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ 
-12335,16 +17761,16 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to drop a vertex, you need to have at least the following privileges:\n- `Administrate` access on the database.\n" + "description": "Returned in the following cases:\n- The graph cannot be found.\n- The collection is not part of the graph.\n- The edge to replace does not exist.\n- Either `_from` or `_to` vertex does not exist.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" }, - "404": { + "410": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 404, + "example": 410, "type": "integer" }, "error": { @@ -12357,7 +17783,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -12371,52 +17797,174 @@ } } }, - "description": "Returned if no graph with this name can be found,\nor if no edge definition with this name is found in the graph.\n" + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "412": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 412, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + 
"type": "object" + } + } + }, + "description": "Returned if if-match header is given, but the stored documents revision is different.\n" } }, - "summary": "Remove an edge definition", + "summary": "Replace an edge", "tags": [ "Graphs" ] - }, - "post": { - "description": "Creates a new edge in the specified collection.\nWithin the body the edge has to contain a `_from` and `_to` value referencing to valid vertices in the graph.\nFurthermore, the edge has to be valid according to the edge definitions.\n", - "operationId": "createEdge", + } + }, + "/_db/{database-name}/_api/gharial/{graph}/vertex": { + "get": { + "description": "Lists all vertex collections within this graph, including orphan collections.\n", + "operationId": "listVertexCollections", "parameters": [ { - "description": "The name of the graph.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "graph", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the edge collection the edge belongs to.\n", + "description": "The name of the graph.\n", "in": "path", - "name": "collection", + "name": "graph", "required": true, "schema": { "type": "string" } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "collections": { + "description": "The list of all vertex collections within this graph.\nIncludes the vertex collections used in edge definitions\nas well as orphan collections.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "collections" + ], + "type": "object" + } + } + }, + "description": "Is returned if the collections can be listed.\n" }, + "404": { + 
"content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned if no graph with this name can be found.\n" + } + }, + "summary": "List vertex collections", + "tags": [ + "Graphs" + ] + }, + "post": { + "description": "Adds a vertex collection to the set of orphan collections of the graph.\nIf the collection does not exist, it is created.\n", + "operationId": "addVertexCollection", + "parameters": [ { - "description": "Define if the request should wait until synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "Define if the response should contain the complete\nnew version of the document.\n", - "in": "query", - "name": "returnNew", - "required": false, + "description": "The name of the graph.\n", + "in": "path", + "name": "graph", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], @@ -12425,18 +17973,26 @@ "application/json": { "schema": { "properties": { - "_from": { - "description": "The source vertex of this edge. 
Has to be valid within\nthe used edge definition.\n", + "collection": { + "description": "The name of the vertex collection to add to the graph definition.\n", "type": "string" }, - "_to": { - "description": "The target vertex of this edge. Has to be valid within\nthe used edge definition.\n", - "type": "string" + "options": { + "description": "A JSON object to set options for creating vertex collections.\n", + "properties": { + "satellites": { + "description": "An array of collection names that is used to create SatelliteCollections\nfor a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only).\nEach array element must be a string and a valid collection name.\nThe collection type cannot be modified later.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" } }, "required": [ - "_from", - "_to" + "collection" ], "type": "object" } @@ -12454,74 +18010,105 @@ "example": 201, "type": "integer" }, - "edge": { - "description": "The internal attributes for the edge.\n", + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "graph": { + "description": "The information about the modified graph.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", + "_id": { + "description": "The internal id value of this graph.\n", + "type": "string" + }, + "_rev": { + "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", "type": "string" }, - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev", - "_from", - "_to" - ], - "type": "object" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "new": { - "description": "The complete newly written edge document.\nIncludes all written attributes 
in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", - "properties": { - "_from": { - "description": "The _from value of the stored data.\n", + "name": { + "description": "The name of the graph.\n", "type": "string" }, - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" + "replicationFactor": { + "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" }, - "_to": { - "description": "The _to value of the stored data.\n", + "smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", + "type": "integer" } }, "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", "_id", - "_key", "_rev", - "_from", - "_to" + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" ], "type": "object" } @@ -12529,13 +18116,13 @@ "required": [ "error", "code", - "edge" + "graph" ], "type": "object" } } }, - "description": "Returned if the edge can be created and `waitForSync` is `true`.\n" + "description": "Is returned if the collection can be created and `waitForSync` is enabled\nfor the `_graphs` collection, or given in the request.\nThe response body contains the graph configuration that has been stored.\n" }, "202": { "content": { @@ -12547,74 +18134,105 @@ "example": 202, "type": "integer" }, - "edge": { - "description": "The internal attributes for the edge.\n", - "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev", - "_from", - "_to" - ], - "type": "object" - }, "error": { "description": "A flag indicating that no error occurred.\n", "example": false, "type": "boolean" }, - "new": { - "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", + "graph": { + "description": "The information about the newly created graph\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { - "description": "The _id value of the stored 
data.\n", + "description": "The internal id value of this graph.\n", "type": "string" }, - "_key": { - "description": "The _key value of the stored data.\n", + "_rev": { + "description": "The revision of this graph. Can be used to make sure to not override\nconcurrent modifications to this graph.\n", "type": "string" }, - "_rev": { - "description": "The _rev value of the stored data.\n", + "edgeDefinitions": { + "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", + "items": { + "properties": { + "collection": { + "description": "Name of the edge collection, where the edges are stored in.\n", + "type": "string" + }, + "from": { + "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "to": { + "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "collection", + "from", + "to" + ], + "type": "object" + }, + "type": "array" + }, + "isDisjoint": { + "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "isSatellite": { + "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", + "type": "boolean" + }, + "isSmart": { + "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", + "type": "boolean" + }, + "name": { + "description": "The name of the graph.\n", "type": "string" }, - "_to": { - "description": "The _to value of the stored data.\n", + "numberOfShards": { + "description": "Number of shards created for every new collection in the graph.\n", + "type": "integer" + }, + "orphanCollections": { + "description": "An array of additional vertex collections.\nDocuments in these 
collections do not have edges within this graph.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "replicationFactor": { + "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", + "type": "integer" + }, + "smartGraphAttribute": { + "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", "type": "string" + }, + "writeConcern": { + "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", + "type": "integer" } }, "required": [ + "name", + "edgeDefinitions", + "orphanCollections", + "numberOfShards", "_id", - "_key", "_rev", - "_from", - "_to" + "replicationFactor", + "isSmart", + "isDisjoint", + "isSatellite" ], "type": "object" } @@ -12622,13 +18240,13 @@ "required": [ "error", "code", - "edge" + "graph" ], "type": "object" } } }, - "description": "Returned if the request was successful but `waitForSync` is `false`.\n" + "description": "Is returned if the collection can be created and `waitForSync` is disabled\nfor the `_graphs` collection, or given in the request.\nThe response body contains the graph configuration that has been stored.\n" }, "400": { "content": { @@ -12650,7 +18268,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -12664,7 +18282,7 @@ } } }, - "description": "Returned if the input document is invalid.\nThis can for instance be the case if the `_from` or `_to` attribute is missing\nor malformed.\n" + "description": "Returned if the request is in an invalid format.\n" }, "403": { "content": { @@ -12686,7 +18304,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -12700,7 +18318,7 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to insert edges into the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" + "description": "Returned if your user has insufficient rights.\nIn order to modify a graph, you need to have at least the following privileges:\n- `Administrate` access on the database.\n- `Read Only` access on every collection used within this graph.\n" }, 
"404": { "content": { @@ -12722,7 +18340,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -12736,111 +18354,67 @@ } } }, - "description": "Returned in any of the following cases:\n- No graph with this name can be found.\n- The edge collection is not part of the graph.\n- The vertex collection referenced in the `_from` or `_to` attribute is not part of the graph.\n- The vertex collection is part of the graph, but does not exist.\n- `_from` or `_to` vertex does not exist.\n" + "description": "Returned if no graph with this name can be found.\n" } }, - "summary": "Create an edge", + "summary": "Add a vertex collection", "tags": [ "Graphs" ] - }, - "put": { - "description": "Change one specific edge definition.\nThis modifies all occurrences of this definition in all graphs known to your database.\n", - "operationId": "replaceEdgeDefinition", + } + }, + "/_db/{database-name}/_api/gharial/{graph}/vertex/{collection}": { + "delete": { + "description": "Removes a vertex collection from the list of the graph's\norphan collections. It can optionally delete the collection if it is\nnot used in any other graph.\n\nYou cannot remove vertex collections that are used in one of the\nedge definitions of the graph. 
You need to modify or remove the\nedge definition first in order to fully remove a vertex collection from\nthe graph.\n", + "operationId": "deleteVertexCollection", "parameters": [ { - "description": "The name of the graph.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "graph", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the edge collection used in the edge definition.\n", + "description": "The name of the graph.\n", "in": "path", - "name": "collection", + "name": "graph", "required": true, "schema": { "type": "string" } }, { - "description": "Define if the request should wait until synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, + "description": "The name of the vertex collection.\n", + "in": "path", + "name": "collection", + "required": true, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "Drop the collection as well.\nThe collection is only dropped if it is not used in other graphs.\n", + "description": "Drop the collection in addition to removing it from the graph.\nThe collection is only dropped if it is not used in other graphs.\n", "in": "query", - "name": "dropCollections", + "name": "dropCollection", "required": false, "schema": { "type": "boolean" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "collection": { - "description": "The name of the edge collection to be used.\n", - "type": "string" - }, - "from": { - "description": "One or many vertex collections that can contain source vertices.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "options": { - "description": "A JSON object to set options for modifying collections within this\nedge definition.\n", - "properties": { - "satellites": { - "description": "An array of collection names that is used to create SatelliteCollections\nfor a (Disjoint) 
SmartGraph using SatelliteCollections (Enterprise Edition only).\nEach array element must be a string and a valid collection name.\nThe collection type cannot be modified later.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "type": "object" - }, - "to": { - "description": "One or many vertex collections that can contain target vertices.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - } - } - } - }, "responses": { - "201": { + "200": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 201, + "example": 200, "type": "integer" }, "error": { @@ -12849,7 +18423,7 @@ "type": "boolean" }, "graph": { - "description": "The information about the modified graph.\n", + "description": "The information about the newly created graph\n", "properties": { "_id": { "description": "The internal id value of this graph.\n", @@ -12955,7 +18529,7 @@ } } }, - "description": "Returned if the request was successful and `waitForSync` is `true`.\n" + "description": "Returned if the vertex collection was removed from the graph successfully\nand `waitForSync` is `true`.\n" }, "202": { "content": { @@ -12973,7 +18547,7 @@ "type": "boolean" }, "graph": { - "description": "The information about the modified graph.\n", + "description": "The information about the newly created graph\n", "properties": { "_id": { "description": "The internal id value of this graph.\n", @@ -13101,7 +18675,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -13115,7 +18689,7 @@ } } }, - "description": "Returned if the new edge definition is ill-formed and cannot be used.\n" + "description": "Returned if the vertex collection is still used in an edge 
definition.\nIn this case it cannot be removed from the graph yet, it has to be\nremoved from the edge definition first.\n" }, "403": { "content": { @@ -13137,7 +18711,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -13173,7 +18747,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -13187,42 +18761,41 @@ } } }, - "description": "Returned if no graph with this name can be found, or if no edge definition\nwith this name is found in the graph.\n" + "description": "Returned if no graph with this name can be found.\n" } }, - "summary": "Replace an edge definition", + "summary": "Remove a vertex collection", "tags": [ "Graphs" ] - } - }, - "/_api/gharial/{graph}/edge/{collection}/{edge}": { - "delete": { - "description": "Removes an edge from the collection.\n", - "operationId": "deleteEdge", + }, + "post": { + "description": "Adds a vertex to the given collection.\n", + "operationId": "createVertex", "parameters": [ { - "description": "The name of the graph.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "graph", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the edge collection the edge belongs to.\n", + "description": "The name of the graph.\n", "in": "path", - "name": "collection", + "name": "graph", "required": true, "schema": { "type": "string" } }, { - "description": "The `_key` attribute of the edge.\n", + "description": "The name of the vertex collection the vertex should be inserted into.\n", "in": "path", - "name": "edge", + "name": "collection", "required": true, "schema": { "type": "string" @@ -13238,33 +18811,51 @@ } 
}, { - "description": "Define if a presentation of the deleted document should\nbe returned within the response object.\n", + "description": "Define if the response should contain the complete\nnew version of the document.\n", "in": "query", - "name": "returnOld", + "name": "returnNew", "required": false, "schema": { "type": "boolean" } }, { - "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", "in": "header", - "name": "if-match", + "name": "x-arango-trx-id", "required": false, "schema": { "type": "string" } } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "vertex": { + "description": "The body has to be the JSON object to be stored.\n", + "type": "object" + } + }, + "required": [ + "vertex" + ], + "type": "object" + } + } + } + }, "responses": { - "200": { + "201": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 200, + "example": 201, "type": "integer" }, "error": { @@ -13272,13 +18863,9 @@ "example": false, "type": "boolean" }, - "old": { - "description": "The complete deleted edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "new": { + "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": 
"The _id value of the stored data.\n", "type": "string" @@ -13290,36 +18877,49 @@ "_rev": { "description": "The _rev value of the stored data.\n", "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev" + ], + "type": "object" + }, + "vertex": { + "description": "The internal attributes for the vertex.\n", + "properties": { + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" }, - "_to": { - "description": "The _to value of the stored data.\n", + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" - }, - "removed": { - "description": "Is set to true if the remove was successful.\n", - "type": "boolean" } }, "required": [ "error", "code", - "removed" + "vertex" ], "type": "object" } } }, - "description": "Returned if the edge can be removed.\n" + "description": "Returned if the vertex can be added and `waitForSync` is `true`.\n" }, "202": { "content": { @@ -13336,13 +18936,9 @@ "example": false, "type": "boolean" }, - "old": { - "description": "The complete deleted edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "new": { + "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": "The _id value of the stored data.\n", "type": "string" @@ -13354,30 +18950,43 @@ "_rev": { "description": "The _rev value of the stored data.\n", "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev" + ], + "type": "object" + }, + "vertex": { + "description": "The 
internal attributes generated while storing the vertex.\nDoes not include any attribute given in request body.\n", + "properties": { + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" }, - "_to": { - "description": "The _to value of the stored data.\n", + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" - }, - "removed": { - "description": "Is set to true if the remove was successful.\n", - "type": "boolean" } }, "required": [ "error", "code", - "removed" + "vertex" ], "type": "object" } @@ -13405,7 +19014,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -13419,7 +19028,7 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to delete vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" + "description": "Returned if your user has insufficient rights.\nIn order to insert vertices into the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" }, "404": { "content": { @@ -13441,7 +19050,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -13455,16 +19064,16 @@ } } }, - "description": "Returned in the following cases:\n- No graph with this name can be found.\n- This collection is not part of the graph.\n- The edge to remove does not exist.\n" + "description": "The graph 
cannot be found or the collection is not part of the graph.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" }, - "412": { + "410": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 412, + "example": 410, "type": "integer" }, "error": { @@ -13477,7 +19086,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -13491,18 +19100,30 @@ } } }, - "description": "Returned if if-match header is given, but the stored documents revision is different.\n" + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" } }, - "summary": "Remove an edge", + "summary": "Create a vertex", "tags": [ "Graphs" ] - }, - "get": { - "description": "Gets an edge from the given collection.\n", - "operationId": "getEdge", + } + }, + "/_db/{database-name}/_api/gharial/{graph}/vertex/{collection}/{vertex}": { + "delete": { + "description": "Removes a vertex from the collection.\n", + "operationId": "deleteVertex", "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, { "description": "The name of the graph.\n", "in": "path", @@ -13513,7 +19134,7 @@ } }, { - "description": "The name of the edge collection the edge belongs to.\n", + "description": "The name of the vertex collection the vertex belongs to.\n", "in": "path", "name": "collection", "required": true, @@ -13522,25 +19143,34 @@ } }, { - "description": "The `_key` attribute of the edge.\n", + "description": "The `_key` attribute of 
the vertex.\n", "in": "path", - "name": "edge", + "name": "vertex", "required": true, "schema": { "type": "string" } }, { - "description": "Must contain a revision.\nIf this is set a document is only returned if\nit has exactly this revision.\nAlso see if-match header as an alternative to this.\n", + "description": "Define if the request should wait until synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Define if a presentation of the deleted document should\nbe returned within the response object.\n", "in": "query", - "name": "rev", + "name": "returnOld", "required": false, "schema": { - "type": "string" + "type": "boolean" } }, { - "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is returned,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", + "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", "in": "header", "name": "if-match", "required": false, @@ -13549,9 +19179,9 @@ } }, { - "description": "If the \"If-None-Match\" header is given, then it must contain exactly one ETag. The document is returned,\nonly if it has a different revision as the given ETag. 
Otherwise a HTTP 304 is returned.\n", + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", "in": "header", - "name": "if-none-match", + "name": "x-arango-trx-id", "required": false, "schema": { "type": "string" @@ -13569,13 +19199,14 @@ "example": 200, "type": "integer" }, - "edge": { - "description": "The complete edge.\n", + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "old": { + "description": "The complete deleted vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": "The _id value of the stored data.\n", "type": "string" @@ -13587,46 +19218,93 @@ "_rev": { "description": "The _rev value of the stored data.\n", "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" }, + "removed": { + "description": "Is set to true if the remove was successful.\n", + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "removed" + ], + "type": "object" + } + } + }, + "description": "Returned if the vertex can be removed.\n" + }, + "202": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 202, + "type": "integer" + }, "error": { "description": "A flag indicating that no error occurred.\n", "example": false, "type": "boolean" + }, + "old": { + "description": "The complete deleted vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "properties": { + "_id": { + "description": "The 
_id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev" + ], + "type": "object" + }, + "removed": { + "description": "Is set to true if the remove was successful.\n", + "type": "boolean" } }, "required": [ "error", "code", - "edge" + "removed" ], "type": "object" } } }, - "description": "Returned if the edge can be found.\n" + "description": "Returned if the request was successful but `waitForSync` is `false`.\n" }, - "304": { + "403": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 304, + "example": 403, "type": "integer" }, "error": { @@ -13639,7 +19317,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -13653,16 +19331,16 @@ } } }, - "description": "Returned if the if-none-match header is given and the\ncurrently stored edge still has this revision value.\nSo there was no update between the last time the edge\nwas fetched by the caller.\n" + "description": "Returned if your user has insufficient rights.\nIn order to delete vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" }, - "403": { + "404": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 403, + "example": 404, "type": "integer" }, "error": { @@ -13675,7 +19353,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error 
that occurred.\n", "type": "integer" } }, @@ -13689,16 +19367,16 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to update vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Read Only` access on the given collection.\n" + "description": "Returned in the following cases:\n- The graph cannot be found.\n- The collection is not part of the graph.\n- The vertex to remove does not exist.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" }, - "404": { + "410": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 404, + "example": 410, "type": "integer" }, "error": { @@ -13711,7 +19389,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -13725,7 +19403,7 @@ } } }, - "description": "Returned in the following cases:\n- No graph with this name can be found.\n- This collection is not part of the graph.\n- The edge does not exist.\n" + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" }, "412": { "content": { @@ -13747,7 +19425,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -13764,106 +19442,89 @@ "description": "Returned if if-match header is given, but the stored documents revision is different.\n" } }, - "summary": "Get an edge", + "summary": "Remove a vertex", "tags": [ "Graphs" ] }, - "patch": { - "description": "Partially modify the 
data of the specific edge in the collection.\n", - "operationId": "updateEdge", + "get": { + "description": "Gets a vertex from the given collection.\n", + "operationId": "getVertex", "parameters": [ { - "description": "The name of the graph.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "graph", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the edge collection the edge belongs to.\n", + "description": "The name of the graph.\n", "in": "path", - "name": "collection", + "name": "graph", "required": true, "schema": { "type": "string" } }, { - "description": "The `_key` attribute of the vertex.\n", + "description": "The name of the vertex collection the vertex belongs to.\n", "in": "path", - "name": "edge", + "name": "collection", "required": true, "schema": { "type": "string" } }, { - "description": "Define if the request should wait until synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, + "description": "The `_key` attribute of the vertex.\n", + "in": "path", + "name": "vertex", + "required": true, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "Define if values set to `null` should be stored.\nBy default (`true`), the given documents attribute(s) are set to `null`.\nIf this parameter is set to `false`, top-level attribute and sub-attributes with\na `null` value in the request are removed from the document (but not attributes\nof objects that are nested inside of arrays).\n", + "description": "Must contain a revision.\nIf this is set a document is only returned if\nit has exactly this revision.\nAlso see if-match header as an alternative to this.\n", "in": "query", - "name": "keepNull", + "name": "rev", "required": false, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "Define if a presentation of the deleted document should\nbe returned within the response 
object.\n", - "in": "query", - "name": "returnOld", + "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is returned,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. As an alternative\nyou can supply the ETag in an query parameter `rev`.\n", + "in": "header", + "name": "if-match", "required": false, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "Define if a presentation of the new document should\nbe returned within the response object.\n", - "in": "query", - "name": "returnNew", + "description": "If the \"If-None-Match\" header is given, then it must contain exactly one ETag. The document is returned,\nonly if it has a different revision as the given ETag. Otherwise a HTTP 304 is returned.\n", + "in": "header", + "name": "if-none-match", "required": false, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. 
As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", "in": "header", - "name": "if-match", + "name": "x-arango-trx-id", "required": false, "schema": { "type": "string" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "edge": { - "description": "The body has to contain a JSON object containing exactly the attributes that should be overwritten, all other attributes remain unchanged.\n", - "type": "object" - } - }, - "required": [ - "edge" - ], - "type": "object" - } - } - } - }, "responses": { "200": { "content": { @@ -13872,87 +19533,17 @@ "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 200, - "type": "integer" - }, - "edge": { - "description": "The internal attributes for the edge.\n", - "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev", - "_from", - "_to" - ], - "type": "object" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "new": { - "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", - "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - 
"type": "string" - }, - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev", - "_from", - "_to" - ], - "type": "object" + "example": 200, + "type": "integer" }, - "old": { - "description": "The complete overwritten edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "vertex": { + "description": "The complete vertex.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": "The _id value of the stored data.\n", "type": "string" @@ -13964,18 +19555,12 @@ "_rev": { "description": "The _rev value of the stored data.\n", "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" } @@ -13983,139 +19568,49 @@ "required": [ "error", "code", - "edge" + "vertex" ], "type": "object" } } }, - "description": "Returned if the edge can be updated, and `waitForSync` is `false`.\n" + "description": "Returned if the vertex can be found.\n" }, - "202": { + "304": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 202, + "example": 304, "type": "integer" }, - "edge": { - "description": "The internal attributes for the edge.\n", - "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, - 
"_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev", - "_from", - "_to" - ], - "type": "object" - }, "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, + "description": "A flag indicating that an error occurred.\n", + "example": true, "type": "boolean" }, - "new": { - "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", - "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev", - "_from", - "_to" - ], - "type": "object" + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" }, - "old": { - "description": "The complete overwritten edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", - "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": 
"string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev", - "_from", - "_to" - ], - "type": "object" + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" } }, "required": [ "error", "code", - "edge" + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "Returned if the request was successful but `waitForSync` is `false`.\n" + "description": "Returned if the if-none-match header is given and the\ncurrently stored vertex still has this revision value.\nSo there was no update between the last time the vertex\nwas fetched by the caller.\n" }, "403": { "content": { @@ -14137,7 +19632,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -14151,7 +19646,7 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to update edges in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" + "description": "Returned if your user has insufficient rights.\nIn order to update vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Read Only` access on the given collection.\n" }, "404": { "content": { @@ -14173,7 +19668,43 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "Returned in the 
following cases:\n- The graph cannot be found.\n- The collection is not part of the graph.\n- The vertex does not exist.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" + }, + "410": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 410, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -14187,7 +19718,7 @@ } } }, - "description": "Returned in the following cases:\n- No graph with this name can be found.\n- This collection is not part of the graph.\n- The edge to update does not exist.\n- Either `_from` or `_to` vertex does not exist (if updated).\n" + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" }, "412": { "content": { @@ -14209,7 +19740,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -14226,15 +19757,25 @@ "description": "Returned if if-match header is given, but the stored documents revision is different.\n" } }, - "summary": "Update an edge", + "summary": "Get a vertex", "tags": [ "Graphs" ] }, - "put": { - "description": "Replaces the data of an edge in the collection.\n", - "operationId": "replaceEdge", + "patch": { + "description": "Updates the data of the specific vertex in the collection.\n", + "operationId": "updateVertex", "parameters": [ + { + 
"description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, { "description": "The name of the graph.\n", "in": "path", @@ -14245,7 +19786,7 @@ } }, { - "description": "The name of the edge collection the edge belongs to.\n", + "description": "The name of the vertex collection the vertex belongs to.\n", "in": "path", "name": "collection", "required": true, @@ -14256,7 +19797,7 @@ { "description": "The `_key` attribute of the vertex.\n", "in": "path", - "name": "edge", + "name": "vertex", "required": true, "schema": { "type": "string" @@ -14306,6 +19847,15 @@ "schema": { "type": "string" } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } } ], "requestBody": { @@ -14313,18 +19863,13 @@ "application/json": { "schema": { "properties": { - "_from": { - "description": "The source vertex of this edge. Has to be valid within\nthe used edge definition.\n", - "type": "string" - }, - "_to": { - "description": "The target vertex of this edge. 
Has to be valid within\nthe used edge definition.\n", - "type": "string" + "vertex": { + "description": "The body has to contain a JSON object containing exactly the attributes that should be overwritten, all other attributes remain unchanged.\n", + "type": "object" } }, "required": [ - "_from", - "_to" + "vertex" ], "type": "object" } @@ -14332,23 +19877,24 @@ } }, "responses": { - "201": { + "200": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 201, + "example": 200, "type": "integer" }, - "edge": { - "description": "The internal attributes for the edge\n", + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "new": { + "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": "The _id value of the stored data.\n", "type": "string" @@ -14360,33 +19906,18 @@ "_rev": { "description": "The _rev value of the stored data.\n", "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "new": { - "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", + }, + "old": { + "description": "The complete overwritten vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is 
`true`.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": "The _id value of the stored data.\n", "type": "string" @@ -14398,28 +19929,18 @@ "_rev": { "description": "The _rev value of the stored data.\n", "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" }, - "old": { - "description": "The complete overwritten edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "vertex": { + "description": "The internal attributes for the vertex.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": "The _id value of the stored data.\n", "type": "string" @@ -14431,18 +19952,12 @@ "_rev": { "description": "The _rev value of the stored data.\n", "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" } @@ -14450,13 +19965,13 @@ "required": [ "error", "code", - "edge" + "vertex" ], "type": "object" } } }, - "description": "Returned if the request was successful but `waitForSync` is `true`.\n" + "description": "Returned if the vertex can be updated, and `waitForSync` is `true`.\n" }, "202": { "content": { @@ -14468,13 +19983,14 @@ "example": 202, "type": "integer" }, - "edge": { - "description": "The internal attributes for the edge\n", + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "new": { + "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is 
`true`.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": "The _id value of the stored data.\n", "type": "string" @@ -14486,33 +20002,18 @@ "_rev": { "description": "The _rev value of the stored data.\n", "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "new": { - "description": "The complete newly written edge document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", + "old": { + "description": "The complete overwritten vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": "The _id value of the stored data.\n", "type": "string" @@ -14524,28 +20025,18 @@ "_rev": { "description": "The _rev value of the stored data.\n", "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" }, - "old": { - "description": "The complete overwritten edge document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "vertex": { + "description": "The internal attributes for the vertex.\n", "properties": { - "_from": { - "description": "The _from value of the stored data.\n", - "type": "string" - }, "_id": { "description": "The _id value of the stored data.\n", "type": "string" @@ -14557,18 +20048,12 @@ "_rev": { "description": "The _rev value of 
the stored data.\n", "type": "string" - }, - "_to": { - "description": "The _to value of the stored data.\n", - "type": "string" } }, "required": [ "_id", "_key", - "_rev", - "_from", - "_to" + "_rev" ], "type": "object" } @@ -14576,13 +20061,13 @@ "required": [ "error", "code", - "edge" + "vertex" ], "type": "object" } } }, - "description": "Returned if the request was successful but `waitForSync` is `false`.\n" + "description": "Returned if the request was successful, and `waitForSync` is `false`.\n" }, "403": { "content": { @@ -14604,7 +20089,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -14618,7 +20103,7 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to replace edges in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" + "description": "Returned if your user has insufficient rights.\nIn order to update vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" }, "404": { "content": { @@ -14640,7 +20125,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -14654,16 +20139,16 @@ } } }, - "description": "Returned in the following cases:\n- No graph with this name can be found.\n- This collection is not part of the graph.\n- The edge to replace does not exist.\n- Either `_from` or `_to` vertex does not exist.\n" + "description": "Returned in the following cases:\n- The graph cannot be found.\n- The collection is not part of the graph.\n- The vertex to update does not exist.\n\nThis error also occurs if you 
try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" }, - "412": { + "410": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 412, + "example": 410, "type": "integer" }, "error": { @@ -14676,7 +20161,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -14690,73 +20175,16 @@ } } }, - "description": "Returned if if-match header is given, but the stored documents revision is different.\n" - } - }, - "summary": "Replace an edge", - "tags": [ - "Graphs" - ] - } - }, - "/_api/gharial/{graph}/vertex": { - "get": { - "description": "Lists all vertex collections within this graph, including orphan collections.\n", - "operationId": "listVertexCollections", - "parameters": [ - { - "description": "The name of the graph.\n", - "in": "path", - "name": "graph", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 200, - "type": "integer" - }, - "collections": { - "description": "The list of all vertex collections within this graph.\nIncludes the vertex collections used in edge definitions\nas well as orphan collections.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - } - }, - "required": [ - "error", - "code", - "collections" - ], - "type": "object" - } - } - }, - "description": "Is returned if the collections can be listed.\n" + "description": "This error occurs if you try to run this operation as part of 
a\nStream Transaction that has just been canceled or timed out.\n" }, - "404": { + "412": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 404, + "example": 412, "type": "integer" }, "error": { @@ -14769,7 +20197,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -14783,18 +20211,28 @@ } } }, - "description": "Returned if no graph with this name can be found.\n" + "description": "Returned if if-match header is given, but the stored documents revision is different.\n" } }, - "summary": "List vertex collections", + "summary": "Update a vertex", "tags": [ "Graphs" ] }, - "post": { - "description": "Adds a vertex collection to the set of orphan collections of the graph.\nIf the collection does not exist, it is created.\n", - "operationId": "addVertexCollection", + "put": { + "description": "Replaces the data of a vertex in the collection.\n", + "operationId": "replaceVertex", "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, { "description": "The name of the graph.\n", "in": "path", @@ -14803,6 +20241,78 @@ "schema": { "type": "string" } + }, + { + "description": "The name of the vertex collection the vertex belongs to.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The `_key` attribute of the vertex.\n", + "in": "path", + "name": "vertex", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Define if the request should wait until synced to disk.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + 
"description": "Define if values set to `null` should be stored.\nBy default (`true`), the given documents attribute(s) are set to `null`.\nIf this parameter is set to `false`, top-level attribute and sub-attributes with\na `null` value in the request are removed from the document (but not attributes\nof objects that are nested inside of arrays).\n", + "in": "query", + "name": "keepNull", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Define if a presentation of the deleted document should\nbe returned within the response object.\n", + "in": "query", + "name": "returnOld", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Define if a presentation of the new document should\nbe returned within the response object.\n", + "in": "query", + "name": "returnNew", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. 
As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", + "in": "header", + "name": "if-match", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "To make this operation a part of a Stream Transaction, set this header to the\ntransaction ID returned by the `POST /_api/transaction/begin` call.\n", + "in": "header", + "name": "x-arango-trx-id", + "required": false, + "schema": { + "type": "string" + } } ], "requestBody": { @@ -14810,26 +20320,13 @@ "application/json": { "schema": { "properties": { - "collection": { - "description": "The name of the vertex collection to add to the graph definition.\n", - "type": "string" - }, - "options": { - "description": "A JSON object to set options for creating vertex collections.\n", - "properties": { - "satellites": { - "description": "An array of collection names that is used to create SatelliteCollections\nfor a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only).\nEach array element must be a string and a valid collection name.\nThe collection type cannot be modified later.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, + "vertex": { + "description": "The body has to be the JSON object to be stored.\n", "type": "object" } }, "required": [ - "collection" + "vertex" ], "type": "object" } @@ -14837,14 +20334,14 @@ } }, "responses": { - "201": { + "200": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 201, + "example": 200, "type": "integer" }, "error": { @@ -14852,224 +20349,72 @@ "example": false, "type": "boolean" }, - "graph": { - "description": "The information about the modified graph.\n", + "new": { + "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", "properties": { "_id": { - 
"description": "The internal id value of this graph.\n", - "type": "string" - }, - "_rev": { - "description": "The revision of this graph. Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "description": "The _id value of the stored data.\n", "type": "string" }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" - }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "name": { - "description": "The name of the graph.\n", + "_key": { + "description": "The _key value of the stored data.\n", "type": "string" }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" - }, - "orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", - "items": { - "type": "string" - }, 
- "type": "array" - }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" - }, - "smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "_rev": { + "description": "The _rev value of the stored data.\n", "type": "string" - }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", - "type": "integer" } }, "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", "_id", - "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" + "_key", + "_rev" ], "type": "object" - } - }, - "required": [ - "error", - "code", - "graph" - ], - "type": "object" - } - } - }, - "description": "Is returned if the collection can be created and `waitForSync` is enabled\nfor the `_graphs` collection, or given in the request.\nThe response body contains the graph configuration that has been stored.\n" - }, - "202": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 202, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" }, - "graph": { - "description": "The information about the newly created graph\n", + "old": { + "description": "The complete overwritten vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", "properties": { "_id": { - "description": "The internal id value of this graph.\n", + "description": "The _id value of the stored data.\n", "type": "string" }, - "_rev": { - "description": "The revision of this graph. 
Can be used to make sure to not override\nconcurrent modifications to this graph.\n", + "_key": { + "description": "The _key value of the stored data.\n", "type": "string" }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" - }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "name": { - "description": "The name of the graph.\n", + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev" + ], + "type": "object" + }, + "vertex": { + "description": "The internal attributes for the vertex.\n", + "properties": { + "_id": { + "description": "The _id value of the stored data.\n", "type": "string" }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" - }, - "orphanCollections": { - "description": "An array of 
additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" - }, - "smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", + "_key": { + "description": "The _key value of the stored data.\n", "type": "string" }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", - "type": "integer" + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" } }, "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", "_id", - "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" + "_key", + "_rev" ], "type": "object" } @@ -15077,49 +20422,109 @@ "required": [ "error", "code", - "graph" + "vertex" ], "type": "object" } } }, - "description": "Is returned if the collection can be created and `waitForSync` is disabled\nfor the `_graphs` collection, or given in the request.\nThe response body contains the graph configuration that has been stored.\n" + "description": "Returned if the vertex can be replaced, and `waitForSync` is `true`.\n" }, - "400": { + "202": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 400, + "example": 202, "type": "integer" }, "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, + "description": "A flag indicating that no error occurred.\n", + "example": false, "type": "boolean" }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" + "new": { + "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", + "properties": { + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev" + ], + "type": "object" }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" + "old": { + "description": "The 
complete overwritten vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", + "properties": { + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev" + ], + "type": "object" + }, + "vertex": { + "description": "The internal attributes for the vertex.\n", + "properties": { + "_id": { + "description": "The _id value of the stored data.\n", + "type": "string" + }, + "_key": { + "description": "The _key value of the stored data.\n", + "type": "string" + }, + "_rev": { + "description": "The _rev value of the stored data.\n", + "type": "string" + } + }, + "required": [ + "_id", + "_key", + "_rev" + ], + "type": "object" } }, "required": [ "error", "code", - "errorNum", - "errorMessage" + "vertex" ], "type": "object" } } }, - "description": "Returned if the request is in an invalid format.\n" + "description": "Returned if the vertex can be replaced, and `waitForSync` is `false`.\n" }, "403": { "content": { @@ -15141,7 +20546,7 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, @@ -15155,7 +20560,7 @@ } } }, - "description": "Returned if your user has insufficient rights.\nIn order to modify a graph, you need to have at least the following privileges:\n- `Administrate` access on the database.\n- `Read Only` access on every collection used within this graph.\n" + "description": "Returned if your user has insufficient rights.\nIn order to replace vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given 
collection.\n" }, "404": { "content": { @@ -15177,463 +20582,506 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" - } - }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "Returned if no graph with this name can be found.\n" - } - }, - "summary": "Add a vertex collection", - "tags": [ - "Graphs" - ] - } - }, - "/_api/gharial/{graph}/vertex/{collection}": { - "delete": { - "description": "Removes a vertex collection from the list of the graph's\norphan collections. It can optionally delete the collection if it is\nnot used in any other graph.\n\nYou cannot remove vertex collections that are used in one of the\nedge definitions of the graph. You need to modify or remove the\nedge definition first in order to fully remove a vertex collection from\nthe graph.\n", - "operationId": "deleteVertexCollection", - "parameters": [ - { - "description": "The name of the graph.\n", - "in": "path", - "name": "graph", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "The name of the vertex collection.\n", - "in": "path", - "name": "collection", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "Drop the collection as well.\nThe collection is only dropped if it is not used in other graphs.\n", - "in": "query", - "name": "dropCollection", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 200, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "graph": { - "description": "The information about the newly created graph\n", - "properties": { - "_id": { - "description": "The internal 
id value of this graph.\n", - "type": "string" - }, - "_rev": { - "description": "The revision of this graph. Can be used to make sure to not override\nconcurrent modifications to this graph.\n", - "type": "string" - }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" - }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "name": { - "description": "The name of the graph.\n", - "type": "string" - }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" - }, - "orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the graph.\nFor 
SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" - }, - "smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", - "type": "string" - }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. _(cluster only)_\n", - "type": "integer" - } - }, - "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", - "_id", - "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" - ], - "type": "object" + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" } }, "required": [ "error", "code", - "graph" + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "Returned if the vertex collection was removed from the graph successfully\nand `waitForSync` is `true`.\n" + "description": "Returned in the following cases:\n- The graph cannot be found.\n- The collection is not part of the graph.\n- The vertex to replace does not exist.\n\nThis error also occurs if you try to run this operation as part of a\nStream Transaction but the transaction ID specified in the\n`x-arango-trx-id` header is unknown to the server.\n" }, - "202": { + "410": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 202, + "example": 410, "type": "integer" }, "error": { - 
"description": "A flag indicating that no error occurred.\n", - "example": false, + "description": "A flag indicating that an error occurred.\n", + "example": true, "type": "boolean" }, - "graph": { - "description": "The information about the newly created graph\n", - "properties": { - "_id": { - "description": "The internal id value of this graph.\n", - "type": "string" - }, - "_rev": { - "description": "The revision of this graph. Can be used to make sure to not override\nconcurrent modifications to this graph.\n", - "type": "string" - }, - "edgeDefinitions": { - "description": "An array of definitions for the relations of the graph.\nEach has the following type:\n", - "items": { - "properties": { - "collection": { - "description": "Name of the edge collection, where the edges are stored in.\n", - "type": "string" - }, - "from": { - "description": "List of vertex collection names.\nEdges in collection can only be inserted if their _from is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "to": { - "description": "List of vertex collection names.\n\nEdges in collection can only be inserted if their _to is in any of the collections here.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "collection", - "from", - "to" - ], - "type": "object" - }, - "type": "array" - }, - "isDisjoint": { - "description": "Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "isSatellite": { - "description": "Flag if the graph is a SatelliteGraph (Enterprise Edition only) or not.\n", - "type": "boolean" - }, - "isSmart": { - "description": "Whether the graph is a SmartGraph (Enterprise Edition only).\n", - "type": "boolean" - }, - "name": { - "description": "The name of the graph.\n", - "type": "string" - }, - "numberOfShards": { - "description": "Number of shards created for every new collection in the graph.\n", - "type": "integer" - }, - 
"orphanCollections": { - "description": "An array of additional vertex collections.\nDocuments in these collections do not have edges within this graph.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "replicationFactor": { - "description": "The replication factor used for every new collection in the graph.\nFor SatelliteGraphs, it is the string `\"satellite\"` (Enterprise Edition only).\n", - "type": "integer" - }, - "smartGraphAttribute": { - "description": "Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).\n", - "type": "string" - }, - "writeConcern": { - "description": "The default write concern for new collections in the graph.\nIt determines how many copies of each shard are required to be\nin sync on the different DB-Servers. If there are less than these many copies\nin the cluster, a shard refuses to write. Writes to shards with enough\nup-to-date copies succeed at the same time, however. The value of\n`writeConcern` cannot be greater than `replicationFactor`.\nFor SatelliteGraphs, the `writeConcern` is automatically controlled to equal the\nnumber of DB-Servers and the attribute is not available. 
_(cluster only)_\n", - "type": "integer" - } - }, - "required": [ - "name", - "edgeDefinitions", - "orphanCollections", - "numberOfShards", - "_id", - "_rev", - "replicationFactor", - "isSmart", - "isDisjoint", - "isSatellite" - ], - "type": "object" + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" } }, "required": [ "error", "code", - "graph" + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "This error occurs if you try to run this operation as part of a\nStream Transaction that has just been canceled or timed out.\n" + }, + "412": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 412, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" ], "type": "object" } } - }, - "description": "Returned if the request was successful but `waitForSync` is `false`.\n" - }, - "400": { + }, + "description": "Returned if if-match header is given, but the stored documents revision is different.\n" + } + }, + "summary": "Replace a vertex", + "tags": [ + "Graphs" + ] + } + }, + "/_db/{database-name}/_api/import": { + "post": { + "description": "Load JSON data and store it as documents into the specified collection.\n\nThe request body can have different JSON formats:\n- One JSON object per line (JSONL)\n- A JSON array of objects\n- One JSON array per line (CSV-like)\n\nIf you import documents into edge collections, all 
documents require a `_from`\nand a `_to` attribute.\n", + "operationId": "importData", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the target collection. The collection needs to exist already.\n", + "in": "query", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Determines how the body of the request is interpreted.\n\n- `documents`: JSON Lines (JSONL) format. Each line is expected to be one\n JSON object.\n\n Example:\n\n ```json\n {\"_key\":\"john\",\"name\":\"John Smith\",\"age\":35}\n {\"_key\":\"katie\",\"name\":\"Katie Foster\",\"age\":28}\n ```\n\n- `array` (or `list`): JSON format. The request body is expected to be a\n JSON array of objects. This format requires ArangoDB to parse the complete\n array and keep it in memory for the duration of the import. This is more\n resource-intensive than the line-wise JSONL processing.\n\n Any whitespace outside of strings is ignored, which means the JSON data can be\n a single line or be formatted as multiple lines.\n\n Example:\n\n ```json\n [\n {\"_key\":\"john\",\"name\":\"John Smith\",\"age\":35},\n {\"_key\":\"katie\",\"name\":\"Katie Foster\",\"age\":28}\n ]\n ```\n\n- `auto`: automatically determines the type (either `documents` or `array`).\n\n- Omit the `type` parameter entirely to import JSON arrays of tabular data,\n similar to CSV.\n\n The first line is an array of strings that defines the attribute keys. The\n subsequent lines are arrays with the attribute values. 
The keys and values\n are matched by the order of the array elements.\n\n Example:\n\n ```json\n [\"_key\",\"name\",\"age\"]\n [\"john\",\"John Smith\",35]\n [\"katie\",\"Katie Foster\",28]\n ```\n", + "in": "query", + "name": "type", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "An optional prefix for the values in `_from` attributes. If specified, the\nvalue is automatically prepended to each `_from` input value. This allows\nspecifying just the keys for `_from`.\n", + "in": "query", + "name": "fromPrefix", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "An optional prefix for the values in `_to` attributes. If specified, the\nvalue is automatically prepended to each `_to` input value. This allows\nspecifying just the keys for `_to`.\n", + "in": "query", + "name": "toPrefix", + "required": false, + "schema": { + "type": "string" + } + }, + { + "description": "If this parameter has a value of `true` or `yes`, then all data in the\ncollection will be removed prior to the import. Note that any existing\nindex definitions will be preserved.\n", + "in": "query", + "name": "overwrite", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Wait until documents have been synced to disk before returning.\n", + "in": "query", + "name": "waitForSync", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Controls what action is carried out in case of a unique key constraint\nviolation.\n\n- `error`: this will not import the current document because of the unique\n key constraint violation. This is the default setting.\n- `update`: this will update an existing document in the database with the\n data specified in the request. 
Attributes of the existing document that\n are not present in the request will be preserved.\n- `replace`: this will replace an existing document in the database with the\n data specified in the request.\n- `ignore`: this will not update an existing document and simply ignore the\n error caused by a unique key constraint violation.\n\nNote that `update`, `replace` and `ignore` will only work when the\nimport document in the request contains the `_key` attribute. `update` and\n`replace` may also fail because of secondary unique key constraint violations.\n", + "in": "query", + "name": "onDuplicate", + "required": false, + "schema": { + "enum": [ + "error", + "update", + "replace", + "ignore" + ], + "type": "string" + } + }, + { + "description": "If set to `true`, the whole import fails if any error occurs. Otherwise, the\nimport continues even if some documents are invalid and cannot be imported,\nskipping the problematic documents.\n", + "in": "query", + "name": "complete", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "If set to `true`, the result includes a `details` attribute with information\nabout documents that could not be imported.\n", + "in": "query", + "name": "details", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "documents": { + "description": "The body must either be a JSON-encoded array of objects or a string with\nmultiple JSON objects separated by newlines.\n", + "type": "string" + } + }, + "required": [ + "documents" + ], + "type": "object" + } + } + } + }, + "responses": { + "201": { "content": { "application/json": { "schema": { "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 400, + "created": { + "description": "The number of imported documents.\n", "type": "integer" }, - "error": { - "description": "A flag indicating that an error occurred.\n", - 
"example": true, - "type": "boolean" - }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" + "details": { + "description": "An array with the error messages caused by documents that could not be imported.\nOnly present if `details` is set to `true`.\n", + "items": { + "type": "string" + }, + "type": "array" }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" - } - }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "Returned if the vertex collection is still used in an edge definition.\nIn this case it cannot be removed from the graph yet, it has to be\nremoved from the edge definition first.\n" - }, - "403": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 403, + "empty": { + "description": "The number of empty lines found in the input. Only greater than zero for the\ntypes `documents` and `auto`.\n", "type": "integer" }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, - "type": "boolean" + "errors": { + "description": "The number of documents that were not imported due to errors.\n", + "type": "integer" }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" + "ignored": { + "description": "The number of failed but ignored insert operations. Only greater than zero if\n`onDuplicate` is set to `ignore`.\n", + "type": "integer" }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "updated": { + "description": "The number of updated/replaced documents. 
Only greater than zero if `onDuplicate`\nis set to either `update` or `replace`.\n", "type": "integer" } }, "required": [ - "error", - "code", - "errorNum", - "errorMessage" + "created", + "errors", + "empty", + "updated", + "ignored" ], "type": "object" } } }, - "description": "Returned if your user has insufficient rights.\nIn order to drop a vertex, you need to have at least the following privileges:\n- `Administrate` access on the database.\n" + "description": "is returned if all documents could be imported successfully.\n\nThe response is a JSON object with the following attributes:\n" + }, + "400": { + "description": "is returned if `type` contains an invalid value, no `collection` is\nspecified, the documents are incorrectly encoded, or the request\nis malformed.\n" }, "404": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 404, - "type": "integer" - }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, - "type": "boolean" - }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" - }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" - } - }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "Returned if no graph with this name can be found.\n" + "description": "is returned if `collection` or the `_from` or `_to` attributes of an\nimported edge refer to an unknown collection.\n" + }, + "409": { + "description": "is returned if the import would trigger a unique key violation and\n`complete` is set to `true`.\n" + }, + "500": { + "description": "is returned if the server cannot auto-generate a document key (out of keys\nerror) for a document with no user-defined key.\n" } }, - "summary": "Remove a vertex collection", + "summary": "Import JSON data as 
documents", "tags": [ - "Graphs" + "Import" + ] + } + }, + "/_db/{database-name}/_api/index": { + "get": { + "description": "Returns an object with an `indexes` attribute containing an array of all\nindex descriptions for the given collection. The same information is also\navailable in the `identifiers` attribute as an object with the index identifiers\nas object keys.\n", + "operationId": "listIndexes", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The collection name.\n", + "in": "query", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Whether to include figures and estimates in the result.\n", + "in": "query", + "name": "withStats", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Whether to include hidden indexes in the result. Internal indexes\n(such as `arangosearch`) and ones that are currently built in the\nbackground are hidden.\n", + "in": "query", + "name": "withHidden", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "returns a JSON object containing a list of indexes on that collection.\n" + } + }, + "summary": "List all indexes of a collection", + "tags": [ + "Indexes" ] }, "post": { - "description": "Adds a vertex to the given collection.\n", - "operationId": "createVertex", + "description": "Creates a new index in the collection `collection`. Expects\nan object containing the index details.\n\nThe type of the index to be created must specified in the **type**\nattribute of the index details. Depending on the index type, additional\nother attributes may need to specified in the request in order to create\nthe index.\n\nIndexes require the to be indexed attribute(s) in the **fields** attribute\nof the index details. 
Depending on the index type, a single attribute or\nmultiple attributes can be indexed. In the latter case, an array of\nstrings is expected.\n\nThe `.` character denotes sub-attributes in attribute paths. Attributes with\nliteral `.` in their name cannot be indexed. Attributes with the name `_id`\ncannot be indexed either, neither as a top-level attribute nor as a sub-attribute.\n\nOptionally, an index name may be specified as a string in the **name** attribute.\nIndex names have the same restrictions as collection names. If no value is\nspecified, one will be auto-generated.\n\nPersistent indexes (including vertex-centric indexes) can be created as unique\nor non-unique variants. Uniqueness can be controlled by specifying the\n**unique** option for the index definition. Setting it to `true` creates a\nunique index. Setting it to `false` or omitting the `unique` attribute creates a\nnon-unique index.\n\n\u003e **INFO:**\nUnique indexes on non-shard keys are not supported in cluster deployments.\n\n\nPersistent indexes can optionally be created in a sparse\nvariant. A sparse index will be created if the **sparse** attribute in\nthe index details is set to `true`. Sparse indexes do not index documents\nfor which any of the index attributes is either not set or is `null`.\n\nThe optional **deduplicate** attribute is supported by persistent array indexes.\nIt controls whether inserting duplicate index values\nfrom the same document into a unique array index will lead to a unique constraint\nerror or not. The default value is `true`, so only a single instance of each\nnon-unique index value will be inserted into the index per document. Trying to\ninsert a value into the index that already exists in the index always fails,\nregardless of the value of this attribute.\n\nThe optional **estimates** attribute is supported by persistent indexes.\nThis attribute controls whether index selectivity estimates are\nmaintained for the index. 
Not maintaining index selectivity estimates can have\na slightly positive impact on write performance.\nThe downside of turning off index selectivity estimates will be that\nthe query optimizer will not be able to determine the usefulness of different\ncompeting indexes in AQL queries when there are multiple candidate indexes to\nchoose from.\nThe `estimates` attribute is optional and defaults to `true` if not set. It will\nhave no effect on indexes other than persistent indexes.\n\nThe optional attribute **cacheEnabled** is supported by indexes of type\n`persistent`. This attribute controls whether an extra in-memory hash cache is\ncreated for the index. The hash cache can be used to speed up index lookups.\nThe cache can only be used for queries that look up all index attributes via\nan equality lookup (`==`). The hash cache cannot be used for range scans,\npartial lookups or sorting.\nThe cache will be populated lazily upon reading data from the index. Writing data\ninto the collection or updating existing data will invalidate entries in the\ncache. The cache may have a negative effect on performance in case index values\nare updated more often than they are read.\nThe maximum size of cache entries that can be stored is currently 4 MB, i.e.\nthe cumulated size of all index entries for any index lookup value must be\nless than 4 MB. 
This limitation is there to avoid storing the index entries\nof \"super nodes\" in the cache.\n`cacheEnabled` defaults to `false` and should only be used for indexes that\nare known to benefit from an extra layer of caching.\n\nThe optional attribute **inBackground** can be set to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground.\n", + "operationId": "createIndex", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The collection name.\n", + "in": "query", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "index-details": { + "description": "The options for the index.\n", + "type": "object" + } + }, + "required": [ + "index-details" + ], + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "If the index already exists, then an *HTTP 200* is returned.\n" + }, + "201": { + "description": "If the index does not already exist and could be created, then an *HTTP 201*\nis returned.\n" + }, + "400": { + "description": "If an invalid index description is posted or attributes are used that the\ntarget index will not support, then an *HTTP 400* is returned.\n" + }, + "404": { + "description": "If `collection` is unknown, then an *HTTP 404* is returned.\n" + } + }, + "summary": "Create an index", + "tags": [ + "Indexes" + ] + } + }, + "/_db/{database-name}/_api/index#fulltext": { + "post": { + "description": "\u003e **WARNING:**\nThe fulltext index type is deprecated from version 3.10 onwards.\n\n\nCreates a fulltext index for the collection `collection-name`, if\nit does not already exist. 
The call expects an object containing the index\ndetails.\n", + "operationId": "createIndexFulltext", "parameters": [ { - "description": "The name of the graph.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "graph", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the vertex collection the vertex should be inserted into.\n", - "in": "path", + "description": "The collection name.\n", + "in": "query", "name": "collection", "required": true, "schema": { "type": "string" } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "fields": { + "description": "an array of attribute names. Currently, the array is limited\nto exactly one attribute.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "inBackground": { + "description": "You can set this option to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. The default value is `false`.\n", + "type": "boolean" + }, + "minLength": { + "description": "Minimum character length of words to index. Will default\nto a server-defined value if unspecified. It is thus recommended to set\nthis value explicitly when creating the index.\n", + "type": "integer" + }, + "name": { + "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. 
`idx_832910498`.\n", + "type": "string" + }, + "type": { + "description": "Must be equal to `\"fulltext\"`.\n", + "type": "string" + } + }, + "required": [ + "type", + "fields", + "minLength" + ], + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "If the index already exists, then a *HTTP 200* is\nreturned.\n" }, + "201": { + "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + }, + "404": { + "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + } + }, + "summary": "Create a full-text index", + "tags": [ + "Indexes" + ] + } + }, + "/_db/{database-name}/_api/index#geo": { + "post": { + "description": "Creates a geo-spatial index in the collection `collection`, if\nit does not already exist. Expects an object containing the index details.\n\nGeo indexes are always sparse, meaning that documents that do not contain\nthe index attributes or have non-numeric values in the index attributes\nwill not be indexed.\n", + "operationId": "createIndexGeo", + "parameters": [ { - "description": "Define if the request should wait until synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "Define if the response should contain the complete\nnew version of the document.\n", + "description": "The collection name.\n", "in": "query", - "name": "returnNew", - "required": false, + "name": "collection", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], @@ -15642,13 +21090,37 @@ "application/json": { "schema": { "properties": { - "vertex": { - "description": "The body has to be the JSON object to be stored.\n", - "type": "object" + "fields": { + "description": "An array with one or two attribute 
paths.\n\nIf it is an array with one attribute path `location`, then a geo-spatial\nindex on all documents is created using `location` as path to the\ncoordinates. The value of the attribute must be an array with at least two\ndouble values. The array must contain the latitude (first value) and the\nlongitude (second value). All documents, which do not have the attribute\npath or with value that are not suitable, are ignored.\n\nIf it is an array with two attribute paths `latitude` and `longitude`,\nthen a geo-spatial index on all documents is created using `latitude`\nand `longitude` as paths the latitude and the longitude. The values of\nthe `latitude` and `longitude` attributes must each be a number (double).\nAll documents which do not have the attribute paths or which have\nvalues that are not suitable are ignored.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "geoJson": { + "description": "If a geo-spatial index on a `location` is constructed\nand `geoJson` is `true`, then the order within the array is longitude\nfollowed by latitude. This corresponds to the format described in\n\u003chttp://geojson.org/geojson-spec.html#positions\u003e\n", + "type": "boolean" + }, + "inBackground": { + "description": "You can set this option to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. 
The default value is `false`.\n", + "type": "boolean" + }, + "legacyPolygons": { + "description": "If `geoJson` is set to `true`, then this option controls how GeoJSON Polygons\nare interpreted.\n\n- If `legacyPolygons` is `true`, the smaller of the two regions defined by a\n linear ring is interpreted as the interior of the ring and a ring can at most\n enclose half the Earth's surface.\n- If `legacyPolygons` is `false`, the area to the left of the boundary ring's\n path is considered to be the interior and a ring can enclose the entire\n surface of the Earth.\n\nThe default is `true` for geo indexes that were created in versions before 3.10,\nand `false` for geo indexes created in 3.10 or later.\n", + "type": "boolean" + }, + "name": { + "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. 
`idx_832910498`.\n", + "type": "string" + }, + "type": { + "description": "Must be equal to `\"geo\"`.\n", + "type": "string" } }, "required": [ - "vertex" + "type", + "fields" ], "type": "object" } @@ -15656,571 +21128,783 @@ } }, "responses": { + "200": { + "description": "If the index already exists, then a *HTTP 200* is returned.\n" + }, "201": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 201, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "new": { - "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } + "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + }, + "404": { + "description": "If the `collection` is unknown, then a *HTTP 404* is returned.\n" + } + }, + "summary": "Create a geo-spatial index", + "tags": [ + "Indexes" + ] + } + }, + "/_db/{database-name}/_api/index#inverted": { + "post": { + "description": "Creates an inverted index for the collection `collection-name`, if\nit does not already exist. 
The call expects an object containing the index\ndetails.\n", + "operationId": "createIndexInverted", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The collection name.\n", + "in": "query", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "analyzer": { + "description": "The name of an Analyzer to use by default. This Analyzer is applied to the\nvalues of the indexed fields for which you don't define Analyzers explicitly.\n\nDefault: `identity`\n", + "type": "string" + }, + "cache": { + "description": "Enable this option to always cache the field normalization values in memory\nfor all fields by default. This can improve the performance of scoring and\nranking queries. Otherwise, these values are memory-mapped and it is up to the\noperating system to load them from disk into memory and to evict them from memory.\n\nNormalization values are computed for fields which are processed with Analyzers\nthat have the `\"norm\"` feature enabled. These values are used to score fairer if\nthe same tokens occur repeatedly, to emphasize these documents less.\n\nYou can also enable this option to always cache auxiliary data used for querying\nfields that are indexed with Geo Analyzers in memory for all fields by default.\nThis can improve the performance of geo-spatial queries.\n\nDefault: `false`\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. 
You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", + "type": "boolean" + }, + "cleanupIntervalStep": { + "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (default: 2, to disable use: 0).\nFor the case where the consolidation policies merge segments often (i.e. a lot\nof commit+consolidate), a lower value causes a lot of disk space to be\nwasted.\nFor the case where the consolidation policies rarely merge segments (i.e. few\ninserts/deletes), a higher value impacts performance without any added\nbenefits.\n\n_Background:_\n With every \"commit\" or \"consolidate\" operation, a new state of the\n inverted index' internal data structures is created on disk.\n Old states/snapshots are released once there are no longer any users\n remaining.\n However, the files for the released states/snapshots are left on disk, and\n only removed by \"cleanup\" operation.\n", + "type": "integer" + }, + "commitIntervalMsec": { + "description": "Wait at least this many milliseconds between committing inverted index data store\nchanges and making documents visible to queries (default: 1000, to disable\nuse: 0).\nFor the case where there are a lot of inserts/updates, a higher value causes the\nindex not to account for them and memory usage continues to grow until the commit.\nA lower value impacts performance, including the case where there are no or only a\nfew inserts/updates because of synchronous locking, and it wastes disk space for\neach commit call.\n\n_Background:_\n For data retrieval, ArangoSearch follows the concept of\n \"eventually-consistent\", i.e. 
eventually all the data in ArangoDB will be\n matched by corresponding query expressions.\n The concept of ArangoSearch \"commit\" operations is introduced to\n control the upper-bound on the time until document addition/removals are\n actually reflected by corresponding query expressions.\n Once a \"commit\" operation is complete, all documents added/removed prior to\n the start of the \"commit\" operation will be reflected by queries invoked in\n subsequent ArangoDB transactions, in-progress ArangoDB transactions will\n still continue to return a repeatable-read state.\n", + "type": "integer" + }, + "consolidationIntervalMsec": { + "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the inverted index data store and possibly release space on the filesystem\n(default: 1000, to disable use: 0).\nFor the case where there are a lot of data modification operations, a higher\nvalue could potentially have the data store consume more space and file handles.\nFor the case where there are a few data modification operations, a lower value\nimpacts performance due to no segment candidates being available for\nconsolidation.\n\n_Background:_\n For data modification, ArangoSearch follows the concept of a\n \"versioned data store\". Thus old versions of data may be removed once there\n are no longer any users of the old data. 
The frequency of the cleanup and\n compaction operations are governed by `consolidationIntervalMsec` and the\n candidates for compaction are selected via `consolidationPolicy`.\n", + "type": "integer" + }, + "consolidationPolicy": { + "description": "The consolidation policy to apply for selecting which segments should be merged\n(default: {}).\n\n_Background:_\n With each ArangoDB transaction that inserts documents, one or more\n ArangoSearch-internal segments get created.\n Similarly, for removed documents, the segments that contain such documents\n have these documents marked as 'deleted'.\n Over time, this approach causes a lot of small and sparse segments to be\n created.\n A \"consolidation\" operation selects one or more segments and copies all of\n their valid documents into a single new segment, thereby allowing the\n search algorithm to perform more optimally and for extra file handles to be\n released once old segments are no longer used.\n", + "properties": { + "minScore": { + "description": "Filter out consolidation candidates with a score less than this. Default: `0`\n", + "type": "integer" }, - "required": [ - "_id", - "_key", - "_rev" + "segmentsBytesFloor": { + "description": "Defines the value (in bytes) to treat all smaller segments as equal for\nconsolidation selection. Default: `2097152`\n", + "type": "integer" + }, + "segmentsBytesMax": { + "description": "The maximum allowed size of all consolidated segments in bytes.\nDefault: `5368709120`\n", + "type": "integer" + }, + "segmentsMax": { + "description": "The maximum number of segments that are evaluated as candidates for\nconsolidation. Default: `10`\n", + "type": "integer" + }, + "segmentsMin": { + "description": "The minimum number of segments that are evaluated as candidates for\nconsolidation. 
Default: `1`\n", + "type": "integer" + }, + "type": { + "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe supported types are:\n\n- `\"tier\"` (default): consolidate based on segment byte size and live\n document count as dictated by the customization attributes.\n", + "type": "string" + } + }, + "type": "object" + }, + "features": { + "description": "A list of Analyzer features. You can set this option to overwrite what features\nare enabled for the default `analyzer`.\n\nDefault: the features as defined by the Analyzer itself.\n", + "items": { + "enum": [ + "frequency", + "norm", + "position", + "offset" ], - "type": "object" + "type": "string" }, - "vertex": { - "description": "The internal attributes for the vertex.\n", + "type": "array", + "uniqueItems": true + }, + "fields": { + "description": "An array of attribute paths. You can use strings to index the fields with the\ndefault options, or objects to specify options for the fields (with the\nattribute path in the `name` property), or a mix of both.\n", + "items": { "properties": { - "_id": { - "description": "The _id value of the stored data.\n", + "analyzer": { + "description": "The name of an Analyzer to use for this field.\n\nDefault: the value defined by the top-level `analyzer` option.\n", "type": "string" }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" + "cache": { + "description": "Enable this option to always cache the field normalization values in memory\nfor this specific field. This can improve the performance of scoring and\nranking queries. Otherwise, these values are memory-mapped and it is up to the\noperating system to load them from disk into memory and to evict them from memory.\n\nNormalization values are computed for fields which are processed with Analyzers\nthat have the `\"norm\"` feature enabled. 
These values are used to score fairer if\nthe same tokens occur repeatedly, to emphasize these documents less.\n\nYou can also enable this option to always cache auxiliary data used for querying\nfields that are indexed with Geo Analyzers in memory for this specific field.\nThis can improve the performance of geo-spatial queries.\n\nDefault: the value defined by the top-level `cache` option.\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", + "type": "boolean" }, - "_rev": { - "description": "The _rev value of the stored data.\n", + "features": { + "description": "A list of Analyzer features to use for this field. You can set this option to\noverwrite what features are enabled for the `analyzer`.\n\nDefault: the features as defined by the Analyzer itself, or inherited from the\ntop-level `features` option if the `analyzer` option adjacent to this option is\nnot set.\n", + "items": { + "enum": [ + "frequency", + "norm", + "position", + "offset" + ], + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + "includeAllFields": { + "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nIf set to `true`, then all sub-attributes of this field are indexed, excluding\nany sub-attributes that are configured separately by other elements in the\n`fields` array (and their sub-attributes). The `analyzer` and `features`\nproperties apply to the sub-attributes.\n\nIf set to `false`, then sub-attributes are ignored.\n\nDefault: the value defined by the top-level `includeAllFields` option.\n", + "type": "boolean" + }, + "name": { + "description": "An attribute path. 
The `.` character denotes sub-attributes.\nYou can expand one array attribute with `[*]`.\n", "type": "string" + }, + "nested": { + "description": "Index the specified sub-objects that are stored in an array. Other than with the\n`fields` property, the values get indexed in a way that lets you query for\nco-occurring values. For example, you can search the sub-objects and all the\nconditions need to be met by a single sub-object instead of across all of them.\n\nThis property is available in the Enterprise Edition only.\n", + "items": { + "properties": { + "analyzer": { + "description": "The name of an Analyzer to use for this field.\nDefault: the value defined by the parent field, or the top-level `analyzer` option.\n", + "type": "string" + }, + "cache": { + "description": "Enable this option to always cache the field normalization values in memory\nfor this specific nested field. This can improve the performance of scoring and\nranking queries. Otherwise, these values are memory-mapped and it is up to the\noperating system to load them from disk into memory and to evict them from memory.\n\nNormalization values are computed for fields which are processed with Analyzers\nthat have the `\"norm\"` feature enabled. These values are used to score fairer if\nthe same tokens occur repeatedly, to emphasize these documents less.\n\nYou can also enable this option to always cache auxiliary data used for querying\nfields that are indexed with Geo Analyzers in memory for this specific nested field.\nThis can improve the performance of geo-spatial queries.\n\nDefault: the value defined by the top-level `cache` option.\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. 
You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", + "type": "boolean" + }, + "features": { + "description": "A list of Analyzer features to use for this field. You can set this option to\noverwrite what features are enabled for the `analyzer`.\n\nDefault: the features as defined by the Analyzer itself, or inherited from the\nparent field's or top-level `features` option if no `analyzer` option is set\nat a deeper level, closer to this option.\n", + "items": { + "enum": [ + "frequency", + "norm", + "position", + "offset" + ], + "type": "string" + }, + "type": "array", + "uniqueItems": true + }, + "name": { + "description": "An attribute path. The `.` character denotes sub-attributes.\n", + "type": "string" + }, + "nested": { + "description": "You can recursively index sub-objects. See the above description of the\n`nested` option.\n", + "items": { + "type": "object" + }, + "type": "array" + }, + "searchField": { + "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nYou can set the option to `true` to get the same behavior as with `arangosearch`\nViews regarding the indexing of array values for this field. If enabled, both,\narray and primitive values (strings, numbers, etc.) are accepted. Every element\nof an array is indexed according to the `trackListPositions` option.\n\nIf set to `false`, it depends on the attribute path. If it explicitly expands an\narray (`[*]`), then the elements are indexed separately. 
Otherwise, the array is\nindexed as a whole, but only `geopoint` and `aql` Analyzers accept array inputs.\nYou cannot use an array expansion if `searchField` is enabled.\n\nDefault: the value defined by the top-level `searchField` option.\n", + "type": "boolean" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "type": "array" + }, + "searchField": { + "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nYou can set the option to `true` to get the same behavior as with `arangosearch`\nViews regarding the indexing of array values for this field. If enabled, both,\narray and primitive values (strings, numbers, etc.) are accepted. Every element\nof an array is indexed according to the `trackListPositions` option.\n\nIf set to `false`, it depends on the attribute path. If it explicitly expands an\narray (`[*]`), then the elements are indexed separately. Otherwise, the array is\nindexed as a whole, but only `geopoint` and `aql` Analyzers accept array inputs.\nYou cannot use an array expansion if `searchField` is enabled.\n\nDefault: the value defined by the top-level `searchField` option.\n", + "type": "boolean" + }, + "trackListPositions": { + "description": "This option only applies if you use the inverted index in a `search-alias` Views,\nand `searchField` needs to be `true`.\n\nIf set to `true`, then track the value position in arrays for array values.\nFor example, when querying a document like `{ attr: [ \"valueX\", \"valueY\", \"valueZ\" ] }`,\nyou need to specify the array element, e.g. `doc.attr[1] == \"valueY\"`.\n\nIf set to `false`, all values in an array are treated as equal alternatives.\nYou don't specify an array element in queries, e.g. 
`doc.attr == \"valueY\"`, and\nall elements are searched for a match.\n\nDefault: the value defined by the top-level `trackListPositions` option.\n", + "type": "boolean" } }, "required": [ - "_id", - "_key", - "_rev" + "name" ], "type": "object" - } - }, - "required": [ - "error", - "code", - "vertex" - ], - "type": "object" - } - } - }, - "description": "Returned if the vertex can be added and `waitForSync` is `true`.\n" - }, - "202": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 202, - "type": "integer" }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" + "type": "array" + }, + "inBackground": { + "description": "This attribute can be set to `true` to create the index\nin the background, not write-locking the underlying collection for\nas long as if the index is built in the foreground. The default value is `false`.\n", + "type": "boolean" + }, + "includeAllFields": { + "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nIf set to `true`, then all document attributes are indexed, excluding any\nsub-attributes that are configured in the `fields` array (and their sub-attributes).\nThe `analyzer` and `features` properties apply to the sub-attributes.\n\nDefault: `false`\n\n\u003e **WARNING:**\nUsing `includeAllFields` for a lot of attributes in combination\nwith complex Analyzers may significantly slow down the indexing process.\n", + "type": "boolean" + }, + "name": { + "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. 
`idx_832910498`.\n", + "type": "string" + }, + "optimizeTopK": { + "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nAn array of strings defining sort expressions that you want to optimize.\nThis is also known as _WAND optimization_ (introduced in v3.12.0).\n\nIf you query a View with the `SEARCH` operation in combination with a\n`SORT` and `LIMIT` operation, search results can be retrieved faster if the\n`SORT` expression matches one of the optimized expressions.\n\nOnly sorting by highest rank is supported, that is, sorting by the result\nof a scoring function in descending order (`DESC`). Use `@doc` in the expression\nwhere you would normally pass the document variable emitted by the `SEARCH`\noperation to the scoring function.\n\nYou can define up to 64 expressions per View.\n\nExample: `[\"BM25(@doc) DESC\", \"TFIDF(@doc, true) DESC\"]`\n\nDefault: `[]`\n\nThis property is available in the Enterprise Edition only.\n", + "items": { + "type": "string" }, - "new": { - "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } + "type": "array" + }, + "parallelism": { + "description": "The number of threads to use for indexing the fields. Default: `2`\n", + "type": "integer" + }, + "primaryKeyCache": { + "description": "Enable this option to always cache the primary key column in memory. This can\nimprove the performance of queries that return many documents. 
Otherwise, these\nvalues are memory-mapped and it is up to the operating system to load them from\ndisk into memory and to evict them from memory.\n\nDefault: `false`\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", + "type": "boolean" + }, + "primarySort": { + "description": "You can define a primary sort order to enable an AQL optimization. If a query\niterates over all documents of a collection, wants to sort them by attribute values,\nand the (left-most) fields to sort by, as well as their sorting direction, match\nwith the `primarySort` definition, then the `SORT` operation is optimized away.\n", + "properties": { + "cache": { + "description": "Enable this option to always cache the primary sort columns in memory. This can\nimprove the performance of queries that utilize the primary sort order.\nOtherwise, these values are memory-mapped and it is up to the operating system\nto load them from disk into memory and to evict them from memory.\n\nDefault: `false`\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. 
You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", + "type": "boolean" }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" + "compression": { + "default": "lz4", + "description": "Defines how to compress the primary sort data.\n- `\"lz4\"`: use LZ4 fast compression.\n- `\"none\"`: disable compression to trade space for speed.\n", + "enum": [ + "lz4", + "none" + ], + "type": "string" + }, + "fields": { + "description": "An array of the fields to sort the index by and the direction to sort each field in.\n", + "items": { + "properties": { + "direction": { + "description": "The sorting direction.\n- `\"asc` for ascending\n- `\"desc\"` for descending\n", + "enum": [ + "asc", + "desc" + ], + "type": "string" + }, + "field": { + "description": "An attribute path. The `.` character denotes sub-attributes.\n", + "type": "string" + } + }, + "required": [ + "field", + "direction" + ], + "type": "object" + }, + "type": "array" + } }, - "vertex": { - "description": "The internal attributes generated while storing the vertex.\nDoes not include any attribute given in request body.\n", + "required": [ + "fields" + ], + "type": "object" + }, + "searchField": { + "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nYou can set the option to `true` to get the same behavior as with `arangosearch`\nViews regarding the indexing of array values as the default. If enabled, both,\narray and primitive values (strings, numbers, etc.) are accepted. Every element\nof an array is indexed according to the `trackListPositions` option.\n\nIf set to `false`, it depends on the attribute path. If it explicitly expands an\narray (`[*]`), then the elements are indexed separately. 
Otherwise, the array is\nindexed as a whole, but only `geopoint` and `aql` Analyzers accept array inputs.\nYou cannot use an array expansion if `searchField` is enabled.\n\nDefault: `false`\n", + "type": "boolean" + }, + "storedValues": { + "description": "The optional `storedValues` attribute can contain an array of objects with paths\nto additional attributes to store in the index. These additional attributes\ncannot be used for index lookups or for sorting, but they can be used for\nprojections. This allows an index to fully cover more queries and avoid extra\ndocument lookups.\n\nYou may use the following shorthand notations on index creation instead of\nan array of objects. The default compression and cache settings are used in\nthis case:\n\n- An array of strings, like `[\"attr1\", \"attr2\"]`, to place each attribute into\n a separate column of the index (introduced in v3.10.3).\n\n- An array of arrays of strings, like `[[\"attr1\", \"attr2\"]]`, to place the\n attributes into a single column of the index, or `[[\"attr1\"], [\"attr2\"]]`\n to place each attribute into a separate column. You can also mix it with the\n full form:\n\n ```json\n [\n [\"attr1\"],\n [\"attr2\", \"attr3\"],\n { \"fields\": [\"attr4\", \"attr5\"], \"cache\": true }\n ]\n ```\n", + "items": { "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" + "cache": { + "description": "Enable this option to always cache stored values in memory. This can improve the\nquery performance if stored values are involved. Otherwise, these values are\nmemory-mapped and it is up to the operating system to load them from disk into\nmemory and to evict them from memory.\n\nDefault: `false`\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. 
You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", + "type": "boolean" }, - "_key": { - "description": "The _key value of the stored data.\n", + "compression": { + "default": "lz4", + "description": "Defines how to compress the attribute values.\n- `\"lz4\"`: use LZ4 fast compression.\n- `\"none\"`: disable compression to trade space for speed.\n", + "enum": [ + "lz4", + "none" + ], "type": "string" }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" + "fields": { + "description": "A list of attribute paths. The `.` character denotes sub-attributes.\n", + "items": { + "type": "string" + }, + "type": "array" } }, "required": [ - "_id", - "_key", - "_rev" + "fields" ], "type": "object" - } - }, - "required": [ - "error", - "code", - "vertex" - ], - "type": "object" - } - } - }, - "description": "Returned if the request was successful but `waitForSync` is `false`.\n" - }, - "403": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 403, - "type": "integer" - }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, - "type": "boolean" - }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" - }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" - } + }, + "type": "array" }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } + "trackListPositions": { + "description": "This option only applies if you use the inverted index in a `search-alias` Views,\nand `searchField` needs to be `true`.\n\nIf set to `true`, then track the value position in arrays for array values.\nFor example, when querying a 
document like `{ attr: [ \"valueX\", \"valueY\", \"valueZ\" ] }`,\nyou need to specify the array element, e.g. `doc.attr[1] == \"valueY\"`.\n\nIf set to `false`, all values in an array are treated as equal alternatives.\nYou don't specify an array element in queries, e.g. `doc.attr == \"valueY\"`, and\nall elements are searched for a match.\n", + "type": "boolean" + }, + "type": { + "description": "Must be equal to `\"inverted\"`.\n", + "type": "string" + }, + "writebufferActive": { + "description": "Maximum number of concurrent active writers (segments) that perform a\ntransaction. Other writers (segments) wait till current active writers\n(segments) finish (default: 0, use 0 to disable)\n", + "type": "integer" + }, + "writebufferIdle": { + "description": "Maximum number of writers (segments) cached in the pool\n(default: 64, use 0 to disable)\n", + "type": "integer" + }, + "writebufferSizeMax": { + "description": "Maximum memory byte size per writer (segment) before a writer (segment) flush\nis triggered. `0` value turns off this limit for any writer (buffer) and data\nis flushed periodically based on the value defined for the flush thread\n(ArangoDB server startup option). 
`0` value should be used carefully due to\nhigh potential memory consumption\n(default: 33554432, use 0 to disable)\n", + "type": "integer" + } + }, + "required": [ + "type", + "fields" + ], + "type": "object" } - }, - "description": "Returned if your user has insufficient rights.\nIn order to insert vertices into the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" + } + } + }, + "responses": { + "200": { + "description": "If the index already exists, then a *HTTP 200* is returned.\n" + }, + "201": { + "description": "If the index does not already exist and can be created, then a *HTTP 201*\nis returned.\n" }, "404": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 404, - "type": "integer" + "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + } + }, + "summary": "Create an inverted index", + "tags": [ + "Indexes" + ] + } + }, + "/_db/{database-name}/_api/index#mdi": { + "post": { + "description": "Creates a multi-dimensional index for the collection `collection-name`, if\nit does not already exist. The call expects an object containing the index\ndetails.\n", + "operationId": "createIndexMdi", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The collection name.\n", + "in": "query", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "estimates": { + "default": true, + "description": "This attribute controls whether index selectivity estimates are maintained for the\nindex. 
Not maintaining index selectivity estimates can have a slightly positive\nimpact on write performance.\n\nThe downside of turning off index selectivity estimates is that\nthe query optimizer is not able to determine the usefulness of different\ncompeting indexes in AQL queries when there are multiple candidate indexes to\nchoose from.\n\nThe `estimates` attribute is optional and defaults to `true` if not set.\nIt has no effect on indexes other than `persistent`, `mdi`, and `mdi-prefixed`.\nIt cannot be disabled for non-unique `mdi` indexes because they have a fixed\nselectivity estimate of `1`.\n", + "type": "boolean" + }, + "fieldValueTypes": { + "description": "Must be equal to `\"double\"`. Currently only doubles are supported as values.\n", + "type": "string" + }, + "fields": { + "description": "An array of attribute names used for each dimension. Array expansions are not allowed.\n", + "items": { + "type": "string" }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, - "type": "boolean" + "type": "array" + }, + "inBackground": { + "description": "You can set this option to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. The default value is `false`.\n", + "type": "boolean" + }, + "name": { + "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. `idx_832910498`.\n", + "type": "string" + }, + "prefixFields": { + "description": "Requires `type` to be `\"mdi-prefixed\"`, and `prefixFields` needs to be set in this case.\n\nAn array of attribute names used as search prefix. 
Array expansions are not allowed.\n", + "items": { + "type": "string" }, - "errorMessage": { - "description": "A descriptive error message.\n", + "type": "array" + }, + "sparse": { + "default": false, + "description": "If `true`, then create a sparse index.\n", + "type": "boolean" + }, + "storedValues": { + "description": "The optional `storedValues` attribute can contain an array of paths to additional\nattributes to store in the index. These additional attributes cannot be used for\nindex lookups or for sorting, but they can be used for projections. This allows an\nindex to fully cover more queries and avoid extra document lookups.\n\nYou can have the same attributes in `storedValues` and `fields` as the attributes\nin `fields` cannot be used for projections, but you can also store additional\nattributes that are not listed in `fields`.\nAttributes in `storedValues` cannot overlap with the attributes specified in\n`prefixFields`. There is no reason to store them in the index because you need\nto specify them in queries in order to use `mdi-prefixed` indexes.\n\nYou cannot create multiple multi-dimensional indexes with the same `sparse`,\n`unique`, `fields` and (for `mdi-prefixed` indexes) `prefixFields` attributes\nbut different `storedValues` settings. That means the value of `storedValues` is\nnot considered by index creation calls when checking if an index is already\npresent or needs to be created.\n\nIn unique indexes, only the index attributes in `fields` and (for `mdi-prefixed`\nindexes) `prefixFields` are checked for uniqueness. 
The index attributes in\n`storedValues` are not checked for their uniqueness.\n\nNon-existing attributes are stored as `null` values inside `storedValues`.\n\nThe maximum number of attributes in `storedValues` is 32.\n", + "items": { "type": "string" }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" - } + "type": "array" }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } + "type": { + "description": "Must be equal to `\"mdi\"` or `\"mdi-prefixed\"`.\n", + "type": "string" + }, + "unique": { + "default": false, + "description": "if `true`, then create a unique index.\n", + "type": "boolean" + } + }, + "required": [ + "type", + "fields", + "fieldValueTypes" + ], + "type": "object" } - }, - "description": "Returned if no graph with this name can be found.\nOr if a graph is found but this collection is not part of the graph.\n" + } } }, - "summary": "Create a vertex", + "responses": { + "200": { + "description": "If the index already exists, then a *HTTP 200* is\nreturned.\n" + }, + "201": { + "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + }, + "400": { + "description": "If the index definition is invalid, then a *HTTP 400* is returned.\n" + }, + "404": { + "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + } + }, + "summary": "Create a multi-dimensional index", "tags": [ - "Graphs" + "Indexes" ] } }, - "/_api/gharial/{graph}/vertex/{collection}/{vertex}": { - "delete": { - "description": "Removes a vertex from the collection.\n", - "operationId": "deleteVertex", + "/_db/{database-name}/_api/index#persistent": { + "post": { + "description": "Creates a persistent index for the collection `collection-name`, if\nit does not already exist. 
The call expects an object containing the index\ndetails.\n\nIn a sparse index all documents will be excluded from the index that do not\ncontain at least one of the specified index attributes (i.e. `fields`) or that\nhave a value of `null` in any of the specified index attributes. Such documents\nwill not be indexed, and not be taken into account for uniqueness checks if\nthe `unique` flag is set.\n\nIn a non-sparse index, these documents will be indexed (for non-present\nindexed attributes, a value of `null` will be used) and will be taken into\naccount for uniqueness checks if the `unique` flag is set.\n\n\u003e **INFO:**\nUnique indexes on non-shard keys are not supported in cluster deployments.\n", + "operationId": "createIndexPersistent", "parameters": [ { - "description": "The name of the graph.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "graph", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the vertex collection the vertex belongs to.\n", - "in": "path", + "description": "The collection name.\n", + "in": "query", "name": "collection", "required": true, "schema": { "type": "string" } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "cacheEnabled": { + "description": "This attribute controls whether an extra in-memory hash cache is\ncreated for the index. The hash cache can be used to speed up index lookups.\nThe cache can only be used for queries that look up all index attributes via\nan equality lookup (`==`). The hash cache cannot be used for range scans,\npartial lookups or sorting.\n\nThe cache will be populated lazily upon reading data from the index. Writing data\ninto the collection or updating existing data will invalidate entries in the\ncache. 
The cache may have a negative effect on performance in case index values\nare updated more often than they are read.\n\nThe maximum size of cache entries that can be stored is currently 4 MB, i.e.\nthe cumulated size of all index entries for any index lookup value must be\nless than 4 MB. This limitation is there to avoid storing the index entries\nof \"super nodes\" in the cache.\n\n`cacheEnabled` defaults to `false` and should only be used for indexes that\nare known to benefit from an extra layer of caching.\n", + "type": "boolean" + }, + "deduplicate": { + "description": "The attribute controls whether inserting duplicate index values\nfrom the same document into a unique array index will lead to a unique constraint\nerror or not. The default value is `true`, so only a single instance of each\nnon-unique index value will be inserted into the index per document. Trying to\ninsert a value into the index that already exists in the index will always fail,\nregardless of the value of this attribute.\n", + "type": "boolean" + }, + "estimates": { + "description": "This attribute controls whether index selectivity estimates are maintained for the\nindex. Not maintaining index selectivity estimates can have a slightly positive\nimpact on write performance.\n\nThe downside of turning off index selectivity estimates is that\nthe query optimizer is not able to determine the usefulness of different\ncompeting indexes in AQL queries when there are multiple candidate indexes to\nchoose from.\n\nThe `estimates` attribute is optional and defaults to `true` if not set.\nIt has no effect on indexes other than `persistent`, `mdi`, and `mdi-prefixed`.\n", + "type": "boolean" + }, + "fields": { + "description": "An array of attribute paths.\n\nThe `.` character denotes sub-attributes in attribute paths. Attributes with\nliteral `.` in their name cannot be indexed. 
Attributes with the name `_id`\ncannot be indexed either, neither as a top-level attribute nor as a sub-attribute.\n\nYou can expand one array attribute with `[*]`.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "inBackground": { + "description": "This attribute can be set to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. The default value is `false`.\n", + "type": "boolean" + }, + "name": { + "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. `idx_832910498`.\n", + "type": "string" + }, + "sparse": { + "description": "If `true`, then create a sparse index. Defaults to `false`.\n", + "type": "boolean" + }, + "storedValues": { + "description": "The optional `storedValues` attribute can contain an array of paths to additional\nattributes to store in the index. These additional attributes cannot be used for\nindex lookups or for sorting, but they can be used for projections. This allows an\nindex to fully cover more queries and avoid extra document lookups.\nThe maximum number of attributes in `storedValues` is 32.\n\nIt is not possible to create multiple indexes with the same `fields` attributes\nand uniqueness but different `storedValues` attributes. 
That means the value of\n`storedValues` is not considered by index creation calls when checking if an\nindex is already present or needs to be created.\n\nIn unique indexes, only the attributes in `fields` are checked for uniqueness,\nbut the attributes in `storedValues` are not checked for their uniqueness.\nNon-existing attributes are stored as `null` values inside `storedValues`.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "type": { + "description": "Must be equal to `\"persistent\"`.\n", + "type": "string" + }, + "unique": { + "description": "If `true`, then create a unique index. Defaults to `false`.\nIn unique indexes, only the attributes in `fields` are checked for uniqueness,\nbut the attributes in `storedValues` are not checked for their uniqueness.\n", + "type": "boolean" + } + }, + "required": [ + "type", + "fields" + ], + "type": "object" + } + } + } + }, + "responses": { + "200": { + "description": "If the index already exists, then a *HTTP 200* is\nreturned.\n" + }, + "201": { + "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + }, + "400": { + "description": "If the collection already contains documents and you try to create a unique\npersistent index in such a way that there are documents violating the\nuniqueness, then a *HTTP 400* is returned.\n" }, + "404": { + "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + } + }, + "summary": "Create a persistent index", + "tags": [ + "Indexes" + ] + } + }, + "/_db/{database-name}/_api/index#ttl": { + "post": { + "description": "Creates a time-to-live (TTL) index for the collection `collection-name` if it\ndoes not already exist. 
The call expects an object containing the index\ndetails.\n", + "operationId": "createIndexTtl", + "parameters": [ { - "description": "The `_key` attribute of the vertex.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "vertex", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "Define if the request should wait until synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Define if a presentation of the deleted document should\nbe returned within the response object.\n", + "description": "The collection name.\n", "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. 
As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", - "in": "header", - "name": "if-match", - "required": false, + "name": "collection", + "required": true, "schema": { "type": "string" } } ], - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 200, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "old": { - "description": "The complete deleted vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - }, - "removed": { - "description": "Is set to true if the remove was successful.\n", - "type": "boolean" - } - }, - "required": [ - "error", - "code", - "removed" - ], - "type": "object" - } - } - }, - "description": "Returned if the vertex can be removed.\n" - }, - "202": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 202, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "old": { - "description": "The complete deleted vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored 
data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - }, - "removed": { - "description": "Is set to true if the remove was successful.\n", - "type": "boolean" - } - }, - "required": [ - "error", - "code", - "removed" - ], - "type": "object" - } - } - }, - "description": "Returned if the request was successful but `waitForSync` is `false`.\n" - }, - "403": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 403, - "type": "integer" - }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, - "type": "boolean" - }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" - }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" - } + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "expireAfter": { + "description": "The time interval (in seconds) from the point in time stored in the `fields`\nattribute after which the documents count as expired. 
Can be set to `0` to let\ndocuments expire as soon as the server time passes the point in time stored in\nthe document attribute, or to a higher number to delay the expiration.\n", + "type": "number" }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "Returned if your user has insufficient rights.\nIn order to delete vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" - }, - "404": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 404, - "type": "integer" - }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, - "type": "boolean" - }, - "errorMessage": { - "description": "A descriptive error message.\n", + "fields": { + "description": "an array with exactly one attribute path.\n", + "items": { "type": "string" }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" - } + "type": "array" }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "Returned in the following cases:\n- No graph with this name can be found.\n- This collection is not part of the graph.\n- The vertex to remove does not exist.\n" - }, - "412": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 412, - "type": "integer" - }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, - "type": "boolean" - }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" - }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" - } + 
"inBackground": { + "description": "You can set this option to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. The default value is `false`.\n", + "type": "boolean" }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], - "type": "object" - } + "name": { + "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. `idx_832910498`.\n", + "type": "string" + }, + "type": { + "description": "Must be equal to `\"ttl\"`.\n", + "type": "string" + } + }, + "required": [ + "type", + "fields", + "expireAfter" + ], + "type": "object" } - }, - "description": "Returned if if-match header is given, but the stored documents revision is different.\n" + } } }, - "summary": "Remove a vertex", + "responses": { + "200": { + "description": "If the index already exists, then a *HTTP 200* is returned.\n" + }, + "201": { + "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + }, + "400": { + "description": "If the collection already contains another TTL index, then an *HTTP 400* is\nreturned, as there can be at most one TTL index per collection.\n" + }, + "404": { + "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + } + }, + "summary": "Create a TTL index", "tags": [ - "Graphs" + "Indexes" ] - }, - "get": { - "description": "Gets a vertex from the given collection.\n", - "operationId": "getVertex", + } + }, + "/_db/{database-name}/_api/index/{index-id}": { + "delete": { + "description": "Deletes an index with `index-id`.\n", + "operationId": "deleteIndex", "parameters": [ { - "description": "The name of the graph.\n", + "description": "The name of the 
database.\n", + "example": "_system", "in": "path", - "name": "graph", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the vertex collection the vertex belongs to.\n", + "description": "The index id.\n", "in": "path", - "name": "collection", + "name": "index-id", "required": true, "schema": { "type": "string" } + } + ], + "responses": { + "200": { + "description": "If the index could be deleted, then an *HTTP 200* is\nreturned.\n" }, + "404": { + "description": "If the `index-id` is unknown, then an *HTTP 404* is returned.\n" + } + }, + "summary": "Delete an index", + "tags": [ + "Indexes" + ] + }, + "get": { + "description": "The result is an object describing the index. It has at least the following\nattributes:\n\n- `id`: the identifier of the index\n\n- `type`: the index type\n\nAll other attributes are type-dependent. For example, some indexes provide\n`unique` or `sparse` flags, whereas others don't. Some indexes also provide\na selectivity estimate in the `selectivityEstimate` attribute of the result.\n", + "operationId": "getIndex", + "parameters": [ { - "description": "The `_key` attribute of the vertex.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "vertex", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "Must contain a revision.\nIf this is set a document is only returned if\nit has exactly this revision.\nAlso see if-match header as an alternative to this.\n", - "in": "query", - "name": "rev", - "required": false, + "description": "The index identifier.\n", + "in": "path", + "name": "index-id", + "required": true, "schema": { "type": "string" } + } + ], + "responses": { + "200": { + "description": "If the index exists, then a *HTTP 200* is returned.\n" }, + "404": { + "description": "If the index does not exist, then a *HTTP 404*\nis returned.\n" + } + }, + "summary": "Get an index", + 
"tags": [ + "Indexes" + ] + } + }, + "/_db/{database-name}/_api/job/{job-id}": { + "delete": { + "description": "Deletes either all job results, expired job results, or the result of a\nspecific job.\nClients can use this method to perform an eventual garbage collection of job\nresults.\n", + "operationId": "deleteJob", + "parameters": [ { - "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is returned,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. As an alternative\nyou can supply the ETag in an query parameter `rev`.\n", - "in": "header", - "name": "if-match", - "required": false, + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { "type": "string" } }, { - "description": "If the \"If-None-Match\" header is given, then it must contain exactly one ETag. The document is returned,\nonly if it has a different revision as the given ETag. Otherwise a HTTP 304 is returned.\n", - "in": "header", - "name": "if-none-match", - "required": false, + "description": "The ID of the job to delete. The ID can be:\n- `all`: Deletes all jobs results. Currently executing or queued async\n jobs are not stopped by this call.\n- `expired`: Deletes expired results. To determine the expiration status of a\n result, pass the stamp query parameter. stamp needs to be a Unix timestamp,\n and all async job results created before this time are deleted.\n- **A numeric job ID**: In this case, the call removes the result of the\n specified async job. 
If the job is currently executing or queued, it is\n not aborted.\n", + "in": "path", + "name": "job-id", + "required": true, "schema": { "type": "string" } + }, + { + "description": "A Unix timestamp specifying the expiration threshold for when the `job-id` is\nset to `expired`.\n", + "in": "query", + "name": "stamp", + "required": false, + "schema": { + "type": "number" + } } ], "responses": { @@ -16229,59 +21913,29 @@ "application/json": { "schema": { "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 200, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, + "result": { + "description": "Always `true`.\n", + "example": true, "type": "boolean" - }, - "vertex": { - "description": "The complete vertex.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" } }, "required": [ - "error", - "code", - "vertex" + "result" ], "type": "object" } } }, - "description": "Returned if the vertex can be found.\n" + "description": "The result of a specific job has been deleted successfully.\nThis code is also returned if the deletion of `all` or `expired`\njobs has been requested, including if no results were deleted.\n" }, - "304": { + "400": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 304, + "example": 400, "type": "integer" }, "error": { @@ -16294,30 +21948,30 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" 
} }, "required": [ - "error", "code", - "errorNum", - "errorMessage" + "error", + "errorMessage", + "errorNum" ], "type": "object" } } }, - "description": "Returned if the if-none-match header is given and the\ncurrently stored vertex still has this revision value.\nSo there was no update between the last time the vertex\nwas fetched by the caller.\n" + "description": "The `job-id` is missing in the request or has an invalid value.\nIn this case, no `x-arango-async-id` HTTP header is returned.\n" }, - "403": { + "404": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 403, + "example": 404, "type": "integer" }, "error": { @@ -16330,30 +21984,87 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, "required": [ - "error", "code", - "errorNum", - "errorMessage" + "error", + "errorMessage", + "errorNum" ], "type": "object" } } }, - "description": "Returned if your user has insufficient rights.\nIn order to update vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Read Only` access on the given collection.\n" + "description": "The job cannot be found or has already been deleted, or the result\nhas already been fetched. In this case, no `x-arango-async-id`\nHTTP header is returned.\n" + } + }, + "summary": "Delete async job results", + "tags": [ + "Jobs" + ] + }, + "get": { + "description": "This endpoint returns either of the following, depending on the specified value\nfor the `job-id` parameter:\n\n- The IDs of async jobs with a specific status\n- The processing status of a specific async job\n", + "operationId": "getJob", + "parameters": [ + { + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } }, - "404": { + { + "description": "If you provide a value of `pending` or `done`, then the endpoint returns an\narray of strings with the job IDs of ongoing or completed async jobs.\n\nIf you provide a numeric job ID, then the endpoint returns the status of the\nspecific async job in the form of an HTTP reply without payload. Check the\nHTTP status code of the response for the job status.\n", + "in": "path", + "name": "job-id", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The maximum number of job IDs to return per call. If not specified, a\nserver-defined maximum value is used. Only applicable if you specify `pending`\nor `done` as `job-id` to list jobs.\n", + "in": "query", + "name": "count", + "required": false, + "schema": { + "type": "number" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "description": "A list of job IDs. 
The list can be empty.\n", + "items": { + "type": "string" + }, + "type": "array" + } + } + }, + "description": "The job has finished and you can fetch the result (the response has\nno body in this case), or your request for the list of `pending` or\n`done` jobs has been successful.\n" + }, + "204": { + "description": "The job is still in the queue of pending (or not yet finished) jobs.\n" + }, + "400": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 404, + "example": 400, "type": "integer" }, "error": { @@ -16366,30 +22077,30 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, "required": [ - "error", "code", - "errorNum", - "errorMessage" + "error", + "errorMessage", + "errorNum" ], "type": "object" } } }, - "description": "Returned in the following cases:\n- No graph with this name could be found.\n- This collection is not part of the graph.\n- The vertex does not exist.\n" + "description": "The `job-id` is missing in the request or has an invalid value.\nIn this case, no `x-arango-async-id` HTTP header is returned.\n" }, - "412": { + "404": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 412, + "example": 404, "type": "integer" }, "error": { @@ -16402,324 +22113,100 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, "required": [ - "error", "code", - "errorNum", - "errorMessage" + "error", + "errorMessage", + "errorNum" ], "type": "object" } } }, - "description": "Returned if if-match header is given, but the stored documents revision is different.\n" + "description": 
"The job cannot be found or has already been deleted, or the result\nhas already been fetched. In this case, no `x-arango-async-id`\nHTTP header is returned.\n" } }, - "summary": "Get a vertex", + "summary": "List async jobs by status or get the status of specific job", "tags": [ - "Graphs" + "Jobs" ] }, - "patch": { - "description": "Updates the data of the specific vertex in the collection.\n", - "operationId": "updateVertex", + "put": { + "description": "Returns the result of an async job identified by `job-id` if it's ready.\n\nIf the async job result is available on the server, the endpoint returns\nthe original operation's result headers and body, plus the additional\n`x-arango-async-job-id` HTTP header. The result and job are then removed\nwhich means that you can retrieve the result exactly once.\n\nIf the result is not available yet or if the job is not known (anymore),\nthe additional header is not present and you can tell the status from\nthe HTTP status code.\n", + "operationId": "getJobResult", "parameters": [ { - "description": "The name of the graph.\n", - "in": "path", - "name": "graph", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "The name of the vertex collection the vertex belongs to.\n", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", "in": "path", - "name": "collection", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The `_key` attribute of the vertex.\n", + "description": "The async job id.\n", "in": "path", - "name": "vertex", + "name": "job-id", "required": true, "schema": { "type": "string" } - }, - { - "description": "Define if the request should wait until synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Define if values set to `null` should be stored.\nBy default (`true`), the given documents attribute(s) are set to `null`.\nIf this parameter is set to `false`, top-level attribute and sub-attributes with\na `null` value in the request are removed from the document (but not attributes\nof objects that are nested inside of arrays).\n", - "in": "query", - "name": "keepNull", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Define if a presentation of the deleted document should\nbe returned within the response object.\n", - "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Define if a presentation of the new document should\nbe returned within the response object.\n", - "in": "query", - "name": "returnNew", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. 
As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", - "in": "header", - "name": "if-match", - "required": false, - "schema": { - "type": "string" - } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "vertex": { - "description": "The body has to contain a JSON object containing exactly the attributes that should be overwritten, all other attributes remain unchanged.\n", - "type": "object" - } - }, - "required": [ - "vertex" - ], - "type": "object" - } - } - } - }, "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 200, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "new": { - "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - }, - "old": { - "description": "The complete overwritten vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": 
"object" - }, - "vertex": { - "description": "The internal attributes for the vertex.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - } - }, - "required": [ - "error", - "code", - "vertex" - ], - "type": "object" - } - } - }, - "description": "Returned if the vertex can be updated, and `waitForSync` is `true`.\n" + "204": { + "description": "The job is still in the queue of pending (or not yet finished) jobs.\nIn this case, no `x-arango-async-id` HTTP header is returned.\n" }, - "202": { + "400": { "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 202, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "new": { - "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - }, - "old": { - "description": "The complete overwritten vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - 
}, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" }, - "vertex": { - "description": "The internal attributes for the vertex.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "The ArangoDB error number for the error that occurred.\n", + "type": "integer" } }, "required": [ - "error", "code", - "vertex" + "error", + "errorMessage", + "errorNum" ], "type": "object" } } }, - "description": "Returned if the request was successful, and `waitForSync` is `false`.\n" + "description": "The `job-id` is missing in the request or has an invalid value.\nIn this case, no `x-arango-async-id` HTTP header is returned.\n" }, - "403": { + "404": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 403, + "example": 404, "type": "integer" }, "error": { @@ -16732,30 +22219,86 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } 
}, "required": [ - "error", "code", - "errorNum", - "errorMessage" + "error", + "errorMessage", + "errorNum" ], "type": "object" } } }, - "description": "Returned if your user has insufficient rights.\nIn order to update vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" + "description": "The job cannot be found or has already been deleted, or the result\nhas already been fetched. In this case, no `x-arango-async-id`\nHTTP header is returned.\n" }, - "404": { + "default": { + "description": "If the job has finished, you get the result with the headers of the\noriginal operation with an additional `x-arango-async-id` HTTP header.\nThe HTTP status code is also that of the operation that executed\nasynchronously, which can be a success or error code depending on\nthe outcome of the operation.\n" + } + }, + "summary": "Get the results of an async job", + "tags": [ + "Jobs" + ] + } + }, + "/_db/{database-name}/_api/job/{job-id}/cancel": { + "put": { + "description": "Cancels the currently running job identified by `job-id`. Note that it still\nmight take some time to actually cancel the running async job.\n", + "operationId": "cancelJob", + "parameters": [ + { + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The async job id.\n", + "in": "path", + "name": "job-id", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "result": { + "description": "Always `true`.\n", + "example": true, + "type": "boolean" + } + }, + "required": [ + "result" + ], + "type": "object" + } + } + }, + "description": "The job cancellation has been initiated.\n" + }, + "400": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 404, + "example": 400, "type": "integer" }, "error": { @@ -16768,30 +22311,30 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, "required": [ - "error", "code", - "errorNum", - "errorMessage" + "error", + "errorMessage", + "errorNum" ], "type": "object" } } }, - "description": "Returned in the following cases:\n- No graph with this name can be found.\n- This collection is not part of the graph.\n- The vertex to update does not exist.\n" + "description": "The `job-id` is missing in the request or has an invalid value.\nIn this case, no `x-arango-async-id` HTTP header is returned.\n" }, - "412": { + "404": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 412, + "example": 404, "type": "integer" }, "error": { @@ -16804,103 +22347,102 @@ "type": "string" }, "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + 
"description": "The ArangoDB error number for the error that occurred.\n", "type": "integer" } }, "required": [ - "error", "code", - "errorNum", - "errorMessage" + "error", + "errorMessage", + "errorNum" ], "type": "object" } } }, - "description": "Returned if if-match header is given, but the stored documents revision is different.\n" + "description": "The job cannot be found or has already been deleted, or the result\nhas already been fetched. In this case, no `x-arango-async-id`\nHTTP header is returned.\n" } }, - "summary": "Update a vertex", + "summary": "Cancel an async job", "tags": [ - "Graphs" + "Jobs" ] - }, - "put": { - "description": "Replaces the data of a vertex in the collection.\n", - "operationId": "replaceVertex", + } + }, + "/_db/{database-name}/_api/key-generators": { + "get": { + "description": "Returns the available key generators for collections.\n", + "operationId": "getKeyGenerators", "parameters": [ { - "description": "The name of the graph.\n", - "in": "path", - "name": "graph", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "The name of the vertex collection the vertex belongs to.\n", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", "in": "path", - "name": "collection", + "name": "database-name", "required": true, "schema": { "type": "string" } - }, + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "keyGenerators": { + "description": "The available document key generators.\n", + "example": [ + "traditional", + "autoincrement", + "uuid", + "padded" + ], + "items": { + "enum": [ + "traditional", + "autoincrement", + "uuid", + "padded" + ], + "type": "string" + }, + "type": "array", + "uniqueItems": true + } + }, + "required": [ + "keyGenerators" + ], + "type": "object" + } + } + }, + "description": "An object that contains a list of the available generators for document keys.\n" + } + }, + "summary": "Get the available key generators", + "tags": [ + "Collections" + ] + } + }, + "/_db/{database-name}/_api/query": { + "post": { + "description": "This endpoint is for query validation only. To actually query the database,\nsee `/api/cursor`.\n", + "operationId": "parseAqlQuery", + "parameters": [ { - "description": "The `_key` attribute of the vertex.\n", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", "in": "path", - "name": "vertex", + "name": "database-name", "required": true, "schema": { "type": "string" } - }, - { - "description": "Define if the request should wait until synced to disk.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Define if values set to `null` should be stored.\nBy default (`true`), the given documents attribute(s) are set to `null`.\nIf this parameter is set to `false`, top-level attribute and sub-attributes with\na `null` value in the request are removed from the document (but not attributes\nof objects that are nested inside of arrays).\n", - "in": "query", - "name": "keepNull", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Define if a presentation of the deleted document should\nbe returned within the response object.\n", - "in": "query", - "name": "returnOld", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Define if a presentation of the new document should\nbe returned within the response object.\n", - "in": "query", - "name": "returnNew", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If the \"If-Match\" header is given, then it must contain exactly one ETag. The document is updated,\nif it has the same revision as the given ETag. Otherwise a HTTP 412 is returned. 
As an alternative\nyou can supply the ETag in an attribute rev in the URL.\n", - "in": "header", - "name": "if-match", - "required": false, - "schema": { - "type": "string" - } } ], "requestBody": { @@ -16908,19 +22450,49 @@ "application/json": { "schema": { "properties": { - "vertex": { - "description": "The body has to be the JSON object to be stored.\n", - "type": "object" + "query": { + "description": "To validate a query string without executing it, the query string can be\npassed to the server via an HTTP POST request.\n", + "type": "string" } }, "required": [ - "vertex" + "query" ], "type": "object" } } } - }, + }, + "responses": { + "200": { + "description": "If the query is valid, the server will respond with *HTTP 200* and\nreturn the names of the bind parameters it found in the query (if any) in\nthe `bindVars` attribute of the response. It will also return an array\nof the collections used in the query in the `collections` attribute.\nIf a query can be parsed successfully, the `ast` attribute of the returned\nJSON will contain the abstract syntax tree representation of the query.\nThe format of the `ast` is subject to change in future versions of\nArangoDB, but it can be used to inspect how ArangoDB interprets a given\nquery. 
Note that the abstract syntax tree will be returned without any\noptimizations applied to it.\n" + }, + "400": { + "description": "The request is malformed or the query contains a parse error.\nThe body of the response contains the error details embedded in a JSON object.\n" + } + }, + "summary": "Parse an AQL query", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/query-cache": { + "delete": { + "description": "Clears all results stored in the AQL query results cache for the current database.\n", + "operationId": "deleteAqlQueryCache", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], "responses": { "200": { "content": { @@ -16936,497 +22508,444 @@ "description": "A flag indicating that no error occurred.\n", "example": false, "type": "boolean" - }, - "new": { - "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - }, - "old": { - "description": "The complete overwritten vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - 
"required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - }, - "vertex": { - "description": "The internal attributes for the vertex.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } - }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" } }, "required": [ "error", - "code", - "vertex" + "code" ], "type": "object" } } }, - "description": "Returned if the vertex can be replaced, and `waitForSync` is `true`.\n" + "description": "The results cache has been cleared.\n" }, - "202": { + "400": { + "description": "The request is malformed.\n" + } + }, + "summary": "Clear the AQL query results cache", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/query-cache/entries": { + "get": { + "description": "Returns an array containing the AQL query results currently stored in the query results\ncache of the selected database.\n", + "operationId": "listQueryCacheResults", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { "content": { "application/json": { "schema": { - "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 202, - "type": "integer" - }, - "error": { - "description": "A flag indicating that no error occurred.\n", - "example": false, - "type": "boolean" - }, - "new": { - "description": "The complete newly written vertex document.\nIncludes all written attributes in the request body\nand all internal attributes generated by ArangoDB.\nOnly present if `returnNew` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - 
"type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } + "description": "The entries of the query results cache.\n", + "items": { + "description": "The properties of a cache entry.\n", + "properties": { + "bindVars": { + "description": "The bind parameters. This attribute is omitted if the\n`--query.tracking-with-bindvars` startup option is set\nto `false`.\n", + "type": "object" }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - }, - "old": { - "description": "The complete overwritten vertex document.\nIncludes all attributes stored before this operation.\nOnly present if `returnOld` is `true`.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", + "dataSources": { + "description": "The collections and Views involved in the query.\n", + "items": { "type": "string" }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } + "type": "array" }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - }, - "vertex": { - "description": "The internal attributes for the vertex.\n", - "properties": { - "_id": { - "description": "The _id value of the stored data.\n", - "type": "string" - }, - "_key": { - "description": "The _key value of the stored data.\n", - "type": "string" - }, - "_rev": { - "description": "The _rev value of the stored data.\n", - "type": "string" - } + "hash": { + "description": "The hash value calculated from the the query string,\ncertain query options, and the bind variables.\n", + "type": "string" }, - "required": [ - "_id", - "_key", - "_rev" - ], - "type": "object" - } + "hits": { + "description": "How many times the result has been served from the cache so far.\n", + "type": "integer" + }, + 
"query": { + "description": "The query string.\n", + "type": "string" + }, + "results": { + "description": "The number of documents/rows in the query result.\n", + "type": "integer" + }, + "runTime": { + "description": "The total duration of the query in seconds.\n", + "type": "number" + }, + "size": { + "description": "The size of the query result and bind parameters (in bytes).\n", + "type": "integer" + }, + "started": { + "description": "The date and time at which the query result has been added\nto the cache (in ISO 8601 format).\n", + "format": "date-time", + "type": "string" + } + }, + "required": [ + "hash", + "query", + "size", + "results", + "hits", + "runTime", + "started", + "dataSources" + ], + "type": "object" }, - "required": [ - "error", - "code", - "vertex" - ], - "type": "object" + "type": "array" } } }, - "description": "Returned if the vertex can be replaced, and `waitForSync` is `false`.\n" + "description": "The list of cached query results.\n" }, - "403": { + "400": { + "description": "The request is malformed.\n" + } + }, + "summary": "List the entries of the AQL query results cache", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/query-cache/properties": { + "get": { + "description": "Returns the global AQL query results cache configuration.\n", + "operationId": "getQueryCacheProperties", + "parameters": [ + { + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { "content": { "application/json": { "schema": { + "description": "The result cache configuration.\n", "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 403, - "type": "integer" - }, - "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, + "includeSystem": { + "description": "Whether results of queries that involve system collections\nare stored in the query results cache.\n", "type": "boolean" }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" + "maxEntrySize": { + "description": "The maximum individual result size of queries that are\nstored per database-specific cache (in bytes).\n" }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "maxResults": { + "description": "The maximum number of query results that are stored per\ndatabase-specific cache.\n", + "type": "integer" + }, + "maxResultsSize": { + "description": "The maximum cumulated size of query results that are\nstored per database-specific cache (in bytes).\n", "type": "integer" + }, + "mode": { + "description": "The mode the AQL query results cache operates in.\n", + "enum": [ + "off", + "on", + "demand" + ], + "type": "string" } }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], "type": "object" } } }, - "description": "Returned if your user has insufficient rights.\nIn order to replace vertices in the graph, you need to have at least the following privileges:\n- `Read Only` access on the database.\n- `Write` access on the given collection.\n" + "description": "The result cache configuration is returned successfully.\n" }, 
- "404": { + "400": { + "description": "The request is malformed.\n" + } + }, + "summary": "Get the AQL query results cache configuration", + "tags": [ + "Queries" + ] + }, + "put": { + "description": "Adjusts the global properties for the AQL query results cache.\n\nChanging the properties may invalidate all results currently in the cache.\n", + "operationId": "setQueryCacheProperties", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "description": "The result cache configuration settings to change.\n", + "properties": { + "includeSystem": { + "description": "Whether to store results of queries that involve\nsystem collections in the cache.\n", + "type": "boolean" + }, + "maxEntrySize": { + "description": "The maximum individual size of query results that are stored\nper database-specific cache (in bytes).\n", + "type": "integer" + }, + "maxResults": { + "description": "The maximum number of query results that are stored per\ndatabase-specific cache.\n", + "type": "integer" + }, + "maxResultsSize": { + "description": "The maximum cumulated size of query results that are stored\nper database-specific cache (in bytes).\n", + "type": "integer" + }, + "mode": { + "description": "The mode the AQL query cache shall operate in.\n", + "enum": [ + "off", + "on", + "demand" + ], + "type": "string" + } + }, + "type": "object" + } + } + } + }, + "responses": { + "200": { "content": { "application/json": { "schema": { + "description": "The result cache configuration.\n", "properties": { - "code": { - "description": "The HTTP response status code.\n", - "example": 404, - "type": "integer" - }, - "error": { - "description": 
"A flag indicating that an error occurred.\n", - "example": true, + "includeSystem": { + "description": "Whether results of queries that involve system collections\nare stored in the query results cache.\n", "type": "boolean" }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" + "maxEntrySize": { + "description": "The maximum individual result size of queries that are\nstored per database-specific cache (in bytes).\n" }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", + "maxResults": { + "description": "The maximum number of query results that are stored per\ndatabase-specific cache.\n", + "type": "integer" + }, + "maxResultsSize": { + "description": "The maximum cumulated size of query results that are\nstored per database-specific cache (in bytes).\n", "type": "integer" + }, + "mode": { + "description": "The mode the AQL query results cache operates in.\n", + "enum": [ + "off", + "on", + "demand" + ], + "type": "string" } }, - "required": [ - "error", - "code", - "errorNum", - "errorMessage" - ], "type": "object" } } }, - "description": "Returned in the following cases:\n- No graph with this name can be found.\n- This collection is not part of the graph.\n- The vertex to replace does not exist.\n" + "description": "The result cache configuration has been changed successfully.\n" }, - "412": { + "400": { + "description": "The request is malformed.\n" + } + }, + "summary": "Set the AQL query results cache configuration", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/query-plan-cache": { + "delete": { + "description": "Clears all execution plans stored in the AQL query plan cache for the\ncurrent database.\n\nThis requires write privileges for the current database.\n", + "operationId": "deleteAqlQueryPlanCache", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, 
+ "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { "content": { "application/json": { "schema": { "properties": { "code": { "description": "The HTTP response status code.\n", - "example": 412, + "example": 200, "type": "integer" }, "error": { - "description": "A flag indicating that an error occurred.\n", - "example": true, + "description": "A flag indicating that no error occurred.\n", + "example": false, "type": "boolean" - }, - "errorMessage": { - "description": "A descriptive error message.\n", - "type": "string" - }, - "errorNum": { - "description": "ArangoDB error number for the error that occurred.\n", - "type": "integer" } }, "required": [ "error", - "code", - "errorNum", - "errorMessage" + "code" ], "type": "object" } } }, - "description": "Returned if if-match header is given, but the stored documents revision is different.\n" + "description": "The query plan cache has been cleared for the current database.\n" } }, - "summary": "Replace a vertex", + "summary": "Clear the AQL query plan cache", "tags": [ - "Graphs" + "Queries" ] - } - }, - "/_api/import": { - "post": { - "description": "Load JSON data and store it as documents into the specified collection.\n\nThe request body can have different JSON formats:\n- One JSON object per line (JSONL)\n- A JSON array of objects\n- One JSON array per line (CSV-like)\n\nIf you import documents into edge collections, all documents require a `_from`\nand a `_to` attribute.\n", - "operationId": "importData", + }, + "get": { + "description": "Returns an array containing information about each AQL execution plan\ncurrently stored in the cache of the selected database.\n\nThis requires read privileges for the current database. In addition, only those\nquery plans are returned for which the current user has at least read permissions\non all collections and Views included in the query.\n", + "operationId": "listQueryCachePlans", "parameters": [ { - "description": "The name of the target collection. 
The collection needs to exist already.\n", - "in": "query", - "name": "collection", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } - }, - { - "description": "Determines how the body of the request is interpreted.\n\n- `documents`: JSON Lines (JSONL) format. Each line is expected to be one\n JSON object.\n\n Example:\n\n ```json\n {\"_key\":\"john\",\"name\":\"John Smith\",\"age\":35}\n {\"_key\":\"katie\",\"name\":\"Katie Foster\",\"age\":28}\n ```\n\n- `array` (or `list`): JSON format. The request body is expected to be a\n JSON array of objects. This format requires ArangoDB to parse the complete\n array and keep it in memory for the duration of the import. This is more\n resource-intensive than the line-wise JSONL processing.\n\n Any whitespace outside of strings is ignored, which means the JSON data can be\n a single line or be formatted as multiple lines.\n\n Example:\n\n ```json\n [\n {\"_key\":\"john\",\"name\":\"John Smith\",\"age\":35},\n {\"_key\":\"katie\",\"name\":\"Katie Foster\",\"age\":28}\n ]\n ```\n\n- `auto`: automatically determines the type (either `documents` or `array`).\n\n- Omit the `type` parameter entirely to import JSON arrays of tabular data,\n similar to CSV.\n\n The first line is an array of strings that defines the attribute keys. The\n subsequent lines are arrays with the attribute values. The keys and values\n are matched by the order of the array elements.\n\n Example:\n\n ```json\n [\"_key\",\"name\",\"age\"]\n [\"john\",\"John Smith\",35]\n [\"katie\",\"Katie Foster\",28]\n ```\n", - "in": "query", - "name": "type", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "An optional prefix for the values in `_from` attributes. If specified, the\nvalue is automatically prepended to each `_from` input value. 
This allows\nspecifying just the keys for `_from`.\n", - "in": "query", - "name": "fromPrefix", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "An optional prefix for the values in `_to` attributes. If specified, the\nvalue is automatically prepended to each `_to` input value. This allows\nspecifying just the keys for `_to`.\n", - "in": "query", - "name": "toPrefix", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "If this parameter has a value of `true` or `yes`, then all data in the\ncollection will be removed prior to the import. Note that any existing\nindex definitions will be preserved.\n", - "in": "query", - "name": "overwrite", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Wait until documents have been synced to disk before returning.\n", - "in": "query", - "name": "waitForSync", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Controls what action is carried out in case of a unique key constraint\nviolation. Possible values are:\n\n- `error`: this will not import the current document because of the unique\n key constraint violation. This is the default setting.\n- `update`: this will update an existing document in the database with the\n data specified in the request. Attributes of the existing document that\n are not present in the request will be preserved.\n- `replace`: this will replace an existing document in the database with the\n data specified in the request.\n- `ignore`: this will not update an existing document and simply ignore the\n error caused by a unique key constraint violation.\n\nNote that `update`, `replace` and `ignore` will only work when the\nimport document in the request contains the `_key` attribute. 
`update` and\n`replace` may also fail because of secondary unique key constraint violations.\n", - "in": "query", - "name": "onDuplicate", - "required": false, - "schema": { - "type": "string" - } - }, - { - "description": "If set to `true`, the whole import fails if any error occurs. Otherwise, the\nimport continues even if some documents are invalid and cannot be imported,\nskipping the problematic documents.\n", - "in": "query", - "name": "complete", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "If set to `true`, the result includes a `details` attribute with information\nabout documents that could not be imported.\n", - "in": "query", - "name": "details", - "required": false, - "schema": { - "type": "boolean" - } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "documents": { - "description": "The body must either be a JSON-encoded array of objects or a string with\nmultiple JSON objects separated by newlines.\n", - "type": "string" - } - }, - "required": [ - "documents" - ], - "type": "object" - } - } - } - }, "responses": { - "201": { + "200": { "content": { "application/json": { "schema": { - "properties": { - "created": { - "description": "The number of imported documents.\n", - "type": "integer" - }, - "details": { - "description": "An array with the error messages caused by documents that could not be imported.\nOnly present if `details` is set to `true`.\n", - "items": { + "description": "The entries of the query plan cache.\n", + "items": { + "description": "The properties of a cache entry.\n", + "properties": { + "bindVars": { + "description": "A subset of the original bind parameters with only the\ncollection bind parameters (e.g. `@@coll`). 
They need to\nhave the same names and values for utilizing a cached plan.\n", + "type": "object" + }, + "created": { + "description": "The date and time at which the query plan has been added\nto the cache (in ISO 8601 format).\n", + "format": "date-time", + "type": "string" + }, + "dataSources": { + "description": "The collections and Views involved in the query.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "fullCount": { + "description": "The value of the `fullCount` query option in the\noriginal query. This option generally leads to different\nexecution plans.\n", + "type": "boolean" + }, + "hash": { + "description": "The plan cache key.\n", "type": "string" }, - "type": "array" - }, - "empty": { - "description": "The number of empty lines found in the input. Only greater than zero for the\ntypes `documents` and `auto`.\n", - "type": "integer" - }, - "errors": { - "description": "The number of documents that were not imported due to errors.\n", - "type": "integer" - }, - "ignored": { - "description": "The number of failed but ignored insert operations. Only greater than zero if\n`onDuplicate` is set to `ignore`.\n", - "type": "integer" + "hits": { + "description": "How many times the cached plan has been utilized so far.\n", + "type": "integer" + }, + "memoryUsage": { + "description": "How much memory the plan cache entry takes up for the\nexecution plan, query string, and so on (in bytes).\n", + "type": "integer" + }, + "query": { + "description": "The query string.\n", + "type": "string" + }, + "queryHash": { + "description": "The hash value of the query string.\n", + "type": "integer" + } }, - "updated": { - "description": "The number of updated/replaced documents. 
Only greater than zero if `onDuplicate`\nis set to either `update` or `replace`.\n", - "type": "integer" - } + "required": [ + "hash", + "query", + "queryHash", + "bindVars", + "fullCount", + "dataSources", + "created", + "hits", + "memoryUsage" + ], + "type": "object" }, - "required": [ - "created", - "errors", - "empty", - "updated", - "ignored" - ], - "type": "object" + "type": "array" } } }, - "description": "is returned if all documents could be imported successfully.\n\nThe response is a JSON object with the following attributes:\n" - }, - "400": { - "description": "is returned if `type` contains an invalid value, no `collection` is\nspecified, the documents are incorrectly encoded, or the request\nis malformed.\n" - }, - "404": { - "description": "is returned if `collection` or the `_from` or `_to` attributes of an\nimported edge refer to an unknown collection.\n" - }, - "409": { - "description": "is returned if the import would trigger a unique key violation and\n`complete` is set to `true`.\n" - }, - "500": { - "description": "is returned if the server cannot auto-generate a document key (out of keys\nerror) for a document with no user-defined key.\n" + "description": "The list of cached query plans.\n" } }, - "summary": "Import JSON data as documents", + "summary": "List the entries of the AQL query plan cache", "tags": [ - "Import" + "Queries" ] } }, - "/_api/index": { + "/_db/{database-name}/_api/query/current": { "get": { - "description": "Returns an object with an `indexes` attribute containing an array of all\nindex descriptions for the given collection. The same information is also\navailable in the `identifiers` attribute as an object with the index identifiers\nas object keys.\n", - "operationId": "listIndexes", + "description": "Returns an array containing the AQL queries currently running in the selected\ndatabase. 
Each query is a JSON object with the following attributes:\n\n- `id`: the query's id\n\n- `database`: the name of the database the query runs in\n\n- `user`: the name of the user that started the query\n\n- `query`: the query string (potentially truncated)\n\n- `bindVars`: the bind parameter values used by the query\n\n- `started`: the date and time when the query was started\n\n- `runTime`: the query's run time up to the point the list of queries was\n queried\n\n- `peakMemoryUsage`: the query's peak memory usage in bytes (in increments of 32KB)\n\n- `state`: the query's current execution state (as a string). One of:\n - `\"initializing\"`\n - `\"parsing\"`\n - `\"optimizing ast\"`\n - `\"loading collections\"`\n - `\"instantiating plan\"`\n - `\"optimizing plan\"`\n - `\"instantiating executors\"`\n - `\"executing\"`\n - `\"finalizing\"`\n - `\"finished\"`\n - `\"killed\"`\n - `\"invalid\"`\n\n- `stream`: whether or not the query uses a streaming cursor\n", + "operationId": "listAqlQueries", "parameters": [ { - "description": "The collection name.\n", - "in": "query", - "name": "collection", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "Whether to include figures and estimates in the result.\n", - "in": "query", - "name": "withStats", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Whether to include hidden indexes in the result.\n", + "description": "If set to `true`, will return the currently running queries in all databases,\nnot just the selected one.\nUsing the parameter is only allowed in the `_system` database and with superuser\nprivileges.\n", "in": "query", - "name": "withHidden", + "name": "all", "required": false, "schema": { "type": "boolean" @@ -17435,146 +22954,59 @@ ], "responses": { "200": { - "description": "returns a JSON object containing a list of indexes on that 
collection.\n" - } - }, - "summary": "List all indexes of a collection", - "tags": [ - "Indexes" - ] - }, - "post": { - "description": "Creates a new index in the collection `collection`. Expects\nan object containing the index details.\n\nThe type of the index to be created must specified in the **type**\nattribute of the index details. Depending on the index type, additional\nother attributes may need to specified in the request in order to create\nthe index.\n\nIndexes require the to be indexed attribute(s) in the **fields** attribute\nof the index details. Depending on the index type, a single attribute or\nmultiple attributes can be indexed. In the latter case, an array of\nstrings is expected.\n\nThe `.` character denotes sub-attributes in attribute paths. Attributes with\nliteral `.` in their name cannot be indexed. Attributes with the name `_id`\ncannot be indexed either, neither as a top-level attribute nor as a sub-attribute.\n\nOptionally, an index name may be specified as a string in the **name** attribute.\nIndex names have the same restrictions as collection names. If no value is\nspecified, one will be auto-generated.\n\nPersistent indexes (including vertex-centric indexes) can be created as unique\nor non-unique variants. Uniqueness can be controlled by specifying the\n**unique** option for the index definition. Setting it to `true` creates a\nunique index. Setting it to `false` or omitting the `unique` attribute creates a\nnon-unique index.\n\n\u003e **INFO:**\nUnique indexes on non-shard keys are not supported in cluster deployments.\n\n\nPersistent indexes can optionally be created in a sparse\nvariant. A sparse index will be created if the **sparse** attribute in\nthe index details is set to `true`. 
Sparse indexes do not index documents\nfor which any of the index attributes is either not set or is `null`.\n\nThe optional **deduplicate** attribute is supported by persistent array indexes.\nIt controls whether inserting duplicate index values\nfrom the same document into a unique array index will lead to a unique constraint\nerror or not. The default value is `true`, so only a single instance of each\nnon-unique index value will be inserted into the index per document. Trying to\ninsert a value into the index that already exists in the index always fails,\nregardless of the value of this attribute.\n\nThe optional **estimates** attribute is supported by persistent indexes.\nThis attribute controls whether index selectivity estimates are\nmaintained for the index. Not maintaining index selectivity estimates can have\na slightly positive impact on write performance.\nThe downside of turning off index selectivity estimates will be that\nthe query optimizer will not be able to determine the usefulness of different\ncompeting indexes in AQL queries when there are multiple candidate indexes to\nchoose from.\nThe `estimates` attribute is optional and defaults to `true` if not set. It will\nhave no effect on indexes other than persistent indexes.\n\nThe optional attribute **cacheEnabled** is supported by indexes of type\n`persistent`. This attribute controls whether an extra in-memory hash cache is\ncreated for the index. The hash cache can be used to speed up index lookups.\nThe cache can only be used for queries that look up all index attributes via\nan equality lookup (`==`). The hash cache cannot be used for range scans,\npartial lookups or sorting.\nThe cache will be populated lazily upon reading data from the index. Writing data\ninto the collection or updating existing data will invalidate entries in the\ncache. 
The cache may have a negative effect on performance in case index values\nare updated more often than they are read.\nThe maximum size of cache entries that can be stored is currently 4 MB, i.e.\nthe cumulated size of all index entries for any index lookup value must be\nless than 4 MB. This limitation is there to avoid storing the index entries\nof \"super nodes\" in the cache.\n`cacheEnabled` defaults to `false` and should only be used for indexes that\nare known to benefit from an extra layer of caching.\n\nThe optional attribute **inBackground** can be set to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground.\n", - "operationId": "createIndex", - "parameters": [ - { - "description": "The collection name.\n", - "in": "query", - "name": "collection", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "index-details": { - "description": "The options for the index.\n", - "type": "object" - } - }, - "required": [ - "index-details" - ], - "type": "object" - } - } - } - }, - "responses": { - "200": { - "description": "If the index already exists, then an *HTTP 200* is returned.\n" - }, - "201": { - "description": "If the index does not already exist and could be created, then an *HTTP 201*\nis returned.\n" + "description": "Is returned when the list of queries can be retrieved successfully.\n" }, "400": { - "description": "If an invalid index description is posted or attributes are used that the\ntarget index will not support, then an *HTTP 400* is returned.\n" + "description": "The request is malformed.\n" }, - "404": { - "description": "If `collection` is unknown, then an *HTTP 404* is returned.\n" + "403": { + "description": "In case the `all` parameter is used but the request was made in a\ndifferent database than `_system`, or by a non-privileged user.\n" 
} }, - "summary": "Create an index", + "summary": "List the running AQL queries", "tags": [ - "Indexes" + "Queries" ] } }, - "/_api/index#fulltext": { - "post": { - "description": "\u003e **WARNING:**\nThe fulltext index type is deprecated from version 3.10 onwards.\n\n\nCreates a fulltext index for the collection `collection-name`, if\nit does not already exist. The call expects an object containing the index\ndetails.\n", - "operationId": "createIndexFulltext", + "/_db/{database-name}/_api/query/properties": { + "get": { + "description": "Returns the current query tracking configuration. The configuration is a\nJSON object with the following properties:\n\n- `enabled`: if set to `true`, then queries will be tracked. If set to\n `false`, neither queries nor slow queries will be tracked.\n\n- `trackSlowQueries`: if set to `true`, then slow queries will be tracked\n in the list of slow queries if their runtime exceeds the value set in\n `slowQueryThreshold`. In order for slow queries to be tracked, the `enabled`\n property must also be set to `true`.\n\n- `trackBindVars`: if set to `true`, then bind variables used in queries will\n be tracked.\n\n- `maxSlowQueries`: the maximum number of slow queries to keep in the list\n of slow queries. If the list of slow queries is full, the oldest entry in\n it will be discarded when additional slow queries occur.\n\n- `slowQueryThreshold`: the threshold value for treating a query as slow. A\n query with a runtime greater or equal to this threshold value will be\n put into the list of slow queries when slow query tracking is enabled.\n The value for `slowQueryThreshold` is specified in seconds.\n\n- `maxQueryStringLength`: the maximum query string length to keep in the\n list of queries. Query strings can have arbitrary lengths, and this property\n can be used to save memory in case very long query strings are used. 
The\n value is specified in bytes.\n", + "operationId": "getAqlQueryTrackingProperties", "parameters": [ { - "description": "The collection name.\n", - "in": "query", - "name": "collection", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "fields": { - "description": "an array of attribute names. Currently, the array is limited\nto exactly one attribute.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "inBackground": { - "description": "You can set this option to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. The default value is `false`.\n", - "type": "boolean" - }, - "minLength": { - "description": "Minimum character length of words to index. Will default\nto a server-defined value if unspecified. It is thus recommended to set\nthis value explicitly when creating the index.\n", - "type": "integer" - }, - "name": { - "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. 
`idx_832910498`.\n", - "type": "string" - }, - "type": { - "description": "must be equal to `\"fulltext\"`.\n", - "type": "string" - } - }, - "required": [ - "type", - "fields", - "minLength" - ], - "type": "object" - } - } - } - }, "responses": { "200": { - "description": "If the index already exists, then a *HTTP 200* is\nreturned.\n" - }, - "201": { - "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + "description": "Is returned if properties were retrieved successfully.\n" }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + "400": { + "description": "The request is malformed.\n" } }, - "summary": "Create a full-text index", + "summary": "Get the AQL query tracking configuration", "tags": [ - "Indexes" + "Queries" ] - } - }, - "/_api/index#geo": { - "post": { - "description": "Creates a geo-spatial index in the collection `collection`, if\nit does not already exist. Expects an object containing the index details.\n\nGeo indexes are always sparse, meaning that documents that do not contain\nthe index attributes or have non-numeric values in the index attributes\nwill not be indexed.\n", - "operationId": "createIndexGeo", + }, + "put": { + "description": "The properties need to be passed in the attribute `properties` in the body\nof the HTTP request. 
`properties` needs to be a JSON object.\n\nAfter the properties have been changed, the current set of properties will\nbe returned in the HTTP response.\n", + "operationId": "updateAqlQueryTrackingProperties", "parameters": [ { - "description": "The collection name.\n", - "in": "query", - "name": "collection", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" @@ -17586,37 +23018,38 @@ "application/json": { "schema": { "properties": { - "fields": { - "description": "An array with one or two attribute paths.\n\nIf it is an array with one attribute path `location`, then a geo-spatial\nindex on all documents is created using `location` as path to the\ncoordinates. The value of the attribute must be an array with at least two\ndouble values. The array must contain the latitude (first value) and the\nlongitude (second value). All documents, which do not have the attribute\npath or with value that are not suitable, are ignored.\n\nIf it is an array with two attribute paths `latitude` and `longitude`,\nthen a geo-spatial index on all documents is created using `latitude`\nand `longitude` as paths the latitude and the longitude. The values of\nthe `latitude` and `longitude` attributes must each be a number (double).\nAll documents which do not have the attribute paths or which have\nvalues that are not suitable are ignored.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "geoJson": { - "description": "If a geo-spatial index on a `location` is constructed\nand `geoJson` is `true`, then the order within the array is longitude\nfollowed by latitude. This corresponds to the format described in\nhttp://geojson.org/geojson-spec.html#positions\n", + "enabled": { + "description": "If set to `true`, then queries will be tracked. 
If set to\n`false`, neither queries nor slow queries will be tracked.\n", "type": "boolean" }, - "inBackground": { - "description": "You can set this option to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. The default value is `false`.\n", - "type": "boolean" + "maxQueryStringLength": { + "description": "The maximum query string length to keep in the list of queries.\nQuery strings can have arbitrary lengths, and this property\ncan be used to save memory in case very long query strings are used. The\nvalue is specified in bytes.\n", + "type": "integer" }, - "legacyPolygons": { - "description": "If `geoJson` is set to `true`, then this option controls how GeoJSON Polygons\nare interpreted.\n\n- If `legacyPolygons` is `true`, the smaller of the two regions defined by a\n linear ring is interpreted as the interior of the ring and a ring can at most\n enclose half the Earth's surface.\n- If `legacyPolygons` is `false`, the area to the left of the boundary ring's\n path is considered to be the interior and a ring can enclose the entire\n surface of the Earth.\n\nThe default is `true` for geo indexes that were created in versions before 3.10,\nand `false` for geo indexes created in 3.10 or later.\n", - "type": "boolean" + "maxSlowQueries": { + "description": "The maximum number of slow queries to keep in the list\nof slow queries. If the list of slow queries is full, the oldest entry in\nit will be discarded when additional slow queries occur.\n", + "type": "integer" }, - "name": { - "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. 
`idx_832910498`.\n", - "type": "string" + "slowQueryThreshold": { + "description": "The threshold value for treating a query as slow. A\nquery with a runtime greater or equal to this threshold value will be\nput into the list of slow queries when slow query tracking is enabled.\nThe value for `slowQueryThreshold` is specified in seconds.\n", + "type": "integer" }, - "type": { - "description": "must be equal to `\"geo\"`.\n", - "type": "string" + "trackBindVars": { + "description": "If set to `true`, then the bind variables used in queries will be tracked\nalong with queries.\n", + "type": "boolean" + }, + "trackSlowQueries": { + "description": "If set to `true`, then slow queries will be tracked\nin the list of slow queries if their runtime exceeds the value set in\n`slowQueryThreshold`. In order for slow queries to be tracked, the `enabled`\nproperty must also be set to `true`.\n", + "type": "boolean" } }, "required": [ - "type", - "fields" + "enabled", + "trackSlowQueries", + "trackBindVars", + "maxSlowQueries", + "slowQueryThreshold", + "maxQueryStringLength" ], "type": "object" } @@ -17625,509 +23058,300 @@ }, "responses": { "200": { - "description": "If the index already exists, then a *HTTP 200* is returned.\n" - }, - "201": { - "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + "description": "Is returned if the properties were changed successfully.\n" }, - "404": { - "description": "If the `collection` is unknown, then a *HTTP 404* is returned.\n" + "400": { + "description": "The request is malformed.\n" } }, - "summary": "Create a geo-spatial index", + "summary": "Update the AQL query tracking configuration", "tags": [ - "Indexes" + "Queries" ] } }, - "/_api/index#inverted": { - "post": { - "description": "Creates an inverted index for the collection `collection-name`, if\nit does not already exist. 
The call expects an object containing the index\ndetails.\n", - "operationId": "createIndexInverted", + "/_db/{database-name}/_api/query/rules": { + "get": { + "description": "A list of all optimizer rules and their properties.\n", + "operationId": "getAqlQueryOptimizerRules", "parameters": [ { - "description": "The collection name.\n", - "in": "query", - "name": "collection", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "analyzer": { - "description": "The name of an Analyzer to use by default. This Analyzer is applied to the\nvalues of the indexed fields for which you don't define Analyzers explicitly.\n\nDefault: `identity`\n", - "type": "string" - }, - "cache": { - "description": "Enable this option to always cache the field normalization values in memory\nfor all fields by default. This can improve the performance of scoring and\nranking queries. Otherwise, these values are memory-mapped and it is up to the\noperating system to load them from disk into memory and to evict them from memory.\n\nNormalization values are computed for fields which are processed with Analyzers\nthat have the `\"norm\"` feature enabled. 
These values are used to score fairer if\nthe same tokens occur repeatedly, to emphasize these documents less.\n\nYou can also enable this option to always cache auxiliary data used for querying\nfields that are indexed with Geo Analyzers in memory for all fields by default.\nThis can improve the performance of geo-spatial queries.\n\nDefault: `false`\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", - "type": "boolean" - }, - "cleanupIntervalStep": { - "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (default: 2, to disable use: 0).\nFor the case where the consolidation policies merge segments often (i.e. a lot\nof commit+consolidate), a lower value causes a lot of disk space to be\nwasted.\nFor the case where the consolidation policies rarely merge segments (i.e. 
few\ninserts/deletes), a higher value impacts performance without any added\nbenefits.\n\n_Background:_\n With every \"commit\" or \"consolidate\" operation, a new state of the\n inverted index' internal data structures is created on disk.\n Old states/snapshots are released once there are no longer any users\n remaining.\n However, the files for the released states/snapshots are left on disk, and\n only removed by \"cleanup\" operation.\n", - "type": "integer" - }, - "commitIntervalMsec": { - "description": "Wait at least this many milliseconds between committing inverted index data store\nchanges and making documents visible to queries (default: 1000, to disable\nuse: 0).\nFor the case where there are a lot of inserts/updates, a higher value causes the\nindex not to account for them and memory usage continues to grow until the commit.\nA lower value impacts performance, including the case where there are no or only a\nfew inserts/updates because of synchronous locking, and it wastes disk space for\neach commit call.\n\n_Background:_\n For data retrieval, ArangoSearch follows the concept of\n \"eventually-consistent\", i.e. 
eventually all the data in ArangoDB will be\n matched by corresponding query expressions.\n The concept of ArangoSearch \"commit\" operations is introduced to\n control the upper-bound on the time until document addition/removals are\n actually reflected by corresponding query expressions.\n Once a \"commit\" operation is complete, all documents added/removed prior to\n the start of the \"commit\" operation will be reflected by queries invoked in\n subsequent ArangoDB transactions, in-progress ArangoDB transactions will\n still continue to return a repeatable-read state.\n", - "type": "integer" - }, - "consolidationIntervalMsec": { - "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the inverted index data store and possibly release space on the filesystem\n(default: 1000, to disable use: 0).\nFor the case where there are a lot of data modification operations, a higher\nvalue could potentially have the data store consume more space and file handles.\nFor the case where there are a few data modification operations, a lower value\nimpacts performance due to no segment candidates being available for\nconsolidation.\n\n_Background:_\n For data modification, ArangoSearch follows the concept of a\n \"versioned data store\". Thus old versions of data may be removed once there\n are no longer any users of the old data. 
The frequency of the cleanup and\n compaction operations are governed by `consolidationIntervalMsec` and the\n candidates for compaction are selected via `consolidationPolicy`.\n", - "type": "integer" - }, - "consolidationPolicy": { - "description": "The consolidation policy to apply for selecting which segments should be merged\n(default: {}).\n\n_Background:_\n With each ArangoDB transaction that inserts documents, one or more\n ArangoSearch-internal segments get created.\n Similarly, for removed documents, the segments that contain such documents\n have these documents marked as 'deleted'.\n Over time, this approach causes a lot of small and sparse segments to be\n created.\n A \"consolidation\" operation selects one or more segments and copies all of\n their valid documents into a single new segment, thereby allowing the\n search algorithm to perform more optimally and for extra file handles to be\n released once old segments are no longer used.\n", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "description": "An array of objects. Each object describes an AQL optimizer rule.\n", + "items": { "properties": { - "minScore": { - "description": "Filter out consolidation candidates with a score less than this. Default: `0`\n", - "type": "integer" - }, - "segmentsBytesFloor": { - "description": "Defines the value (in bytes) to treat all smaller segments as equal for\nconsolidation selection. Default: `2097152`\n", - "type": "integer" - }, - "segmentsBytesMax": { - "description": "The maximum allowed size of all consolidated segments in bytes.\nDefault: `5368709120`\n", - "type": "integer" - }, - "segmentsMax": { - "description": "The maximum number of segments that are evaluated as candidates for\nconsolidation. Default: `10`\n", - "type": "integer" - }, - "segmentsMin": { - "description": "The minimum number of segments that are evaluated as candidates for\nconsolidation. 
Default: `1`\n", - "type": "integer" - }, - "type": { - "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe supported types are:\n\n- `\"tier\"` (default): consolidate based on segment byte size and live\n document count as dictated by the customization attributes.\n", - "type": "string" - } - }, - "type": "object" - }, - "features": { - "description": "A list of Analyzer features. You can set this option to overwrite what features\nare enabled for the default `analyzer`. Possible features:\n- `\"frequency\"`\n- `\"norm\"`\n- `\"position\"`\n- `\"offset\"`\n\nDefault: the features as defined by the Analyzer itself.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "fields": { - "description": "An array of attribute paths. You can use strings to index the fields with the\ndefault options, or objects to specify options for the fields (with the\nattribute path in the `name` property), or a mix of both.\n", - "items": { - "properties": { - "analyzer": { - "description": "The name of an Analyzer to use for this field.\n\nDefault: the value defined by the top-level `analyzer` option.\n", - "type": "string" - }, - "cache": { - "description": "Enable this option to always cache the field normalization values in memory\nfor this specific field. This can improve the performance of scoring and\nranking queries. Otherwise, these values are memory-mapped and it is up to the\noperating system to load them from disk into memory and to evict them from memory.\n\nNormalization values are computed for fields which are processed with Analyzers\nthat have the `\"norm\"` feature enabled. 
These values are used to score fairer if\nthe same tokens occur repeatedly, to emphasize these documents less.\n\nYou can also enable this option to always cache auxiliary data used for querying\nfields that are indexed with Geo Analyzers in memory for this specific field.\nThis can improve the performance of geo-spatial queries.\n\nDefault: the value defined by the top-level `cache` option.\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", - "type": "boolean" - }, - "features": { - "description": "A list of Analyzer features to use for this field. You can set this option to\noverwrite what features are enabled for the `analyzer`. Possible features:\n- `\"frequency\"`\n- `\"norm\"`\n- `\"position\"`\n- `\"offset\"`\n\nDefault: the features as defined by the Analyzer itself, or inherited from the\ntop-level `features` option if the `analyzer` option adjacent to this option is\nnot set.\n", - "items": { - "type": "string" + "flags": { + "description": "An object with the properties of the rule.\n", + "properties": { + "canBeDisabled": { + "description": "Whether users are allowed to disable this rule. A few rules are mandatory.\n", + "type": "boolean" }, - "type": "array" - }, - "includeAllFields": { - "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nIf set to `true`, then all sub-attributes of this field are indexed, excluding\nany sub-attributes that are configured separately by other elements in the\n`fields` array (and their sub-attributes). 
The `analyzer` and `features`\nproperties apply to the sub-attributes.\n\nIf set to `false`, then sub-attributes are ignored.\n\nDefault: the value defined by the top-level `includeAllFields` option.\n", - "type": "boolean" - }, - "name": { - "description": "An attribute path. The `.` character denotes sub-attributes.\nYou can expand one array attribute with `[*]`.\n", - "type": "string" - }, - "nested": { - "description": "Index the specified sub-objects that are stored in an array. Other than with the\n`fields` property, the values get indexed in a way that lets you query for\nco-occurring values. For example, you can search the sub-objects and all the\nconditions need to be met by a single sub-object instead of across all of them.\n\nThis property is available in the Enterprise Edition only.\n", - "items": { - "properties": { - "analyzer": { - "description": "The name of an Analyzer to use for this field.\nDefault: the value defined by the parent field, or the top-level `analyzer` option.\n", - "type": "string" - }, - "cache": { - "description": "Enable this option to always cache the field normalization values in memory\nfor this specific nested field. This can improve the performance of scoring and\nranking queries. Otherwise, these values are memory-mapped and it is up to the\noperating system to load them from disk into memory and to evict them from memory.\n\nNormalization values are computed for fields which are processed with Analyzers\nthat have the `\"norm\"` feature enabled. 
These values are used to score fairer if\nthe same tokens occur repeatedly, to emphasize these documents less.\n\nYou can also enable this option to always cache auxiliary data used for querying\nfields that are indexed with Geo Analyzers in memory for this specific nested field.\nThis can improve the performance of geo-spatial queries.\n\nDefault: the value defined by the top-level `cache` option.\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", - "type": "boolean" - }, - "features": { - "description": "A list of Analyzer features to use for this field. You can set this option to\noverwrite what features are enabled for the `analyzer`. Possible features:\n- `\"frequency\"`\n- `\"norm\"`\n- `\"position\"`\n- `\"offset\"`\n\nDefault: the features as defined by the Analyzer itself, or inherited from the\nparent field's or top-level `features` option if no `analyzer` option is set\nat a deeper level, closer to this option.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "name": { - "description": "An attribute path. The `.` character denotes sub-attributes.\n", - "type": "string" - }, - "nested": { - "description": "You can recursively index sub-objects. See the above description of the\n`nested` option.\n", - "items": { - "type": "object" - }, - "type": "array" - }, - "searchField": { - "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nYou can set the option to `true` to get the same behavior as with `arangosearch`\nViews regarding the indexing of array values for this field. If enabled, both,\narray and primitive values (strings, numbers, etc.) are accepted. 
Every element\nof an array is indexed according to the `trackListPositions` option.\n\nIf set to `false`, it depends on the attribute path. If it explicitly expands an\narray (`[*]`), then the elements are indexed separately. Otherwise, the array is\nindexed as a whole, but only `geopoint` and `aql` Analyzers accept array inputs.\nYou cannot use an array expansion if `searchField` is enabled.\n\nDefault: the value defined by the top-level `searchField` option.\n", - "type": "boolean" - } - }, - "required": [ - "name" - ], - "type": "object" + "canCreateAdditionalPlans": { + "description": "Whether this rule may create additional query execution plans.\n", + "type": "boolean" }, - "type": "array" - }, - "searchField": { - "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nYou can set the option to `true` to get the same behavior as with `arangosearch`\nViews regarding the indexing of array values for this field. If enabled, both,\narray and primitive values (strings, numbers, etc.) are accepted. Every element\nof an array is indexed according to the `trackListPositions` option.\n\nIf set to `false`, it depends on the attribute path. If it explicitly expands an\narray (`[*]`), then the elements are indexed separately. Otherwise, the array is\nindexed as a whole, but only `geopoint` and `aql` Analyzers accept array inputs.\nYou cannot use an array expansion if `searchField` is enabled.\n\nDefault: the value defined by the top-level `searchField` option.\n", - "type": "boolean" - }, - "trackListPositions": { - "description": "This option only applies if you use the inverted index in a `search-alias` Views,\nand `searchField` needs to be `true`.\n\nIf set to `true`, then track the value position in arrays for array values.\nFor example, when querying a document like `{ attr: [ \"valueX\", \"valueY\", \"valueZ\" ] }`,\nyou need to specify the array element, e.g. 
`doc.attr[1] == \"valueY\"`.\n\nIf set to `false`, all values in an array are treated as equal alternatives.\nYou don't specify an array element in queries, e.g. `doc.attr == \"valueY\"`, and\nall elements are searched for a match.\n\nDefault: the value defined by the top-level `trackListPositions` option.\n", - "type": "boolean" - } - }, - "required": [ - "name" - ], - "type": "object" - }, - "type": "array" - }, - "inBackground": { - "description": "This attribute can be set to `true` to create the index\nin the background, not write-locking the underlying collection for\nas long as if the index is built in the foreground. The default value is `false`.\n", - "type": "boolean" - }, - "includeAllFields": { - "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nIf set to `true`, then all document attributes are indexed, excluding any\nsub-attributes that are configured in the `fields` array (and their sub-attributes).\nThe `analyzer` and `features` properties apply to the sub-attributes.\n\nDefault: `false`\n\n\u003e **WARNING:**\nUsing `includeAllFields` for a lot of attributes in combination\nwith complex Analyzers may significantly slow down the indexing process.\n", - "type": "boolean" - }, - "name": { - "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. 
`idx_832910498`.\n", - "type": "string" - }, - "optimizeTopK": { - "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nAn array of strings defining sort expressions that you want to optimize.\nThis is also known as _WAND optimization_ (introduced in v3.12.0).\n\nIf you query a View with the `SEARCH` operation in combination with a\n`SORT` and `LIMIT` operation, search results can be retrieved faster if the\n`SORT` expression matches one of the optimized expressions.\n\nOnly sorting by highest rank is supported, that is, sorting by the result\nof a scoring function in descending order (`DESC`). Use `@doc` in the expression\nwhere you would normally pass the document variable emitted by the `SEARCH`\noperation to the scoring function.\n\nYou can define up to 64 expressions per View.\n\nExample: `[\"BM25(@doc) DESC\", \"TFIDF(@doc, true) DESC\"]`\n\nDefault: `[]`\n\nThis property is available in the Enterprise Edition only.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "parallelism": { - "description": "The number of threads to use for indexing the fields. Default: `2`\n", - "type": "integer" - }, - "primaryKeyCache": { - "description": "Enable this option to always cache the primary key column in memory. This can\nimprove the performance of queries that return many documents. Otherwise, these\nvalues are memory-mapped and it is up to the operating system to load them from\ndisk into memory and to evict them from memory.\n\nDefault: `false`\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", - "type": "boolean" - }, - "primarySort": { - "description": "You can define a primary sort order to enable an AQL optimization. 
If a query\niterates over all documents of a collection, wants to sort them by attribute values,\nand the (left-most) fields to sort by, as well as their sorting direction, match\nwith the `primarySort` definition, then the `SORT` operation is optimized away.\n", - "properties": { - "cache": { - "description": "Enable this option to always cache the primary sort columns in memory. This can\nimprove the performance of queries that utilize the primary sort order.\nOtherwise, these values are memory-mapped and it is up to the operating system\nto load them from disk into memory and to evict them from memory.\n\nDefault: `false`\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", - "type": "boolean" - }, - "compression": { - "description": "Defines how to compress the primary sort data. Possible values:\n- `\"lz4\"` (default): use LZ4 fast compression.\n- `\"none\"`: disable compression to trade space for speed.\n", - "type": "string" - }, - "fields": { - "description": "An array of the fields to sort the index by and the direction to sort each field in.\n", - "items": { - "properties": { - "direction": { - "description": "The sorting direction. Possible values:\n- `\"asc` for ascending\n- `\"desc\"` for descending\n", - "type": "string" - }, - "field": { - "description": "An attribute path. 
The `.` character denotes sub-attributes.\n", - "type": "string" - } + "clusterOnly": { + "description": "Whether the rule is applicable in the cluster deployment mode only.\n", + "type": "boolean" }, - "required": [ - "field", - "direction" - ], - "type": "object" + "disabledByDefault": { + "description": "Whether the optimizer considers this rule by default.\n", + "type": "boolean" + }, + "enterpriseOnly": { + "description": "Whether the rule is available in the Enterprise Edition only.\n", + "type": "boolean" + }, + "hidden": { + "description": "Whether the rule is displayed to users. Internal rules are hidden.\n", + "type": "boolean" + } }, - "type": "array" + "required": [ + "hidden", + "clusterOnly", + "canBeDisabled", + "canCreateAdditionalPlans", + "disabledByDefault", + "enterpriseOnly" + ], + "type": "object" + }, + "name": { + "description": "The name of the optimizer rule as seen in query explain outputs.\n", + "type": "string" } }, "required": [ - "fields" + "name", + "flags" ], "type": "object" }, - "searchField": { - "description": "This option only applies if you use the inverted index in a `search-alias` Views.\n\nYou can set the option to `true` to get the same behavior as with `arangosearch`\nViews regarding the indexing of array values as the default. If enabled, both,\narray and primitive values (strings, numbers, etc.) are accepted. Every element\nof an array is indexed according to the `trackListPositions` option.\n\nIf set to `false`, it depends on the attribute path. If it explicitly expands an\narray (`[*]`), then the elements are indexed separately. Otherwise, the array is\nindexed as a whole, but only `geopoint` and `aql` Analyzers accept array inputs.\nYou cannot use an array expansion if `searchField` is enabled.\n\nDefault: `false`\n", - "type": "boolean" - }, - "storedValues": { - "description": "The optional `storedValues` attribute can contain an array of objects with paths\nto additional attributes to store in the index. 
These additional attributes\ncannot be used for index lookups or for sorting, but they can be used for\nprojections. This allows an index to fully cover more queries and avoid extra\ndocument lookups.\n\nYou may use the following shorthand notations on index creation instead of\nan array of objects. The default compression and cache settings are used in\nthis case:\n\n- An array of strings, like `[\"attr1\", \"attr2\"]`, to place each attribute into\n a separate column of the index (introduced in v3.10.3).\n\n- An array of arrays of strings, like `[[\"attr1\", \"attr2\"]]`, to place the\n attributes into a single column of the index, or `[[\"attr1\"], [\"attr2\"]]`\n to place each attribute into a separate column. You can also mix it with the\n full form:\n\n ```json\n [\n [\"attr1\"],\n [\"attr2\", \"attr3\"],\n { \"fields\": [\"attr4\", \"attr5\"], \"cache\": true }\n ]\n ```\n", - "items": { - "properties": { - "cache": { - "description": "Enable this option to always cache stored values in memory. This can improve the\nquery performance if stored values are involved. Otherwise, these values are\nmemory-mapped and it is up to the operating system to load them from disk into\nmemory and to evict them from memory.\n\nDefault: `false`\n\nThis property is available in the Enterprise Edition only.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", - "type": "boolean" - }, - "compression": { - "description": "Defines how to compress the attribute values. Possible values:\n- `\"lz4\"` (default): use LZ4 fast compression.\n- `\"none\"`: disable compression to trade space for speed.\n", - "type": "string" - }, - "fields": { - "description": "A list of attribute paths. 
The `.` character denotes sub-attributes.\n", - "items": { - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "fields" - ], - "type": "object" - }, - "type": "array" - }, - "trackListPositions": { - "description": "This option only applies if you use the inverted index in a `search-alias` Views,\nand `searchField` needs to be `true`.\n\nIf set to `true`, then track the value position in arrays for array values.\nFor example, when querying a document like `{ attr: [ \"valueX\", \"valueY\", \"valueZ\" ] }`,\nyou need to specify the array element, e.g. `doc.attr[1] == \"valueY\"`.\n\nIf set to `false`, all values in an array are treated as equal alternatives.\nYou don't specify an array element in queries, e.g. `doc.attr == \"valueY\"`, and\nall elements are searched for a match.\n", - "type": "boolean" - }, - "type": { - "description": "Must be equal to `\"inverted\"`.\n", - "type": "string" - }, - "writebufferActive": { - "description": "Maximum number of concurrent active writers (segments) that perform a\ntransaction. Other writers (segments) wait till current active writers\n(segments) finish (default: 0, use 0 to disable)\n", - "type": "integer" - }, - "writebufferIdle": { - "description": "Maximum number of writers (segments) cached in the pool\n(default: 64, use 0 to disable)\n", - "type": "integer" - }, - "writebufferSizeMax": { - "description": "Maximum memory byte size per writer (segment) before a writer (segment) flush\nis triggered. `0` value turns off this limit for any writer (buffer) and data\nis flushed periodically based on the value defined for the flush thread\n(ArangoDB server startup option). 
`0` value should be used carefully due to\nhigh potential memory consumption\n(default: 33554432, use 0 to disable)\n", - "type": "integer" - } - }, - "required": [ - "type", - "fields" - ], - "type": "object" + "type": "array" + } } - } + }, + "description": "is returned if the list of optimizer rules can be retrieved successfully.\n" } }, + "summary": "List all AQL optimizer rules", + "tags": [ + "Queries" + ] + } + }, + "/_db/{database-name}/_api/query/slow": { + "delete": { + "description": "Clears the list of slow AQL queries for the current database.\n", + "operationId": "clearSlowAqlQueryList", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "If set to `true`, will clear the slow query history in all databases, not just\nthe selected one.\nUsing the parameter is only allowed in the `_system` database and with superuser\nprivileges.\n", + "in": "query", + "name": "all", + "required": false, + "schema": { + "type": "boolean" + } + } + ], "responses": { "200": { - "description": "If the index already exists, then a *HTTP 200* is returned.\n" - }, - "201": { - "description": "If the index does not already exist and can be created, then a *HTTP 201*\nis returned.\n" + "description": "The list of queries has been cleared successfully.\n" }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + "400": { + "description": "The request is malformed.\n" } }, - "summary": "Create an inverted index", + "summary": "Clear the list of slow AQL queries", "tags": [ - "Indexes" + "Queries" ] - } - }, - "/_api/index#persistent": { - "post": { - "description": "Creates a persistent index for the collection `collection-name`, if\nit does not already exist. 
The call expects an object containing the index\ndetails.\n\nIn a sparse index all documents will be excluded from the index that do not\ncontain at least one of the specified index attributes (i.e. `fields`) or that\nhave a value of `null` in any of the specified index attributes. Such documents\nwill not be indexed, and not be taken into account for uniqueness checks if\nthe `unique` flag is set.\n\nIn a non-sparse index, these documents will be indexed (for non-present\nindexed attributes, a value of `null` will be used) and will be taken into\naccount for uniqueness checks if the `unique` flag is set.\n\n\u003e **INFO:**\nUnique indexes on non-shard keys are not supported in cluster deployments.\n", - "operationId": "createIndexPersistent", + }, + "get": { + "description": "Returns an array containing the last AQL queries that are finished and\nhave exceeded the slow query threshold in the selected database.\nThe maximum amount of queries in the list can be controlled by setting\nthe query tracking property `maxSlowQueries`. 
The threshold for treating\na query as *slow* can be adjusted by setting the query tracking property\n`slowQueryThreshold`.\n\nEach query is a JSON object with the following attributes:\n\n- `id`: the query's id\n\n- `database`: the name of the database the query runs in\n\n- `user`: the name of the user that started the query\n\n- `query`: the query string (potentially truncated)\n\n- `bindVars`: the bind parameter values used by the query\n\n- `started`: the date and time when the query was started\n\n- `runTime`: the query's total run time\n\n- `peakMemoryUsage`: the query's peak memory usage in bytes (in increments of 32KB)\n\n- `state`: the query's current execution state (will always be \"finished\"\n for the list of slow queries)\n\n- `stream`: whether or not the query uses a streaming cursor\n", + "operationId": "listSlowAqlQueries", "parameters": [ { - "description": "The collection name.\n", - "in": "query", - "name": "collection", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "cacheEnabled": { - "description": "This attribute controls whether an extra in-memory hash cache is\ncreated for the index. The hash cache can be used to speed up index lookups.\nThe cache can only be used for queries that look up all index attributes via\nan equality lookup (`==`). The hash cache cannot be used for range scans,\npartial lookups or sorting.\n\nThe cache will be populated lazily upon reading data from the index. Writing data\ninto the collection or updating existing data will invalidate entries in the\ncache. 
The cache may have a negative effect on performance in case index values\nare updated more often than they are read.\n\nThe maximum size of cache entries that can be stored is currently 4 MB, i.e.\nthe cumulated size of all index entries for any index lookup value must be\nless than 4 MB. This limitation is there to avoid storing the index entries\nof \"super nodes\" in the cache.\n\n`cacheEnabled` defaults to `false` and should only be used for indexes that\nare known to benefit from an extra layer of caching.\n", - "type": "boolean" - }, - "deduplicate": { - "description": "The attribute controls whether inserting duplicate index values\nfrom the same document into a unique array index will lead to a unique constraint\nerror or not. The default value is `true`, so only a single instance of each\nnon-unique index value will be inserted into the index per document. Trying to\ninsert a value into the index that already exists in the index will always fail,\nregardless of the value of this attribute.\n", - "type": "boolean" - }, - "estimates": { - "description": "This attribute controls whether index selectivity estimates are maintained for the\nindex. Not maintaining index selectivity estimates can have a slightly positive\nimpact on write performance.\n\nThe downside of turning off index selectivity estimates will be that\nthe query optimizer will not be able to determine the usefulness of different\ncompeting indexes in AQL queries when there are multiple candidate indexes to\nchoose from.\n\nThe `estimates` attribute is optional and defaults to `true` if not set. It will\nhave no effect on indexes other than `persistent`.\n", - "type": "boolean" - }, - "fields": { - "description": "An array of attribute paths.\n\nThe `.` character denotes sub-attributes in attribute paths. Attributes with\nliteral `.` in their name cannot be indexed. 
Attributes with the name `_id`\ncannot be indexed either, neither as a top-level attribute nor as a sub-attribute.\n\nYou can expand one array attribute with `[*]`.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "inBackground": { - "description": "This attribute can be set to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. The default value is `false`.\n", - "type": "boolean" - }, - "name": { - "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. `idx_832910498`.\n", - "type": "string" - }, - "sparse": { - "description": "If `true`, then create a sparse index. Defaults to `false`.\n", - "type": "boolean" - }, - "storedValues": { - "description": "The optional `storedValues` attribute can contain an array of paths to additional\nattributes to store in the index. These additional attributes cannot be used for\nindex lookups or for sorting, but they can be used for projections. This allows an\nindex to fully cover more queries and avoid extra document lookups.\nThe maximum number of attributes in `storedValues` is 32.\n\nIt is not possible to create multiple indexes with the same `fields` attributes\nand uniqueness but different `storedValues` attributes. 
That means the value of\n`storedValues` is not considered by index creation calls when checking if an\nindex is already present or needs to be created.\n\nIn unique indexes, only the attributes in `fields` are checked for uniqueness,\nbut the attributes in `storedValues` are not checked for their uniqueness.\nNon-existing attributes are stored as `null` values inside `storedValues`.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "type": { - "description": "Must be equal to `\"persistent\"`.\n", - "type": "string" - }, - "unique": { - "description": "If `true`, then create a unique index. Defaults to `false`.\nIn unique indexes, only the attributes in `fields` are checked for uniqueness,\nbut the attributes in `storedValues` are not checked for their uniqueness.\n", - "type": "boolean" - } - }, - "required": [ - "type", - "fields" - ], - "type": "object" - } + }, + { + "description": "If set to `true`, will return the slow queries from all databases, not just\nthe selected one.\nUsing the parameter is only allowed in the `_system` database and with superuser\nprivileges.\n", + "in": "query", + "name": "all", + "required": false, + "schema": { + "type": "boolean" } } - }, + ], "responses": { "200": { - "description": "If the index already exists, then a *HTTP 200* is\nreturned.\n" - }, - "201": { - "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + "description": "Is returned when the list of queries can be retrieved successfully.\n" }, "400": { - "description": "If the collection already contains documents and you try to create a unique\npersistent index in such a way that there are documents violating the\nuniqueness, then a *HTTP 400* is returned.\n" + "description": "The request is malformed.\n" }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + "403": { + "description": "In case the `all` parameter is used but the request was made 
in a\ndifferent database than `_system`, or by a non-privileged user.\n" } }, - "summary": "Create a persistent index", + "summary": "List the slow AQL queries", "tags": [ - "Indexes" + "Queries" ] } }, - "/_api/index#ttl": { - "post": { - "description": "Creates a time-to-live (TTL) index for the collection `collection-name` if it\ndoes not already exist. The call expects an object containing the index\ndetails.\n", - "operationId": "createIndexTtl", + "/_db/{database-name}/_api/query/{query-id}": { + "delete": { + "description": "Kills a running query in the currently selected database. The query will be\nterminated at the next cancelation point.\n", + "operationId": "deleteAqlQuery", "parameters": [ { - "description": "The collection name.\n", - "in": "query", - "name": "collection", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "expireAfter": { - "description": "The time interval (in seconds) from the point in time stored in the `fields`\nattribute after which the documents count as expired. Can be set to `0` to let\ndocuments expire as soon as the server time passes the point in time stored in\nthe document attribute, or to a higher number to delay the expiration.\n", - "type": "number" - }, - "fields": { - "description": "an array with exactly one attribute path.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "inBackground": { - "description": "You can set this option to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. 
The default value is `false`.\n", - "type": "boolean" - }, - "name": { - "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. `idx_832910498`.\n", - "type": "string" - }, - "type": { - "description": "must be equal to `\"ttl\"`.\n", - "type": "string" - } - }, - "required": [ - "type", - "fields", - "expireAfter" - ], - "type": "object" - } + }, + { + "description": "The identifier of the query.\n", + "in": "path", + "name": "query-id", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "If set to `true`, will attempt to kill the specified query in all databases,\nnot just the selected one.\nUsing the parameter is only allowed in the `_system` database and with superuser\nprivileges.\n", + "in": "query", + "name": "all", + "required": false, + "schema": { + "type": "boolean" } } - }, + ], "responses": { "200": { - "description": "If the index already exists, then a *HTTP 200* is returned.\n" - }, - "201": { - "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + "description": "The query was still running when the kill request was executed and\nthe query's kill flag has been set.\n" }, "400": { - "description": "If the collection already contains another TTL index, then an *HTTP 400* is\nreturned, as there can be at most one TTL index per collection.\n" + "description": "The request is malformed.\n" + }, + "403": { + "description": "In case the `all` parameter is used but the request was made in a\ndifferent database than `_system`, or by a non-privileged user.\n" }, "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" + "description": "A query with the specified identifier cannot be found.\n" } }, - "summary": 
"Create a TTL index", + "summary": "Kill a running AQL query", "tags": [ - "Indexes" + "Queries" ] } }, - "/_api/index#zkd": { - "post": { - "description": "Creates a multi-dimensional index for the collection `collection-name`, if\nit does not already exist. The call expects an object containing the index\ndetails.\n", - "operationId": "createIndexZkd", + "/_db/{database-name}/_api/replication/applier-config": { + "get": { + "description": "Returns the configuration of the replication applier.\n\nThe body of the response is a JSON object with the configuration. The\nfollowing attributes may be present in the configuration:\n\n- `endpoint`: the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\").\n\n- `database`: the name of the database to connect to (e.g. \"_system\").\n\n- `username`: an optional ArangoDB username to use when connecting to the endpoint.\n\n- `password`: the password to use when connecting to the endpoint.\n\n- `maxConnectRetries`: the maximum number of connection attempts the applier\n will make in a row. If the applier cannot establish a connection to the\n endpoint in this number of attempts, it will stop itself.\n\n- `connectTimeout`: the timeout (in seconds) when attempting to connect to the\n endpoint. 
This value is used for each connection attempt.\n\n- `requestTimeout`: the timeout (in seconds) for individual requests to the endpoint.\n\n- `chunkSize`: the requested maximum size for log transfer packets that\n is used when the endpoint is contacted.\n\n- `autoStart`: whether or not to auto-start the replication applier on\n (next and following) server starts\n\n- `adaptivePolling`: whether or not the replication applier will use\n adaptive polling.\n\n- `includeSystem`: whether or not system collection operations will be applied\n\n- `autoResync`: whether or not the follower should perform a full automatic\n resynchronization with the leader in case the leader cannot serve log data\n requested by the follower, or when the replication is started and no tick\n value\n can be found.\n\n- `autoResyncRetries`: number of resynchronization retries that will be performed\n in a row when automatic resynchronization is enabled and kicks in. Setting this\n to `0` will effectively disable `autoResync`. Setting it to some other value\n will limit the number of retries that are performed. This helps preventing endless\n retries in case resynchronizations always fail.\n\n- `initialSyncMaxWaitTime`: the maximum wait time (in seconds) that the initial\n synchronization will wait for a response from the leader when fetching initial\n collection data.\n This wait time can be used to control after what time the initial synchronization\n will give up waiting for a response and fail. 
This value is relevant even\n for continuous replication when `autoResync` is set to `true` because this\n may re-start the initial synchronization when the leader cannot provide\n log data the follower requires.\n This value will be ignored if set to `0`.\n\n- `connectionRetryWaitTime`: the time (in seconds) that the applier will\n intentionally idle before it retries connecting to the leader in case of\n connection problems.\n This value will be ignored if set to `0`.\n\n- `idleMinWaitTime`: the minimum wait time (in seconds) that the applier will\n intentionally idle before fetching more log data from the leader in case\n the leader has already sent all its log data. This wait time can be used\n to control the frequency with which the replication applier sends HTTP log\n fetch requests to the leader in case there is no write activity on the leader.\n This value will be ignored if set to `0`.\n\n- `idleMaxWaitTime`: the maximum wait time (in seconds) that the applier will\n intentionally idle before fetching more log data from the leader in case the\n leader has already sent all its log data and there have been previous log\n fetch attempts that resulted in no more log data. This wait time can be used\n to control the maximum frequency with which the replication applier sends HTTP\n log fetch requests to the leader in case there is no write activity on the\n leader for longer periods. This configuration value will only be used if the\n option `adaptivePolling` is set to `true`.\n This value will be ignored if set to `0`.\n\n- `requireFromPresent`: if set to `true`, then the replication applier will check\n at start whether the start tick from which it starts or resumes replication is\n still present on the leader. If not, then there would be data loss. If\n `requireFromPresent` is `true`, the replication applier will abort with an\n appropriate error message. 
If set to `false`, then the replication applier will\n still start, and ignore the data loss.\n\n- `verbose`: if set to `true`, then a log line will be emitted for all operations\n performed by the replication applier. This should be used for debugging\n replication\n problems only.\n\n- `restrictType`: the configuration for `restrictCollections`\n\n- `restrictCollections`: the optional array of collections to include or exclude,\n based on the setting of `restrictType`\n", + "operationId": "getReplicationApplierConfig", "parameters": [ { - "description": "The collection name.\n", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "If set to `true`, returns the configuration of the global replication applier for all\ndatabases. If set to `false`, returns the configuration of the replication applier in the\nselected database.\n", "in": "query", - "name": "collection", + "name": "global", + "required": false, + "schema": { + "type": "boolean" + } + } + ], + "responses": { + "200": { + "description": "is returned if the request was executed successfully.\n" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.\n" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.\n" + } + }, + "summary": "Get the replication applier configuration", + "tags": [ + "Replication" + ] + }, + "put": { + "description": "Sets the configuration of the replication applier. The configuration can\nonly be changed while the applier is not running. 
The updated configuration\nwill be saved immediately but only become active with the next start of the\napplier.\n\nIn case of success, the body of the response is a JSON object with the updated\nconfiguration.\n", + "operationId": "updateReplicationApplierConfig", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } + }, + { + "description": "If set to `true`, adjusts the configuration of the global replication applier for all\ndatabases. If set to `false`, adjusts the configuration of the replication applier in the\nselected database.\n", + "in": "query", + "name": "global", + "required": false, + "schema": { + "type": "boolean" + } } ], "requestBody": { @@ -18135,38 +23359,108 @@ "application/json": { "schema": { "properties": { - "fieldValueTypes": { - "description": "must be equal to `\"double\"`. Currently only doubles are supported as values.\n", + "adaptivePolling": { + "description": "if set to `true`, the replication applier will fall\nto sleep for an increasingly long period in case the logger server at the\nendpoint does not have any more replication events to apply. Using\nadaptive polling is thus useful to reduce the amount of work for both the\napplier and the logger server for cases when there are only infrequent\nchanges. 
The downside is that when using adaptive polling, it might take\nlonger for the replication applier to detect that there are new replication\nevents on the logger server.\n\nSetting `adaptivePolling` to false will make the replication applier\ncontact the logger server in a constant interval, regardless of whether\nthe logger server provides updates frequently or seldom.\n", + "type": "boolean" + }, + "autoResync": { + "description": "whether or not the follower should perform a full automatic resynchronization\nwith the leader in case the leader cannot serve log data requested by the\nfollower, or when the replication is started and no tick value can be found.\n", + "type": "boolean" + }, + "autoResyncRetries": { + "description": "number of resynchronization retries that will be performed in a row when\nautomatic resynchronization is enabled and kicks in. Setting this to `0`\nwill\neffectively disable `autoResync`. Setting it to some other value will limit\nthe number of retries that are performed. This helps preventing endless\nretries\nin case resynchronizations always fail.\n", + "type": "integer" + }, + "autoStart": { + "description": "whether or not to auto-start the replication applier on\n(next and following) server starts\n", + "type": "boolean" + }, + "chunkSize": { + "description": "the requested maximum size for log transfer packets that\nis used when the endpoint is contacted.\n", + "type": "integer" + }, + "connectTimeout": { + "description": "the timeout (in seconds) when attempting to connect to the\nendpoint. This value is used for each connection attempt.\n", + "type": "integer" + }, + "connectionRetryWaitTime": { + "description": "the time (in seconds) that the applier will intentionally idle before\nit retries connecting to the leader in case of connection problems.\nThis value will be ignored if set to `0`.\n", + "type": "integer" + }, + "database": { + "description": "the name of the database on the endpoint. 
If not specified, defaults to the current local database name.\n", "type": "string" }, - "fields": { - "description": "an array of attribute names used for each dimension. Array expansions are not allowed.\n", + "endpoint": { + "description": "the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\"). The endpoint must be specified.\n", + "type": "string" + }, + "idleMaxWaitTime": { + "description": "the maximum wait time (in seconds) that the applier will intentionally idle\nbefore fetching more log data from the leader in case the leader has\nalready sent all its log data and there have been previous log fetch attempts\nthat resulted in no more log data. This wait time can be used to control the\nmaximum frequency with which the replication applier sends HTTP log fetch\nrequests to the leader in case there is no write activity on the leader for\nlonger periods. This configuration value will only be used if the option\n`adaptivePolling` is set to `true`.\nThis value will be ignored if set to `0`.\n", + "type": "integer" + }, + "idleMinWaitTime": { + "description": "the minimum wait time (in seconds) that the applier will intentionally idle\nbefore fetching more log data from the leader in case the leader has\nalready sent all its log data. This wait time can be used to control the\nfrequency with which the replication applier sends HTTP log fetch requests\nto the leader in case there is no write activity on the leader.\nThis value will be ignored if set to `0`.\n", + "type": "integer" + }, + "includeSystem": { + "description": "whether or not system collection operations will be applied\n", + "type": "boolean" + }, + "initialSyncMaxWaitTime": { + "description": "the maximum wait time (in seconds) that the initial synchronization will\nwait for a response from the leader when fetching initial collection data.\nThis wait time can be used to control after what time the initial\nsynchronization\nwill give up waiting for a response and fail. 
This value is relevant even\nfor continuous replication when `autoResync` is set to `true` because this\nmay re-start the initial synchronization when the leader cannot provide\nlog data the follower requires.\nThis value will be ignored if set to `0`.\n", + "type": "integer" + }, + "maxConnectRetries": { + "description": "the maximum number of connection attempts the applier\nwill make in a row. If the applier cannot establish a connection to the\nendpoint in this number of attempts, it will stop itself.\n", + "type": "integer" + }, + "password": { + "description": "the password to use when connecting to the endpoint.\n", + "type": "string" + }, + "requestTimeout": { + "description": "the timeout (in seconds) for individual requests to the endpoint.\n", + "type": "integer" + }, + "requireFromPresent": { + "description": "if set to `true`, then the replication applier will check\nat start whether the start tick from which it starts or resumes replication is\nstill present on the leader. If not, then there would be data loss. If\n`requireFromPresent` is `true`, the replication applier will abort with an\nappropriate error message. If set to `false`, then the replication applier will\nstill start, and ignore the data loss.\n", + "type": "boolean" + }, + "restrictCollections": { + "description": "the array of collections to include or exclude,\nbased on the setting of `restrictType`\n", "items": { "type": "string" }, "type": "array" }, - "inBackground": { - "description": "You can set this option to `true` to create the index\nin the background, which will not write-lock the underlying collection for\nas long as if the index is built in the foreground. 
The default value is `false`.\n", - "type": "boolean" - }, - "name": { - "description": "An easy-to-remember name for the index to look it up or refer to it in index hints.\nIndex names are subject to the same character restrictions as collection names.\nIf omitted, a name is auto-generated so that it is unique with respect to the\ncollection, e.g. `idx_832910498`.\n", + "restrictType": { + "description": "the configuration for `restrictCollections`; Has to be either `include` or `exclude`\n", "type": "string" }, - "type": { - "description": "must be equal to `\"zkd\"`.\n", + "username": { + "description": "an optional ArangoDB username to use when connecting to the endpoint.\n", "type": "string" }, - "unique": { - "description": "if `true`, then create a unique index.\n", + "verbose": { + "description": "if set to `true`, then a log line will be emitted for all operations\nperformed by the replication applier. This should be used for debugging replication\nproblems only.\n", "type": "boolean" } }, "required": [ - "type", - "fields", - "fieldValueTypes" + "endpoint", + "database", + "password", + "maxConnectRetries", + "connectTimeout", + "requestTimeout", + "chunkSize", + "autoStart", + "adaptivePolling", + "includeSystem", + "requireFromPresent", + "verbose", + "restrictType" ], "type": "object" } @@ -18175,241 +23469,199 @@ }, "responses": { "200": { - "description": "If the index already exists, then a *HTTP 200* is\nreturned.\n" - }, - "201": { - "description": "If the index does not already exist and could be created, then a *HTTP 201*\nis returned.\n" + "description": "is returned if the request was executed successfully.\n" }, "400": { - "description": "If the index definition is invalid, then a *HTTP 400* is returned.\n" - }, - "404": { - "description": "If the `collection-name` is unknown, then a *HTTP 404* is returned.\n" - } - }, - "summary": "Create a multi-dimensional index", - "tags": [ - "Indexes" - ] - } - }, - "/_api/index/{index-id}": { - 
"delete": { - "description": "Deletes an index with `index-id`.\n", - "operationId": "deleteIndex", - "parameters": [ - { - "description": "The index id.\n", - "in": "path", - "name": "index-id", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "If the index could be deleted, then an *HTTP 200* is\nreturned.\n" + "description": "is returned if the configuration is incomplete or malformed, or if the\nreplication applier is currently running.\n" }, - "404": { - "description": "If the `index-id` is unknown, then an *HTTP 404* is returned.\n" - } - }, - "summary": "Delete an index", - "tags": [ - "Indexes" - ] - }, - "get": { - "description": "The result is an object describing the index. It has at least the following\nattributes:\n\n- `id`: the identifier of the index\n\n- `type`: the index type\n\nAll other attributes are type-dependent. For example, some indexes provide\n`unique` or `sparse` flags, whereas others don't. Some indexes also provide\na selectivity estimate in the `selectivityEstimate` attribute of the result.\n", - "operationId": "getIndex", - "parameters": [ - { - "description": "The index identifier.\n", - "in": "path", - "name": "index-id", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "200": { - "description": "If the index exists, then a *HTTP 200* is returned.\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" }, - "404": { - "description": "If the index does not exist, then a *HTTP 404*\nis returned.\n" + "500": { + "description": "is returned if an error occurred while assembling the response.\n" } }, - "summary": "Get an index", + "summary": "Update the replication applier configuration", "tags": [ - "Indexes" + "Replication" ] } - }, - "/_api/job/{job-id}": { - "delete": { - "description": "Deletes either all job results, expired job results, or the result of a\nspecific job.\nClients can use this method to 
perform an eventual garbage collection of job\nresults.\n", - "operationId": "deleteJob", + }, + "/_db/{database-name}/_api/replication/applier-start": { + "put": { + "description": "Starts the replication applier. This will return immediately if the\nreplication applier is already running.\n\nIf the replication applier is not already running, the applier configuration\nwill be checked, and if it is complete, the applier will be started in a\nbackground thread. This means that even if the applier will encounter any\nerrors while running, they will not be reported in the response to this\nmethod.\n\nTo detect replication applier errors after the applier was started, use the\n`/_api/replication/applier-state` API instead.\n", + "operationId": "startReplicationApplier", "parameters": [ { - "description": "The ID of the job to delete. The ID can be:\n- `all`: Deletes all jobs results. Currently executing or queued async\n jobs are not stopped by this call.\n- `expired`: Deletes expired results. To determine the expiration status of a\n result, pass the stamp query parameter. stamp needs to be a Unix timestamp,\n and all async job results created before this time are deleted.\n- **A numeric job ID**: In this case, the call removes the result of the\n specified async job. If the job is currently executing or queued, it is\n not aborted.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "job-id", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "A Unix timestamp specifying the expiration threshold for when the `job-id` is\nset to `expired`.\n", + "description": "If set to `true`, starts the global replication applier for all\ndatabases. 
If set to `false`, starts the replication applier in the\nselected database.\n", "in": "query", - "name": "stamp", + "name": "global", "required": false, "schema": { - "type": "number" + "type": "boolean" + } + }, + { + "description": "The remote `lastLogTick` value from which to start applying. If not specified,\nthe last saved tick from the previous applier run is used. If there is no\nprevious applier state saved, the applier will start at the beginning of the\nlogger server's log.\n", + "in": "query", + "name": "from", + "required": false, + "schema": { + "type": "string" } } ], "responses": { "200": { - "description": "is returned if the deletion operation was carried out successfully.\nThis code will also be returned if no results were deleted.\n" + "description": "is returned if the request was executed successfully.\n" }, "400": { - "description": "is returned if `job-id` is not specified or has an invalid value.\n" + "description": "is returned if the replication applier is not fully configured or the\nconfiguration is invalid.\n" }, - "404": { - "description": "is returned if `job-id` is a syntactically valid job ID but no async job with\nthe specified ID is found.\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.\n" } }, - "summary": "Delete async job results", + "summary": "Start the replication applier", "tags": [ - "Jobs" + "Replication" ] - }, + } + }, + "/_db/{database-name}/_api/replication/applier-state": { "get": { - "description": "This endpoint returns either of the following, depending on the specified value\nfor the `job-id` parameter:\n\n- The IDs of async jobs with a specific status\n- The processing status of a specific async job\n", - "operationId": "getJob", + "description": "Returns the state of the replication applier, regardless of whether the\napplier is currently running or not.\n\nThe response is a 
JSON object with the following attributes:\n\n- `state`: a JSON object with the following sub-attributes:\n\n - `running`: whether or not the applier is active and running\n\n - `lastAppliedContinuousTick`: the last tick value from the continuous\n replication log the applier has applied.\n\n - `lastProcessedContinuousTick`: the last tick value from the continuous\n replication log the applier has processed.\n\n Regularly, the last applied and last processed tick values should be\n identical. For transactional operations, the replication applier will first\n process incoming log events before applying them, so the processed tick\n value might be higher than the applied tick value. This will be the case\n until the applier encounters the *transaction commit* log event for the\n transaction.\n\n - `lastAvailableContinuousTick`: the last tick value the remote server can\n provide, for all databases.\n\n - `ticksBehind`: this attribute will be present only if the applier is currently\n running. It will provide the number of log ticks between what the applier\n has applied/seen and the last log tick value provided by the remote server.\n If this value is zero, then both servers are in sync. 
If this is non-zero,\n then the remote server has additional data that the applier has not yet\n fetched and processed, or the remote server may have more data that is not\n applicable to the applier.\n\n Client applications can use it to determine approximately how far the applier\n is behind the remote server, and can periodically check if the value is\n increasing (applier is falling behind) or decreasing (applier is catching up).\n\n Please note that as the remote server will only keep one last log tick value\n for all of its databases, but replication may be restricted to just certain\n databases on the applier, this value is more meaningful when the global applier\n is used.\n Additionally, the last log tick provided by the remote server may increase\n due to writes into system collections that are not replicated due to replication\n configuration. So the reported value may exaggerate the reality a bit for\n some scenarios.\n\n - `time`: the time on the applier server.\n\n - `totalRequests`: the total number of requests the applier has made to the\n endpoint.\n\n - `totalFailedConnects`: the total number of failed connection attempts the\n applier has made.\n\n - `totalEvents`: the total number of log events the applier has processed.\n\n - `totalOperationsExcluded`: the total number of log events excluded because\n of `restrictCollections`.\n\n - `progress`: a JSON object with details about the replication applier progress.\n It contains the following sub-attributes if there is progress to report:\n\n - `message`: a textual description of the progress\n\n - `time`: the date and time the progress was logged\n\n - `failedConnects`: the current number of failed connection attempts\n\n - `lastError`: a JSON object with details about the last error that happened on\n the applier. 
It contains the following sub-attributes if there was an error:\n\n - `errorNum`: a numerical error code\n\n - `errorMessage`: a textual error description\n\n - `time`: the date and time the error occurred\n\n In case no error has occurred, `lastError` will be empty.\n\n- `server`: a JSON object with the following sub-attributes:\n\n - `version`: the applier server's version\n\n - `serverId`: the applier server's id\n\n- `endpoint`: the endpoint the applier is connected to (if applier is\n active) or will connect to (if applier is currently inactive)\n\n- `database`: the name of the database the applier is connected to (if applier is\n active) or will connect to (if applier is currently inactive)\n\nPlease note that all \"tick\" values returned do not have a specific unit. Tick\nvalues are only meaningful when compared to each other. Higher tick values mean\n\"later in time\" than lower tick values.\n", + "operationId": "getReplicationApplierState", "parameters": [ { - "description": "If you provide a value of `pending` or `done`, then the endpoint returns an\narray of strings with the job IDs of ongoing or completed async jobs.\n\nIf you provide a numeric job ID, then the endpoint returns the status of the\nspecific async job in the form of an HTTP reply without payload. Check the\nHTTP status code of the response for the job status.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "job-id", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The maximum number of job IDs to return per call. If not specified, a\nserver-defined maximum value is used. Only applicable if you specify `pending`\nor `done` as `job-id` to list jobs.\n", + "description": "If set to `true`, returns the state of the global replication applier for all\ndatabases. 
If set to `false`, returns the state of the replication applier in the\nselected database.\n", "in": "query", - "name": "count", + "name": "global", "required": false, "schema": { - "type": "number" + "type": "boolean" } } ], "responses": { "200": { - "description": "is returned if the job with the specified `job-id` has finished and you can\nfetch its results, or if your request for the list of `pending` or `done` jobs\nis successful (the list might be empty).\n" - }, - "204": { - "description": "is returned if the job with the specified `job-id` is still in the queue of\npending (or not yet finished) jobs.\n" + "description": "is returned if the request was executed successfully.\n" }, - "400": { - "description": "is returned if you specified an invalid value for `job-id` or no value.\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" }, - "404": { - "description": "is returned if the job cannot be found or is already fetched or deleted from the\njob result list.\n" + "500": { + "description": "is returned if an error occurred while assembling the response.\n" } }, - "summary": "List async jobs by status or get the status of specific job", + "summary": "Get the replication applier state", "tags": [ - "Jobs" + "Replication" ] - }, + } + }, + "/_db/{database-name}/_api/replication/applier-stop": { "put": { - "description": "Returns the result of an async job identified by `job-id`. If the async job\nresult is present on the server, the result will be removed from the list of\nresult. That means this method can be called for each job-id once.\nThe method will return the original job result's headers and body, plus the\nadditional HTTP header x-arango-async-job-id. If this header is present,\nthen\nthe job was found and the response contains the original job's result. 
If\nthe header is not present, the job was not found and the response contains\nstatus information from the job manager.\n", - "operationId": "getJobResult", + "description": "Stops the replication applier. This will return immediately if the\nreplication applier is not running.\n", + "operationId": "stopReplicationApplier", "parameters": [ { - "description": "The async job id.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "job-id", + "name": "database-name", "required": true, "schema": { "type": "string" } + }, + { + "description": "If set to `true`, stops the global replication applier for all\ndatabases. If set to `false`, stops the replication applier in the\nselected database.\n", + "in": "query", + "name": "global", + "required": false, + "schema": { + "type": "boolean" + } } ], "responses": { - "204": { - "description": "is returned if the job requested via job-id is still in the queue of pending\n(or not yet finished) jobs. In this case, no x-arango-async-id HTTP header\nwill be returned.\n" + "200": { + "description": "is returned if the request was executed successfully.\n" }, - "400": { - "description": "is returned if no job-id was specified in the request. In this case,\nno x-arango-async-id HTTP header will be returned.\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" }, - "404": { - "description": "is returned if the job was not found or already deleted or fetched from\nthe job result list. In this case, no x-arango-async-id HTTP header will\nbe returned.\n" + "500": { + "description": "is returned if an error occurred while assembling the response.\n" } }, - "summary": "Get the results of an async job", + "summary": "Stop the replication applier", "tags": [ - "Jobs" + "Replication" ] } }, - "/_api/job/{job-id}/cancel": { - "put": { - "description": "Cancels the currently running job identified by job-id. 
Note that it still\nmight take some time to actually cancel the running async job.\n", - "operationId": "cancelJob", + "/_db/{database-name}/_api/replication/batch": { + "post": { + "description": "\u003e **INFO:**\nThis is an internally used endpoint.\n\n\nCreates a new dump batch and returns the batch's id.\n\nThe response is a JSON object with the following attributes:\n\n- `id`: the id of the batch\n- `lastTick`: snapshot tick value using when creating the batch\n- `state`: additional leader state information (only present if the\n `state` URL parameter was set to `true` in the request)\n\n\u003e **INFO:**\nOn a Coordinator, this request must have a `DBserver`\nquery parameter which must be an ID of a DB-Server.\nThe very same request is forwarded synchronously to that DB-Server.\nIt is an error if this attribute is not bound in the Coordinator case.\n", + "operationId": "createReplicationBatch", "parameters": [ { - "description": "The async job id.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "job-id", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "responses": { - "200": { - "description": "cancel has been initiated.\n" - }, - "400": { - "description": "is returned if no job-id was specified in the request. In this case,\nno x-arango-async-id HTTP header will be returned.\n" }, - "404": { - "description": "is returned if the job was not found or already deleted or fetched from\nthe job result list. 
In this case, no x-arango-async-id HTTP header will\nbe returned.\n" + { + "description": "setting `state` to true will make the response also contain\na `state` attribute with information about the leader state.\nThis is used only internally during the replication process\nand should not be used by client applications.\n", + "in": "query", + "name": "state", + "required": false, + "schema": { + "type": "boolean" + } } - }, - "summary": "Cancel an async job", - "tags": [ - "Jobs" - ] - } - }, - "/_api/query": { - "post": { - "description": "This endpoint is for query validation only. To actually query the database,\nsee `/api/cursor`.\n", - "operationId": "parseAqlQuery", + ], "requestBody": { "content": { "application/json": { "schema": { "properties": { - "query": { - "description": "To validate a query string without executing it, the query string can be\npassed to the server via an HTTP POST request.\n", - "type": "string" + "ttl": { + "description": "The time-to-live for the new batch (in seconds).\n", + "type": "integer" } }, "required": [ - "query" + "ttl" ], "type": "object" } @@ -18418,128 +23670,140 @@ }, "responses": { "200": { - "description": "If the query is valid, the server will respond with *HTTP 200* and\nreturn the names of the bind parameters it found in the query (if any) in\nthe `bindVars` attribute of the response. It will also return an array\nof the collections used in the query in the `collections` attribute.\nIf a query can be parsed successfully, the `ast` attribute of the returned\nJSON will contain the abstract syntax tree representation of the query.\nThe format of the `ast` is subject to change in future versions of\nArangoDB, but it can be used to inspect how ArangoDB interprets a given\nquery. 
Note that the abstract syntax tree will be returned without any\noptimizations applied to it.\n" + "description": "is returned if the batch was created successfully.\n" }, "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request,\nor if the query contains a parse error. The body of the response will\ncontain the error details embedded in a JSON object.\n" + "description": "is returned if the TTL value is invalid or if the `DBserver` attribute\nis not specified or illegal on a Coordinator.\n" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.\n" } }, - "summary": "Parse an AQL query", + "summary": "Create a new dump batch", "tags": [ - "Queries" + "Replication" ] } }, - "/_api/query-cache": { + "/_db/{database-name}/_api/replication/batch/{id}": { "delete": { - "description": "Clears all results stored in the AQL query results cache for the current database.\n", - "operationId": "deleteAqlQueryCache", - "responses": { - "200": { - "description": "The server will respond with *HTTP 200* when the cache was cleared\nsuccessfully.\n" + "description": "\u003e **INFO:**\nThis is an internally used endpoint.\n\n\nDeletes the existing dump batch, allowing compaction and cleanup to resume.\n\n\u003e **INFO:**\nOn a Coordinator, this request must have a `DBserver`\nquery parameter which must be an ID of a DB-Server.\nThe very same request is forwarded synchronously to that DB-Server.\nIt is an error if this attribute is not bound in the Coordinator case.\n", + "operationId": "deleteReplicationBatch", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } }, - "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request.\n" + { + "description": "The id of the batch.\n", + "in": "path", + "name": "id", + "required": true, + "schema": 
{ + "type": "string" + } } - }, - "summary": "Clear the AQL query results cache", - "tags": [ - "Queries" - ] - } - }, - "/_api/query-cache/entries": { - "get": { - "description": "Returns an array containing the AQL query results currently stored in the query results\ncache of the selected database. Each result is a JSON object with the following attributes:\n\n- `hash`: the query result's hash\n- `query`: the query string\n- `bindVars`: the query's bind parameters. this attribute is only shown if tracking for\n bind variables was enabled at server start\n- `size`: the size of the query result and bind parameters, in bytes\n- `results`: number of documents/rows in the query result\n- `started`: the date and time when the query was stored in the cache\n- `hits`: number of times the result was served from the cache (can be\n `0` for queries that were only stored in the cache but were never accessed\n again afterwards)\n- `runTime`: the query's run time\n- `dataSources`: an array of collections/Views the query was using\n", - "operationId": "listQueryCacheResults", + ], "responses": { - "200": { - "description": "Is returned when the list of results can be retrieved successfully.\n" + "204": { + "description": "is returned if the batch was deleted successfully.\n" }, "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request,\n" - } - }, - "summary": "List the entries of the AQL query results cache", - "tags": [ - "Queries" - ] - } - }, - "/_api/query-cache/properties": { - "get": { - "description": "Returns the global AQL query results cache configuration. The configuration is a\nJSON object with the following properties:\n\n- `mode`: the mode the AQL query results cache operates in. 
The mode is one of the following\n values: `off`, `on`, or `demand`.\n\n- `maxResults`: the maximum number of query results that will be stored per database-specific\n cache.\n\n- `maxResultsSize`: the maximum cumulated size of query results that will be stored per\n database-specific cache.\n\n- `maxEntrySize`: the maximum individual result size of queries that will be stored per\n database-specific cache.\n\n- `includeSystem`: whether or not results of queries that involve system collections will be\n stored in the query results cache.\n", - "operationId": "getQueryCacheProperties", - "responses": { - "200": { - "description": "Is returned if the properties can be retrieved successfully.\n" + "description": "is returned if the batch was not found.\n" }, - "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request,\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" } }, - "summary": "Get the AQL query results cache configuration", + "summary": "Delete an existing dump batch", "tags": [ - "Queries" + "Replication" ] }, "put": { - "description": "Adjusts the global properties for the AQL query results cache.\n\nAfter the properties have been changed, the current set of properties will\nbe returned in the HTTP response.\n\nNote: changing the properties may invalidate all results in the cache.\n\nThe properties need to be passed in the `properties` attribute in the body\nof the HTTP request. 
`properties` needs to be a JSON object with the following\nproperties:\n", - "operationId": "setQueryCacheProperties", + "description": "\u003e **INFO:**\nThis is an internally used endpoint.\n\n\nExtends the time-to-live (TTL) of an existing dump batch, using the batch's ID and\nthe provided TTL value.\n\nIf the batch's TTL can be extended successfully, the response is empty.\n\n\u003e **INFO:**\nOn a Coordinator, this request must have a `DBserver`\nquery parameter which must be an ID of a DB-Server.\nThe very same request is forwarded synchronously to that DB-Server.\nIt is an error if this attribute is not bound in the Coordinator case.\n", + "operationId": "extendReplicationBatch", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The id of the batch.\n", + "in": "path", + "name": "id", + "required": true, + "schema": { + "type": "string" + } + } + ], "requestBody": { "content": { "application/json": { "schema": { "properties": { - "includeSystem": { - "description": "whether or not to store results of queries that involve system collections.\n", - "type": "boolean" - }, - "maxEntrySize": { - "description": "the maximum individual size of query results that will be stored per database-specific cache.\n", - "type": "integer" - }, - "maxResults": { - "description": "the maximum number of query results that will be stored per database-specific cache.\n", - "type": "integer" - }, - "maxResultsSize": { - "description": "the maximum cumulated size of query results that will be stored per database-specific cache.\n", + "ttl": { + "description": "the time-to-live for the new batch (in seconds)\n", "type": "integer" - }, - "mode": { - "description": "the mode the AQL query cache should operate in. 
Possible values are `off`, `on`, or `demand`.\n", - "type": "string" } }, + "required": [ + "ttl" + ], "type": "object" } } } }, "responses": { - "200": { - "description": "Is returned if the properties were changed successfully.\n" + "204": { + "description": "is returned if the batch's ttl was extended successfully.\n" }, "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request,\n" + "description": "is returned if the ttl value is invalid or the batch was not found.\n" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.\n" } }, - "summary": "Set the AQL query results cache configuration", + "summary": "Extend the TTL of a dump batch", "tags": [ - "Queries" + "Replication" ] } }, - "/_api/query/current": { + "/_db/{database-name}/_api/replication/clusterInventory": { "get": { - "description": "Returns an array containing the AQL queries currently running in the selected\ndatabase. Each query is a JSON object with the following attributes:\n\n- `id`: the query's id\n\n- `database`: the name of the database the query runs in\n\n- `user`: the name of the user that started the query\n\n- `query`: the query string (potentially truncated)\n\n- `bindVars`: the bind parameter values used by the query\n\n- `started`: the date and time when the query was started\n\n- `runTime`: the query's run time up to the point the list of queries was\n queried\n\n- `peakMemoryUsage`: the query's peak memory usage in bytes (in increments of 32KB)\n\n- `state`: the query's current execution state (as a string). 
One of:\n - `\"initializing\"`\n - `\"parsing\"`\n - `\"optimizing ast\"`\n - `\"loading collections\"`\n - `\"instantiating plan\"`\n - `\"optimizing plan\"`\n - `\"instantiating executors\"`\n - `\"executing\"`\n - `\"finalizing\"`\n - `\"finished\"`\n - `\"killed\"`\n - `\"invalid\"`\n\n- `stream`: whether or not the query uses a streaming cursor\n", - "operationId": "listAqlQueries", + "description": "Returns the array of collections and indexes available on the cluster.\n\nThe response will be an array of JSON objects, one for each collection.\nEach collection contains exactly two keys, `parameters` and `indexes`.\nThis information comes from `Plan/Collections/{DB-Name}/*` in the Agency,\njust that the `indexes` attribute there is relocated to adjust it to\nthe data format of arangodump.\n", + "operationId": "getReplicationClusterInventory", "parameters": [ { - "description": "If set to `true`, will return the currently running queries in all databases,\nnot just the selected one.\nUsing the parameter is only allowed in the system database and with superuser\nprivileges.\n", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Include system collections in the result. 
The default value is `true`.\n", "in": "query", - "name": "all", + "name": "includeSystem", "required": false, "schema": { "type": "boolean" @@ -18548,208 +23812,238 @@ ], "responses": { "200": { - "description": "Is returned when the list of queries can be retrieved successfully.\n" + "description": "is returned if the request was executed successfully.\n" }, - "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request,\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" }, - "403": { - "description": "*HTTP 403* is returned in case the `all` parameter was used, but the request\nwas made in a different database than _system, or by an non-privileged user.\n" + "500": { + "description": "is returned if an error occurred while assembling the response.\n" } }, - "summary": "List the running AQL queries", + "summary": "Get the cluster collections and indexes", "tags": [ - "Queries" + "Replication" ] } }, - "/_api/query/properties": { + "/_db/{database-name}/_api/replication/dump": { "get": { - "description": "Returns the current query tracking configuration. The configuration is a\nJSON object with the following properties:\n\n- `enabled`: if set to `true`, then queries will be tracked. If set to\n `false`, neither queries nor slow queries will be tracked.\n\n- `trackSlowQueries`: if set to `true`, then slow queries will be tracked\n in the list of slow queries if their runtime exceeds the value set in\n `slowQueryThreshold`. In order for slow queries to be tracked, the `enabled`\n property must also be set to `true`.\n\n- `trackBindVars`: if set to `true`, then bind variables used in queries will\n be tracked.\n\n- `maxSlowQueries`: the maximum number of slow queries to keep in the list\n of slow queries. If the list of slow queries is full, the oldest entry in\n it will be discarded when additional slow queries occur.\n\n- `slowQueryThreshold`: the threshold value for treating a query as slow. 
A\n query with a runtime greater or equal to this threshold value will be\n put into the list of slow queries when slow query tracking is enabled.\n The value for `slowQueryThreshold` is specified in seconds.\n\n- `maxQueryStringLength`: the maximum query string length to keep in the\n list of queries. Query strings can have arbitrary lengths, and this property\n can be used to save memory in case very long query strings are used. The\n value is specified in bytes.\n", - "operationId": "getAqlQueryTrackingProperties", - "responses": { - "200": { - "description": "Is returned if properties were retrieved successfully.\n" + "description": "Returns the data from a collection for the requested range.\n\nThe `chunkSize` query parameter can be used to control the size of the result.\nIt must be specified in bytes. The `chunkSize` value will only be honored\napproximately. Otherwise a too low `chunkSize` value could cause the server\nto not be able to put just one entry into the result and return it.\nTherefore, the `chunkSize` value will only be consulted after an entry has\nbeen written into the result. If the result size is then greater than\n`chunkSize`, the server will respond with as many entries as there are\nin the response already. If the result size is still less than `chunkSize`,\nthe server will try to return more data if there's more data left to return.\n\nIf `chunkSize` is not specified, some server-side default value will be used.\n\nThe `Content-Type` of the result is `application/x-arango-dump`. This is an\neasy-to-process format, with all entries going onto separate lines in the\nresponse body.\n\nEach line itself is a JSON object, with at least the following attributes:\n\n- `tick`: the operation's tick attribute\n\n- `key`: the key of the document/edge or the key used in the deletion operation\n\n- `rev`: the revision id of the document/edge or the deletion operation\n\n- `data`: the actual document/edge data for types 2300 and 2301. 
The full\n document/edge data will be returned even for updates.\n\n- `type`: the type of entry. Possible values for `type` are:\n\n - 2300: document insertion/update\n\n - 2301: edge insertion/update\n\n - 2302: document/edge deletion\n\n\u003e **INFO:**\nThere will be no distinction between inserts and updates when calling this method.\n", + "operationId": "getReplicationDump", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } }, - "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request,\n" - } - }, - "summary": "Get the AQL query tracking configuration", - "tags": [ - "Queries" - ] - }, - "put": { - "description": "The properties need to be passed in the attribute `properties` in the body\nof the HTTP request. `properties` needs to be a JSON object.\n\nAfter the properties have been changed, the current set of properties will\nbe returned in the HTTP response.\n", - "operationId": "updateAqlQueryTrackingProperties", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "enabled": { - "description": "If set to `true`, then queries will be tracked. If set to\n`false`, neither queries nor slow queries will be tracked.\n", - "type": "boolean" - }, - "maxQueryStringLength": { - "description": "The maximum query string length to keep in the list of queries.\nQuery strings can have arbitrary lengths, and this property\ncan be used to save memory in case very long query strings are used. The\nvalue is specified in bytes.\n", - "type": "integer" - }, - "maxSlowQueries": { - "description": "The maximum number of slow queries to keep in the list\nof slow queries. 
If the list of slow queries is full, the oldest entry in\nit will be discarded when additional slow queries occur.\n", - "type": "integer" - }, - "slowQueryThreshold": { - "description": "The threshold value for treating a query as slow. A\nquery with a runtime greater or equal to this threshold value will be\nput into the list of slow queries when slow query tracking is enabled.\nThe value for `slowQueryThreshold` is specified in seconds.\n", - "type": "integer" - }, - "trackBindVars": { - "description": "If set to `true`, then the bind variables used in queries will be tracked\nalong with queries.\n", - "type": "boolean" - }, - "trackSlowQueries": { - "description": "If set to `true`, then slow queries will be tracked\nin the list of slow queries if their runtime exceeds the value set in\n`slowQueryThreshold`. In order for slow queries to be tracked, the `enabled`\nproperty must also be set to `true`.\n", - "type": "boolean" - } - }, - "required": [ - "enabled", - "trackSlowQueries", - "trackBindVars", - "maxSlowQueries", - "slowQueryThreshold", - "maxQueryStringLength" - ], - "type": "object" - } + { + "description": "The name or id of the collection to dump.\n", + "in": "query", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Approximate maximum size of the returned result.\n", + "in": "query", + "name": "chunkSize", + "required": false, + "schema": { + "type": "number" + } + }, + { + "description": "The id of the snapshot to use\n", + "in": "query", + "name": "batchId", + "required": true, + "schema": { + "type": "number" } } - }, + ], "responses": { "200": { - "description": "Is returned if the properties were changed successfully.\n" + "description": "is returned if the request was executed successfully and data was returned. 
The header\n`x-arango-replication-lastincluded` is set to the tick of the last document returned.\n" }, - "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request,\n" + "204": { + "description": "is returned if the request was executed successfully, but there was no content available.\nThe header `x-arango-replication-lastincluded` is `0` in this case.\n" + }, + "404": { + "description": "is returned when the collection could not be found.\n" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.\n" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.\n" } }, - "summary": "Update the AQL query tracking configuration", + "summary": "Get a replication dump", "tags": [ - "Queries" + "Replication" ] } }, - "/_api/query/rules": { + "/_db/{database-name}/_api/replication/inventory": { "get": { - "description": "A list of all optimizer rules and their properties.\n", - "operationId": "getAqlQueryOptimizerRules", + "description": "Returns the array of collections and their indexes, and the array of Views available. These\narrays can be used by replication clients to initiate an initial synchronization with the\nserver.\nThe response will contain all collections, their indexes and views in the requested database\nif `global` is not set, and all collections, indexes and views in all databases if `global`\nis set.\nIn case `global` is not set, it is possible to restrict the response to a single collection\nby setting the `collection` parameter. In this case the response will contain only information\nabout the requested collection in the `collections` array, and no information about views\n(i.e. 
the `views` response attribute will be an empty array).\n\nThe response will contain a JSON object with the `collections`, `views`, `state` and\n`tick` attributes.\n\n`collections` is an array of collections with the following sub-attributes:\n\n- `parameters`: the collection properties\n\n- `indexes`: an array of the indexes of the collection. Primary indexes and edge indexes\n are not included in this array.\n\nThe `state` attribute contains the current state of the replication logger. It\ncontains the following sub-attributes:\n\n- `running`: whether or not the replication logger is currently active. Note:\n since ArangoDB 2.2, the value will always be `true`\n\n- `lastLogTick`: the value of the last tick the replication logger has written\n\n- `time`: the current time on the server\n\n`views` is an array of available views.\n\nReplication clients should note the `lastLogTick` value returned. They can then\nfetch collections' data using the dump method up to the value of lastLogTick, and\nquery the continuous replication log for log events after this tick value.\n\nTo create a full copy of the collections on the server, a replication client\ncan execute these steps:\n\n- call the `/inventory` API method. 
This returns the `lastLogTick` value and the\n array of collections and indexes from the server.\n\n- for each collection returned by `/inventory`, create the collection locally and\n call `/dump` to stream the collection data to the client, up to the value of\n `lastLogTick`.\n After that, the client can create the indexes on the collections as they were\n reported by `/inventory`.\n\nIf the clients wants to continuously stream replication log events from the logger\nserver, the following additional steps need to be carried out:\n\n- the client should call `/_api/wal/tail` initially to fetch the first batch of\n replication events that were logged after the client's call to `/inventory`.\n\n The call to `/_api/wal/tail` should use a `from` parameter with the value of the\n `lastLogTick` as reported by `/inventory`. The call to `/_api/wal/tail` will\n return the `x-arango-replication-lastincluded` header which will contain the\n last tick value included in the response.\n\n- the client can then continuously call `/_api/wal/tail` to incrementally fetch new\n replication events that occurred after the last transfer.\n\n Calls should use a `from` parameter with the value of the `x-arango-replication-lastincluded`\n header of the previous response. 
If there are no more replication events, the\n response will be empty and clients can go to sleep for a while and try again\n later.\n\n\u003e **INFO:**\nOn a Coordinator, this request must have a `DBserver`\nquery parameter which must be an ID of a DB-Server.\nThe very same request is forwarded synchronously to that DB-Server.\nIt is an error if this attribute is not bound in the Coordinator case.\n\n\n\u003e **INFO:**\nUsing the `global` parameter the top-level object contains a key `databases`\nunder which each key represents a database name, and the value conforms to the above description.\n", + "operationId": "getReplicationInventory", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Include system collections in the result. The default value is `true`.\n", + "in": "query", + "name": "includeSystem", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "Include all databases in the response. Only works on `_system` The default value is `false`.\n", + "in": "query", + "name": "global", + "required": false, + "schema": { + "type": "boolean" + } + }, + { + "description": "A valid batchId is required for this API call\n", + "in": "query", + "name": "batchId", + "required": true, + "schema": { + "type": "number" + } + }, + { + "description": "If this parameter is set, the response will be restricted to a single collection (the one\nspecified), and no views will be returned. This can be used as an optimization to reduce\nthe size of the response.\n", + "in": "query", + "name": "collection", + "required": false, + "schema": { + "type": "string" + } + } + ], "responses": { "200": { - "content": { - "application/json": { - "schema": { - "description": "An array of objects. 
Each object describes an AQL optimizer rule.\n", - "items": { - "properties": { - "flags": { - "description": "An object with the properties of the rule.\n", - "properties": { - "canBeDisabled": { - "description": "Whether users are allowed to disable this rule. A few rules are mandatory.\n", - "type": "boolean" - }, - "canCreateAdditionalPlans": { - "description": "Whether this rule may create additional query execution plans.\n", - "type": "boolean" - }, - "clusterOnly": { - "description": "Whether the rule is applicable in the cluster deployment mode only.\n", - "type": "boolean" - }, - "disabledByDefault": { - "description": "Whether the optimizer considers this rule by default.\n", - "type": "boolean" - }, - "enterpriseOnly": { - "description": "Whether the rule is available in the Enterprise Edition only.\n", - "type": "boolean" - }, - "hidden": { - "description": "Whether the rule is displayed to users. Internal rules are hidden.\n", - "type": "boolean" - } - }, - "required": [ - "hidden", - "clusterOnly", - "canBeDisabled", - "canCreateAdditionalPlans", - "disabledByDefault", - "enterpriseOnly" - ], - "type": "object" - }, - "name": { - "description": "The name of the optimizer rule as seen in query explain outputs.\n", - "type": "string" - } - }, - "required": [ - "name", - "flags" - ], - "type": "object" - }, - "type": "array" - } - } - }, - "description": "is returned if the list of optimizer rules can be retrieved successfully.\n" + "description": "is returned if the request was executed successfully.\n" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.\n" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.\n" } }, - "summary": "List all AQL optimizer rules", + "summary": "Get a replication inventory", "tags": [ - "Queries" + "Replication" ] } }, - "/_api/query/slow": { - "delete": { - "description": "Clears the list of slow AQL queries for the current database.\n", - 
"operationId": "clearSlowAqlQueryList", + "/_db/{database-name}/_api/replication/logger-first-tick": { + "get": { + "description": "Returns the first available tick value that can be served from the server's\nreplication log. This method can be called by replication clients after to\ndetermine if certain data (identified by a tick value) is still available\nfor replication.\n\nThe result is a JSON object containing the attribute `firstTick`. This\nattribute contains the minimum tick value available in the server's\nreplication\nlog.\n\n\u003e **INFO:**\nThis method is not supported on a Coordinator in a cluster deployment.\n", + "operationId": "getReplicationLoggerFirstTick", "parameters": [ { - "description": "If set to `true`, will clear the slow query history in all databases, not just\nthe selected one.\nUsing the parameter is only allowed in the system database and with superuser\nprivileges.\n", - "in": "query", - "name": "all", - "required": false, + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], "responses": { "200": { - "description": "The server will respond with *HTTP 200* when the list of queries was\ncleared successfully.\n" + "description": "is returned if the request was executed successfully.\n" }, - "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request.\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.\n" + }, + "501": { + "description": "is returned when this operation is called on a Coordinator in a cluster deployment.\n" } }, - "summary": "Clear the list of slow AQL queries", + "summary": "Get the first available replication tick value", "tags": [ - "Queries" + "Replication" ] - }, + } + }, + 
"/_db/{database-name}/_api/replication/logger-follow": { "get": { - "description": "Returns an array containing the last AQL queries that are finished and\nhave exceeded the slow query threshold in the selected database.\nThe maximum amount of queries in the list can be controlled by setting\nthe query tracking property `maxSlowQueries`. The threshold for treating\na query as *slow* can be adjusted by setting the query tracking property\n`slowQueryThreshold`.\n\nEach query is a JSON object with the following attributes:\n\n- `id`: the query's id\n\n- `database`: the name of the database the query runs in\n\n- `user`: the name of the user that started the query\n\n- `query`: the query string (potentially truncated)\n\n- `bindVars`: the bind parameter values used by the query\n\n- `started`: the date and time when the query was started\n\n- `runTime`: the query's total run time\n\n- `peakMemoryUsage`: the query's peak memory usage in bytes (in increments of 32KB)\n\n- `state`: the query's current execution state (will always be \"finished\"\n for the list of slow queries)\n\n- `stream`: whether or not the query uses a streaming cursor\n", - "operationId": "listSlowAqlQueries", + "description": "\u003e **WARNING:**\nThis route should no longer be used.\nIt is considered as deprecated from version 3.4.0 on. Client applications\nshould use the REST API endpoint `/_api/wal/tail` instead.\n\n\nReturns data from the server's replication log. This method can be called\nby replication clients after an initial synchronization of data. The method\nwill return all \"recent\" log entries from the logger server, and the clients\ncan replay and apply these entries locally so they get to the same data\nstate as the logger server.\n\nClients can call this method repeatedly to incrementally fetch all changes\nfrom the logger server. 
In this case, they should provide the `from` value so\nthey will only get returned the log events since their last fetch.\n\nWhen the `from` query parameter is not used, the logger server will return log\nentries starting at the beginning of its replication log. When the `from`\nparameter is used, the logger server will only return log entries which have\nhigher tick values than the specified `from` value (note: the log entry with a\ntick value equal to `from` will be excluded). Use the `from` value when\nincrementally fetching log data.\n\nThe `to` query parameter can be used to optionally restrict the upper bound of\nthe result to a certain tick value. If used, the result will contain only log events\nwith tick values up to (including) `to`. In incremental fetching, there is no\nneed to use the `to` parameter. It only makes sense in special situations,\nwhen only parts of the change log are required.\n\nThe `chunkSize` query parameter can be used to control the size of the result.\nIt must be specified in bytes. The `chunkSize` value will only be honored\napproximately. Otherwise a too low `chunkSize` value could cause the server\nto not be able to put just one log entry into the result and return it.\nTherefore, the `chunkSize` value will only be consulted after a log entry has\nbeen written into the result. If the result size is then greater than\n`chunkSize`, the server will respond with as many log entries as there are\nin the response already. If the result size is still less than `chunkSize`,\nthe server will try to return more data if there's more data left to return.\n\nIf `chunkSize` is not specified, some server-side default value will be used.\n\nThe `Content-Type` of the result is `application/x-arango-dump`. This is an\neasy-to-process format, with all log events going onto separate lines in the\nresponse body. 
Each log event itself is a JSON object, with at least the\nfollowing attributes:\n\n- `tick`: the log event tick value\n\n- `type`: the log event type\n\nIndividual log events will also have additional attributes, depending on the\nevent type. A few common attributes which are used for multiple events types\nare:\n\n- `cid`: id of the collection the event was for\n\n- `tid`: id of the transaction the event was contained in\n\n- `key`: document key\n\n- `rev`: document revision id\n\n- `data`: the original document data\n\nThe response will also contain the following HTTP headers:\n\n- `x-arango-replication-active`: whether or not the logger is active. Clients\n can use this flag as an indication for their polling frequency. If the\n logger is not active and there are no more replication events available, it\n might be sensible for a client to abort, or to go to sleep for a long time\n and try again later to check whether the logger has been activated.\n\n- `x-arango-replication-lastincluded`: the tick value of the last included\n value in the result. In incremental log fetching, this value can be used\n as the `from` value for the following request. **Note** that if the result is\n empty, the value will be `0`. This value should not be used as `from` value\n by clients in the next request (otherwise the server would return the log\n events from the start of the log again).\n\n- `x-arango-replication-lasttick`: the last tick value the logger server has\n logged (not necessarily included in the result). By comparing the last\n tick and last included tick values, clients have an approximate indication of\n how many events there are still left to fetch.\n\n- `x-arango-replication-checkmore`: whether or not there already exists more\n log data which the client could fetch immediately. 
If there is more log data\n available, the client could call `logger-follow` again with an adjusted `from`\n value to fetch remaining log entries until there are no more.\n\n If there isn't any more log data to fetch, the client might decide to go\n to sleep for a while before calling the logger again.\n\n\u003e **INFO:**\nThis method is not supported on a Coordinator in a cluster deployment.\n", + "operationId": "getReplicationLoggerFollow", "parameters": [ { - "description": "If set to `true`, will return the slow queries from all databases, not just\nthe selected one.\nUsing the parameter is only allowed in the system database and with superuser\nprivileges.\n", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Exclusive lower bound tick value for results.\n", "in": "query", - "name": "all", + "name": "from", + "required": false, + "schema": { + "type": "number" + } + }, + { + "description": "Inclusive upper bound tick value for results.\n", + "in": "query", + "name": "to", + "required": false, + "schema": { + "type": "number" + } + }, + { + "description": "Approximate maximum size of the returned result.\n", + "in": "query", + "name": "chunkSize", + "required": false, + "schema": { + "type": "number" + } + }, + { + "description": "Include system collections in the result. The default value is `true`.\n", + "in": "query", + "name": "includeSystem", "required": false, "schema": { "type": "boolean" @@ -18758,107 +24052,112 @@ ], "responses": { "200": { - "description": "Is returned when the list of queries can be retrieved successfully.\n" + "description": "is returned if the request was executed successfully, and there are log\nevents available for the requested range. 
The response body will not be empty\nin this case.\n" + }, + "204": { + "description": "is returned if the request was executed successfully, but there are no log\nevents available for the requested range. The response body will be empty\nin this case.\n" }, "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request,\n" + "description": "is returned if either the `from` or `to` values are invalid.\n" }, - "403": { - "description": "*HTTP 403* is returned in case the `all` parameter was used, but the request\nwas made in a different database than _system, or by an non-privileged user.\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.\n" + }, + "501": { + "description": "is returned when this operation is called on a Coordinator in a cluster deployment.\n" } }, - "summary": "List the slow AQL queries", + "summary": "Get replication log entries (deprecated)", "tags": [ - "Queries" + "Replication" ] } }, - "/_api/query/{query-id}": { - "delete": { - "description": "Kills a running query in the currently selected database. The query will be\nterminated at the next cancelation point.\n", - "operationId": "deleteAqlQuery", + "/_db/{database-name}/_api/replication/logger-state": { + "get": { + "description": "Returns the current state of the server's replication logger. The state will\ninclude information about whether the logger is running and about the last\nlogged tick value. 
This tick value is important for incremental fetching of\ndata.\n\nThe body of the response contains a JSON object with the following\nattributes:\n\n- `state`: the current logger state as a JSON object with the following\n sub-attributes:\n\n - `running`: whether or not the logger is running\n\n - `lastLogTick`: the tick value of the latest tick the logger has logged.\n This value can be used for incremental fetching of log data.\n\n - `totalEvents`: total number of events logged since the server was started.\n The value is not reset between multiple stops and re-starts of the logger.\n\n - `time`: the current date and time on the logger server\n\n- `server`: a JSON object with the following sub-attributes:\n\n - `version`: the logger server's version\n\n - `serverId`: the logger server's id\n\n- `clients`: returns the last fetch status by replication clients connected to\n the logger. Each client is returned as a JSON object with the following attributes:\n\n - `syncerId`: id of the client syncer\n\n - `serverId`: server id of client\n\n - `lastServedTick`: last tick value served to this client via the WAL tailing API\n\n - `time`: date and time when this client last called the WAL tailing API\n", + "operationId": "getReplicationLoggerState", "parameters": [ { - "description": "The id of the query.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "query-id", + "name": "database-name", "required": true, "schema": { "type": "string" } - }, - { - "description": "If set to `true`, will attempt to kill the specified query in all databases,\nnot just the selected one.\nUsing the parameter is only allowed in the system database and with superuser\nprivileges.\n", - "in": "query", - "name": "all", - "required": false, - "schema": { - "type": "boolean" - } } ], "responses": { "200": { - "description": "The server will respond with *HTTP 200* when the query was still running when\nthe kill request was executed and the 
query's kill flag was set.\n" - }, - "400": { - "description": "The server will respond with *HTTP 400* in case of a malformed request.\n" + "description": "is returned if the logger state could be determined successfully.\n" }, - "403": { - "description": "*HTTP 403* is returned in case the *all* parameter was used, but the request\nwas made in a different database than _system, or by an non-privileged user.\n" + "405": { + "description": "is returned when an invalid HTTP method is used.\n" }, - "404": { - "description": "The server will respond with *HTTP 404* when no query with the specified\nid was found.\n" + "500": { + "description": "is returned if the logger state could not be determined.\n" } }, - "summary": "Kill a running AQL query", + "summary": "Get the replication logger state", "tags": [ - "Queries" + "Replication" ] } }, - "/_api/replication/applier-config": { + "/_db/{database-name}/_api/replication/logger-tick-ranges": { "get": { - "description": "Returns the configuration of the replication applier.\n\nThe body of the response is a JSON object with the configuration. The\nfollowing attributes may be present in the configuration:\n\n- `endpoint`: the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\").\n\n- `database`: the name of the database to connect to (e.g. \"_system\").\n\n- `username`: an optional ArangoDB username to use when connecting to the endpoint.\n\n- `password`: the password to use when connecting to the endpoint.\n\n- `maxConnectRetries`: the maximum number of connection attempts the applier\n will make in a row. If the applier cannot establish a connection to the\n endpoint in this number of attempts, it will stop itself.\n\n- `connectTimeout`: the timeout (in seconds) when attempting to connect to the\n endpoint. 
This value is used for each connection attempt.\n\n- `requestTimeout`: the timeout (in seconds) for individual requests to the endpoint.\n\n- `chunkSize`: the requested maximum size for log transfer packets that\n is used when the endpoint is contacted.\n\n- `autoStart`: whether or not to auto-start the replication applier on\n (next and following) server starts\n\n- `adaptivePolling`: whether or not the replication applier will use\n adaptive polling.\n\n- `includeSystem`: whether or not system collection operations will be applied\n\n- `autoResync`: whether or not the follower should perform a full automatic\n resynchronization with the leader in case the leader cannot serve log data\n requested by the follower, or when the replication is started and no tick\n value\n can be found.\n\n- `autoResyncRetries`: number of resynchronization retries that will be performed\n in a row when automatic resynchronization is enabled and kicks in. Setting this\n to `0` will effectively disable `autoResync`. Setting it to some other value\n will limit the number of retries that are performed. This helps preventing endless\n retries in case resynchronizations always fail.\n\n- `initialSyncMaxWaitTime`: the maximum wait time (in seconds) that the initial\n synchronization will wait for a response from the leader when fetching initial\n collection data.\n This wait time can be used to control after what time the initial synchronization\n will give up waiting for a response and fail. 
This value is relevant even\n for continuous replication when `autoResync` is set to `true` because this\n may re-start the initial synchronization when the leader cannot provide\n log data the follower requires.\n This value will be ignored if set to `0`.\n\n- `connectionRetryWaitTime`: the time (in seconds) that the applier will\n intentionally idle before it retries connecting to the leader in case of\n connection problems.\n This value will be ignored if set to `0`.\n\n- `idleMinWaitTime`: the minimum wait time (in seconds) that the applier will\n intentionally idle before fetching more log data from the leader in case\n the leader has already sent all its log data. This wait time can be used\n to control the frequency with which the replication applier sends HTTP log\n fetch requests to the leader in case there is no write activity on the leader.\n This value will be ignored if set to `0`.\n\n- `idleMaxWaitTime`: the maximum wait time (in seconds) that the applier will\n intentionally idle before fetching more log data from the leader in case the\n leader has already sent all its log data and there have been previous log\n fetch attempts that resulted in no more log data. This wait time can be used\n to control the maximum frequency with which the replication applier sends HTTP\n log fetch requests to the leader in case there is no write activity on the\n leader for longer periods. This configuration value will only be used if the\n option `adaptivePolling` is set to `true`.\n This value will be ignored if set to `0`.\n\n- `requireFromPresent`: if set to `true`, then the replication applier will check\n at start whether the start tick from which it starts or resumes replication is\n still present on the leader. If not, then there would be data loss. If\n `requireFromPresent` is `true`, the replication applier will abort with an\n appropriate error message. 
If set to `false`, then the replication applier will\n still start, and ignore the data loss.\n\n- `verbose`: if set to `true`, then a log line will be emitted for all operations\n performed by the replication applier. This should be used for debugging\n replication\n problems only.\n\n- `restrictType`: the configuration for `restrictCollections`\n\n- `restrictCollections`: the optional array of collections to include or exclude,\n based on the setting of `restrictType`\n", - "operationId": "getReplicationApplierConfig", + "description": "Returns the currently available ranges of tick values for all currently\navailable WAL logfiles. The tick values can be used to determine if certain\ndata (identified by tick value) are still available for replication.\n\nThe body of the response contains a JSON array. Each array member is an\nobject\nthat describes a single logfile. Each object has the following attributes:\n\n- `datafile`: name of the logfile\n\n- `status`: status of the datafile, in textual form (e.g. \"sealed\", \"open\")\n\n- `tickMin`: minimum tick value contained in logfile\n\n- `tickMax`: maximum tick value contained in logfile\n", + "operationId": "getReplicationLoggerTickRanges", "parameters": [ { - "description": "If set to `true`, returns the configuration of the global replication applier for all\ndatabases. 
If set to `false`, returns the configuration of the replication applier in the\nselected database.\n", - "in": "query", - "name": "global", - "required": false, + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], "responses": { "200": { - "description": "is returned if the request was executed successfully.\n" + "description": "is returned if the tick ranges could be determined successfully.\n" }, "405": { "description": "is returned when an invalid HTTP method is used.\n" }, "500": { - "description": "is returned if an error occurred while assembling the response.\n" + "description": "is returned if the logger state could not be determined.\n" + }, + "501": { + "description": "is returned when this operation is called on a Coordinator in a cluster deployment.\n" } }, - "summary": "Get the replication applier configuration", + "summary": "Get the tick ranges available in the WAL logfiles", "tags": [ "Replication" ] - }, + } + }, + "/_db/{database-name}/_api/replication/make-follower": { "put": { - "description": "Sets the configuration of the replication applier. The configuration can\nonly be changed while the applier is not running. The updated configuration\nwill be saved immediately but only become active with the next start of the\napplier.\n\nIn case of success, the body of the response is a JSON object with the updated\nconfiguration.\n", - "operationId": "updateReplicationApplierConfig", + "description": "\u003e **WARNING:**\nCalling this endpoint will synchronize data from the collections found on the\nremote leader to the local ArangoDB database. All data in the local collections\nwill be purged and replaced with data from the leader. Use with caution!\n\n\n\u003e **INFO:**\nThis command may take a long time to complete and return. 
This is because it\nwill first do a full data synchronization with the leader, which will take time\nroughly proportional to the amount of data.\n\n\nChanges the role to a follower and starts a full data synchronization from a\nremote endpoint into the local ArangoDB database and afterwards starts the\ncontinuous replication.\n\nThe operation works on a per-database level.\n\nAll local database data will be removed prior to the synchronization.\n\nIn case of success, the body of the response is a JSON object with the following\nattributes:\n\n- `state`: a JSON object with the following sub-attributes:\n\n - `running`: whether or not the applier is active and running\n\n - `lastAppliedContinuousTick`: the last tick value from the continuous\n replication log the applier has applied.\n\n - `lastProcessedContinuousTick`: the last tick value from the continuous\n replication log the applier has processed.\n\n Regularly, the last applied and last processed tick values should be\n identical. For transactional operations, the replication applier will first\n process incoming log events before applying them, so the processed tick\n value might be higher than the applied tick value. This will be the case\n until the applier encounters the *transaction commit* log event for the\n transaction.\n\n - `lastAvailableContinuousTick`: the last tick value the remote server can\n provide.\n\n - `ticksBehind`: this attribute will be present only if the applier is currently\n running. It will provide the number of log ticks between what the applier\n has applied/seen and the last log tick value provided by the remote server.\n If this value is zero, then both servers are in sync. 
If this is non-zero,\n then the remote server has additional data that the applier has not yet\n fetched and processed, or the remote server may have more data that is not\n applicable to the applier.\n\n Client applications can use it to determine approximately how far the applier\n is behind the remote server, and can periodically check if the value is\n increasing (applier is falling behind) or decreasing (applier is catching up).\n\n Please note that as the remote server will only keep one last log tick value\n for all of its databases, but replication may be restricted to just certain\n databases on the applier, this value is more meaningful when the global applier\n is used.\n Additionally, the last log tick provided by the remote server may increase\n due to writes into system collections that are not replicated due to replication\n configuration. So the reported value may exaggerate the reality a bit for\n some scenarios.\n\n - `time`: the time on the applier server.\n\n - `totalRequests`: the total number of requests the applier has made to the\n endpoint.\n\n - `totalFailedConnects`: the total number of failed connection attempts the\n applier has made.\n\n - `totalEvents`: the total number of log events the applier has processed.\n\n - `totalOperationsExcluded`: the total number of log events excluded because\n of `restrictCollections`.\n\n - `progress`: a JSON object with details about the replication applier progress.\n It contains the following sub-attributes if there is progress to report:\n\n - `message`: a textual description of the progress\n\n - `time`: the date and time the progress was logged\n\n - `failedConnects`: the current number of failed connection attempts\n\n - `lastError`: a JSON object with details about the last error that happened on\n the applier. 
It contains the following sub-attributes if there was an error:\n\n - `errorNum`: a numerical error code\n\n - `errorMessage`: a textual error description\n\n - `time`: the date and time the error occurred\n\n In case no error has occurred, `lastError` will be empty.\n\n- `server`: a JSON object with the following sub-attributes:\n\n - `version`: the applier server's version\n\n - `serverId`: the applier server's id\n\n- `endpoint`: the endpoint the applier is connected to (if applier is\n active) or will connect to (if applier is currently inactive)\n\n- `database`: the name of the database the applier is connected to (if applier is\n active) or will connect to (if applier is currently inactive)\n\nPlease note that all \"tick\" values returned do not have a specific unit. Tick\nvalues are only meaningful when compared to each other. Higher tick values mean\n\"later in time\" than lower tick values.\n\n\u003e **INFO:**\nThis endpoint is not supported on a Coordinator in a cluster deployment.\n", + "operationId": "makeReplicationFollower", "parameters": [ { - "description": "If set to `true`, adjusts the configuration of the global replication applier for all\ndatabases. If set to `false`, adjusts the configuration of the replication applier in the\nselected database.\n", - "in": "query", - "name": "global", - "required": false, + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], @@ -18868,21 +24167,17 @@ "schema": { "properties": { "adaptivePolling": { - "description": "if set to `true`, the replication applier will fall\nto sleep for an increasingly long period in case the logger server at the\nendpoint does not have any more replication events to apply. Using\nadaptive polling is thus useful to reduce the amount of work for both the\napplier and the logger server for cases when there are only infrequent\nchanges. 
The downside is that when using adaptive polling, it might take\nlonger for the replication applier to detect that there are new replication\nevents on the logger server.\n\nSetting `adaptivePolling` to false will make the replication applier\ncontact the logger server in a constant interval, regardless of whether\nthe logger server provides updates frequently or seldom.\n", + "description": "whether or not the replication applier will use adaptive polling.\n", "type": "boolean" }, "autoResync": { - "description": "whether or not the follower should perform a full automatic resynchronization\nwith the leader in case the leader cannot serve log data requested by the\nfollower, or when the replication is started and no tick value can be found.\n", + "description": "whether or not the follower should perform an automatic resynchronization with\nthe leader in case the leader cannot serve log data requested by the follower,\nor when the replication is started and no tick value can be found.\n", "type": "boolean" }, "autoResyncRetries": { - "description": "number of resynchronization retries that will be performed in a row when\nautomatic resynchronization is enabled and kicks in. Setting this to `0`\nwill\neffectively disable `autoResync`. Setting it to some other value will limit\nthe number of retries that are performed. This helps preventing endless\nretries\nin case resynchronizations always fail.\n", + "description": "number of resynchronization retries that will be performed in a row when\nautomatic resynchronization is enabled and kicks in. Setting this to `0` will\neffectively disable `autoResync`. Setting it to some other value will limit\nthe number of retries that are performed. 
This helps preventing endless retries\nin case resynchronizations always fail.\n", "type": "integer" }, - "autoStart": { - "description": "whether or not to auto-start the replication applier on\n(next and following) server starts\n", - "type": "boolean" - }, "chunkSize": { "description": "the requested maximum size for log transfer packets that\nis used when the endpoint is contacted.\n", "type": "integer" @@ -18896,11 +24191,11 @@ "type": "integer" }, "database": { - "description": "the name of the database on the endpoint. If not specified, defaults to the current local database name.\n", + "description": "the database name on the leader (if not specified, defaults to the\nname of the local current database).\n", "type": "string" }, "endpoint": { - "description": "the logger server to connect to (e.g. \"tcp://192.168.173.13:8529\"). The endpoint must be specified.\n", + "description": "the leader endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").\n", "type": "string" }, "idleMaxWaitTime": { @@ -18916,7 +24211,7 @@ "type": "boolean" }, "initialSyncMaxWaitTime": { - "description": "the maximum wait time (in seconds) that the initial synchronization will\nwait for a response from the leader when fetching initial collection data.\nThis wait time can be used to control after what time the initial\nsynchronization\nwill give up waiting for a response and fail. This value is relevant even\nfor continuous replication when `autoResync` is set to `true` because this\nmay re-start the initial synchronization when the leader cannot provide\nlog data the follower requires.\nThis value will be ignored if set to `0`.\n", + "description": "the maximum wait time (in seconds) that the initial synchronization will\nwait for a response from the leader when fetching initial collection data.\nThis wait time can be used to control after what time the initial synchronization\nwill give up waiting for a response and fail. 
This value is relevant even\nfor continuous replication when `autoResync` is set to `true` because this\nmay re-start the initial synchronization when the leader cannot provide\nlog data the follower requires.\nThis value will be ignored if set to `0`.\n", "type": "integer" }, "maxConnectRetries": { @@ -18924,7 +24219,7 @@ "type": "integer" }, "password": { - "description": "the password to use when connecting to the endpoint.\n", + "description": "the password to use when connecting to the leader.\n", "type": "string" }, "requestTimeout": { @@ -18932,26 +24227,26 @@ "type": "integer" }, "requireFromPresent": { - "description": "if set to `true`, then the replication applier will check\nat start whether the start tick from which it starts or resumes replication is\nstill present on the leader. If not, then there would be data loss. If\n`requireFromPresent` is `true`, the replication applier will abort with an\nappropriate error message. If set to `false`, then the replication applier will\nstill start, and ignore the data loss.\n", + "description": "if set to `true`, then the replication applier will check\nat start of its continuous replication if the start tick from the dump phase\nis still present on the leader. If not, then there would be data loss. If\n`requireFromPresent` is `true`, the replication applier will abort with an\nappropriate error message. If set to `false`, then the replication applier will\nstill start, and ignore the data loss.\n", "type": "boolean" }, "restrictCollections": { - "description": "the array of collections to include or exclude,\nbased on the setting of `restrictType`\n", + "description": "an optional array of collections for use with `restrictType`.\nIf `restrictType` is `include`, only the specified collections\nwill be synchronized. 
If `restrictType` is `exclude`, all but the specified\ncollections will be synchronized.\n", "items": { "type": "string" }, "type": "array" }, "restrictType": { - "description": "the configuration for `restrictCollections`; Has to be either `include` or `exclude`\n", + "description": "an optional string value for collection filtering. When\nspecified, the allowed values are `include` or `exclude`.\n", "type": "string" }, "username": { - "description": "an optional ArangoDB username to use when connecting to the endpoint.\n", + "description": "an optional ArangoDB username to use when connecting to the leader.\n", "type": "string" }, "verbose": { - "description": "if set to `true`, then a log line will be emitted for all operations\nperformed by the replication applier. This should be used for debugging replication\nproblems only.\n", + "description": "if set to `true`, then a log line will be emitted for all operations\nperformed by the replication applier. This should be used for debugging\nreplication\nproblems only.\n", "type": "boolean" } }, @@ -18959,60 +24254,133 @@ "endpoint", "database", "password", - "maxConnectRetries", - "connectTimeout", - "requestTimeout", - "chunkSize", - "autoStart", - "adaptivePolling", - "includeSystem", - "requireFromPresent", - "verbose", - "restrictType" + "includeSystem" ], "type": "object" } } } - }, + }, + "responses": { + "200": { + "description": "is returned if the request was executed successfully.\n" + }, + "400": { + "description": "is returned if the configuration is incomplete or malformed.\n" + }, + "405": { + "description": "is returned when an invalid HTTP method is used.\n" + }, + "500": { + "description": "is returned if an error occurred during synchronization or when starting the\ncontinuous replication.\n" + }, + "501": { + "description": "is returned when this operation is called on a Coordinator in a cluster deployment.\n" + } + }, + "summary": "Turn a server into a follower of another", + "tags": [ + 
"Replication" + ] + } + }, + "/_db/{database-name}/_api/replication/revisions/documents": { + "put": { + "description": "\u003e **WARNING:**\nThis revision-based replication endpoint will only work with collections\ncreated in ArangoDB v3.8.0 or later.\n\n\nReturns documents by revision for replication.\n\nThe body of the request should be JSON/VelocyPack and should consist of an\narray of string-encoded revision IDs:\n\n```\n[\n \u003cString, revision\u003e,\n \u003cString, revision\u003e,\n ...\n \u003cString, revision\u003e\n]\n```\n\nIn particular, the revisions should be sorted in ascending order of their\ndecoded values.\n\nThe result will be a JSON/VelocyPack array of document objects. If there is no\ndocument corresponding to a particular requested revision, an empty object will\nbe returned in its place.\n\nThe response may be truncated if it would be very long. In this case, the\nresponse array length will be less than the request array length, and\nsubsequent requests can be made for the omitted documents.\n\nEach `\u003cString, revision\u003e` value type is a 64-bit value encoded as a string of\n11 characters, using the same encoding as our document `_rev` values. 
The\nreason for this is that 64-bit values cannot necessarily be represented in full\nin JavaScript, as it handles all numbers as floating point, and can only\nrepresent up to `2^53-1` faithfully.\n", + "operationId": "listReplicationRevisionDocuments", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name or id of the collection to query.\n", + "in": "query", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The id of the snapshot to use\n", + "in": "query", + "name": "batchId", + "required": true, + "schema": { + "type": "number" + } + } + ], "responses": { "200": { - "description": "is returned if the request was executed successfully.\n" + "description": "is returned if the request was executed successfully and data was returned.\n" }, - "400": { - "description": "is returned if the configuration is incomplete or malformed, or if the\nreplication applier is currently running.\n" + "401": { + "description": "is returned if necessary parameters are missing or incorrect\n" + }, + "404": { + "description": "is returned when the collection or snapshot could not be found.\n" }, "405": { "description": "is returned when an invalid HTTP method is used.\n" }, "500": { "description": "is returned if an error occurred while assembling the response.\n" + }, + "501": { + "description": "is returned if called on a collection which doesn't support sync-by-revision\n" } }, - "summary": "Update the replication applier configuration", + "summary": "Get documents by revision", "tags": [ "Replication" ] } }, - "/_api/replication/applier-start": { + "/_db/{database-name}/_api/replication/revisions/ranges": { "put": { - "description": "Starts the replication applier. 
This will return immediately if the\nreplication applier is already running.\n\nIf the replication applier is not already running, the applier configuration\nwill be checked, and if it is complete, the applier will be started in a\nbackground thread. This means that even if the applier will encounter any\nerrors while running, they will not be reported in the response to this\nmethod.\n\nTo detect replication applier errors after the applier was started, use the\n`/_api/replication/applier-state` API instead.\n", - "operationId": "startReplicationApplier", + "description": "\u003e **WARNING:**\nThis revision-based replication endpoint will only work with the RocksDB\nengine, and with collections created in ArangoDB v3.8.0 or later.\n\n\nReturns the revision IDs of documents within the requested ranges.\n\nThe body of the request should be JSON/VelocyPack and should consist of an\narray of pairs of string-encoded revision IDs:\n\n```\n[\n [\u003cString, revision\u003e, \u003cString, revision\u003e],\n [\u003cString, revision\u003e, \u003cString, revision\u003e],\n ...\n [\u003cString, revision\u003e, \u003cString, revision\u003e]\n]\n```\n\nIn particular, the pairs should be non-overlapping, and sorted in ascending\norder of their decoded values.\n\nThe result will be JSON/VelocyPack in the following format:\n```\n{\n ranges: [\n [\u003cString, revision\u003e, \u003cString, revision\u003e, ... \u003cString, revision\u003e],\n [\u003cString, revision\u003e, \u003cString, revision\u003e, ... \u003cString, revision\u003e],\n ...,\n [\u003cString, revision\u003e, \u003cString, revision\u003e, ... \u003cString, revision\u003e]\n ]\n resume: \u003cString, revision\u003e\n}\n```\n\nThe `resume` field is optional. If specified, then the response is to be\nconsidered partial, only valid through the revision specified. A subsequent\nrequest should be made with the same request body, but specifying the `resume`\nURL parameter with the value specified. 
The subsequent response will pick up\nfrom the appropriate request pair, and omit any complete ranges or revisions\nwhich are less than the requested resume revision. As an example (ignoring the\nstring-encoding for a moment), if ranges `[1, 3], [5, 9], [12, 15]` are\nrequested, then a first response may return `[], [5, 6]` with a resume point of\n`7` and a subsequent response might be `[8], [12, 13]`.\n\nIf a requested range contains no revisions, then an empty array is returned.\nEmpty ranges will not be omitted.\n\nEach `\u003cString, revision\u003e` value type is a 64-bit value encoded as a string of\n11 characters, using the same encoding as our document `_rev` values. The\nreason for this is that 64-bit values cannot necessarily be represented in full\nin JavaScript, as it handles all numbers as floating point, and can only\nrepresent up to `2^53-1` faithfully.\n", + "operationId": "listReplicationRevisionRanges", "parameters": [ { - "description": "If set to `true`, starts the global replication applier for all\ndatabases. If set to `false`, starts the replication applier in the\nselected database.\n", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name or id of the collection to query.\n", "in": "query", - "name": "global", - "required": false, + "name": "collection", + "required": true, "schema": { - "type": "boolean" + "type": "string" } }, { - "description": "The remote `lastLogTick` value from which to start applying. If not specified,\nthe last saved tick from the previous applier run is used. 
If there is no\nprevious applier state saved, the applier will start at the beginning of the\nlogger server's log.\n", + "description": "The id of the snapshot to use\n", "in": "query", - "name": "from", + "name": "batchId", + "required": true, + "schema": { + "type": "number" + } + }, + { + "description": "The revision at which to resume, if a previous request was truncated\n", + "in": "query", + "name": "resume", "required": false, "schema": { "type": "string" @@ -19021,177 +24389,275 @@ ], "responses": { "200": { - "description": "is returned if the request was executed successfully.\n" + "description": "is returned if the request was executed successfully and data was returned.\n" }, - "400": { - "description": "is returned if the replication applier is not fully configured or the\nconfiguration is invalid.\n" + "401": { + "description": "is returned if necessary parameters are missing or incorrect\n" + }, + "404": { + "description": "is returned when the collection or snapshot could not be found.\n" }, "405": { "description": "is returned when an invalid HTTP method is used.\n" }, "500": { "description": "is returned if an error occurred while assembling the response.\n" + }, + "501": { + "description": "is returned if called on a collection which doesn't support sync-by-revision\n" } }, - "summary": "Start the replication applier", + "summary": "List document revision IDs within requested ranges", "tags": [ "Replication" ] } }, - "/_api/replication/applier-state": { + "/_db/{database-name}/_api/replication/revisions/tree": { "get": { - "description": "Returns the state of the replication applier, regardless of whether the\napplier is currently running or not.\n\nThe response is a JSON object with the following attributes:\n\n- `state`: a JSON object with the following sub-attributes:\n\n - `running`: whether or not the applier is active and running\n\n - `lastAppliedContinuousTick`: the last tick value from the continuous\n replication log the applier has 
applied.\n\n - `lastProcessedContinuousTick`: the last tick value from the continuous\n replication log the applier has processed.\n\n Regularly, the last applied and last processed tick values should be\n identical. For transactional operations, the replication applier will first\n process incoming log events before applying them, so the processed tick\n value might be higher than the applied tick value. This will be the case\n until the applier encounters the *transaction commit* log event for the\n transaction.\n\n - `lastAvailableContinuousTick`: the last tick value the remote server can\n provide, for all databases.\n\n - `ticksBehind`: this attribute will be present only if the applier is currently\n running. It will provide the number of log ticks between what the applier\n has applied/seen and the last log tick value provided by the remote server.\n If this value is zero, then both servers are in sync. If this is non-zero,\n then the remote server has additional data that the applier has not yet\n fetched and processed, or the remote server may have more data that is not\n applicable to the applier.\n\n Client applications can use it to determine approximately how far the applier\n is behind the remote server, and can periodically check if the value is\n increasing (applier is falling behind) or decreasing (applier is catching up).\n\n Please note that as the remote server will only keep one last log tick value\n for all of its databases, but replication may be restricted to just certain\n databases on the applier, this value is more meaningful when the global applier\n is used.\n Additionally, the last log tick provided by the remote server may increase\n due to writes into system collections that are not replicated due to replication\n configuration. 
So the reported value may exaggerate the reality a bit for\n some scenarios.\n\n - `time`: the time on the applier server.\n\n - `totalRequests`: the total number of requests the applier has made to the\n endpoint.\n\n - `totalFailedConnects`: the total number of failed connection attempts the\n applier has made.\n\n - `totalEvents`: the total number of log events the applier has processed.\n\n - `totalOperationsExcluded`: the total number of log events excluded because\n of `restrictCollections`.\n\n - `progress`: a JSON object with details about the replication applier progress.\n It contains the following sub-attributes if there is progress to report:\n\n - `message`: a textual description of the progress\n\n - `time`: the date and time the progress was logged\n\n - `failedConnects`: the current number of failed connection attempts\n\n - `lastError`: a JSON object with details about the last error that happened on\n the applier. It contains the following sub-attributes if there was an error:\n\n - `errorNum`: a numerical error code\n\n - `errorMessage`: a textual error description\n\n - `time`: the date and time the error occurred\n\n In case no error has occurred, `lastError` will be empty.\n\n- `server`: a JSON object with the following sub-attributes:\n\n - `version`: the applier server's version\n\n - `serverId`: the applier server's id\n\n- `endpoint`: the endpoint the applier is connected to (if applier is\n active) or will connect to (if applier is currently inactive)\n\n- `database`: the name of the database the applier is connected to (if applier is\n active) or will connect to (if applier is currently inactive)\n\nPlease note that all \"tick\" values returned do not have a specific unit. Tick\nvalues are only meaningful when compared to each other. 
Higher tick values mean\n\"later in time\" than lower tick values.\n", - "operationId": "getReplicationApplierState", + "description": "\u003e **WARNING:**\nThis revision-based replication endpoint will only work with collections\ncreated in ArangoDB v3.8.0 or later.\n\n\nReturns the Merkle tree associated with the specified collection.\n\nThe result will be JSON/VelocyPack in the following format:\n```\n{\n version: \u003cNumber\u003e,\n branchingFactor: \u003cNumber\u003e\n maxDepth: \u003cNumber\u003e,\n rangeMin: \u003cString, revision\u003e,\n rangeMax: \u003cString, revision\u003e,\n nodes: [\n { count: \u003cNumber\u003e, hash: \u003cString, revision\u003e },\n { count: \u003cNumber\u003e, hash: \u003cString, revision\u003e },\n ...\n { count: \u003cNumber\u003e, hash: \u003cString, revision\u003e }\n ]\n}\n```\n\nAt the moment, there is only one version, 1, so this can safely be ignored for\nnow.\n\nEach `\u003cString, revision\u003e` value type is a 64-bit value encoded as a string of\n11 characters, using the same encoding as our document `_rev` values. The\nreason for this is that 64-bit values cannot necessarily be represented in full\nin JavaScript, as it handles all numbers as floating point, and can only\nrepresent up to `2^53-1` faithfully.\n\nThe node count should correspond to a full tree with the given `maxDepth` and\n`branchingFactor`. The nodes are laid out in level-order tree traversal, so the\nroot is at index `0`, its children at indices `[1, branchingFactor]`, and so\non.\n", + "operationId": "getReplicationRevisionTree", "parameters": [ { - "description": "If set to `true`, returns the state of the global replication applier for all\ndatabases. 
If set to `false`, returns the state of the replication applier in the\nselected database.\n", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name or id of the collection to query.\n", "in": "query", - "name": "global", - "required": false, + "name": "collection", + "required": true, "schema": { - "type": "boolean" + "type": "string" + } + }, + { + "description": "The id of the snapshot to use\n", + "in": "query", + "name": "batchId", + "required": true, + "schema": { + "type": "number" } } ], "responses": { "200": { - "description": "is returned if the request was executed successfully.\n" + "description": "is returned if the request was executed successfully and data was returned.\n" + }, + "401": { + "description": "is returned if necessary parameters are missing\n" + }, + "404": { + "description": "is returned when the collection or snapshot could not be found.\n" }, "405": { "description": "is returned when an invalid HTTP method is used.\n" }, "500": { "description": "is returned if an error occurred while assembling the response.\n" + }, + "501": { + "description": "is returned if called on a collection which doesn't support sync-by-revision\n" } }, - "summary": "Get the replication applier state", + "summary": "Get the replication revision tree", "tags": [ "Replication" ] - } - }, - "/_api/replication/applier-stop": { - "put": { - "description": "Stops the replication applier. 
This will return immediately if the\nreplication applier is not running.\n", - "operationId": "stopReplicationApplier", + }, + "post": { + "description": "\u003e **WARNING:**\nThis revision-based replication endpoint will only work with collections\ncreated in ArangoDB v3.8.0 or later.\n\n\nRebuilds the Merkle tree for a collection.\n\nIf successful, there will be no return body.\n", + "operationId": "rebuildReplicationRevisionTree", "parameters": [ { - "description": "If set to `true`, stops the global replication applier for all\ndatabases. If set to `false`, stops the replication applier in the\nselected database.\n", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name or id of the collection to query.\n", "in": "query", - "name": "global", - "required": false, + "name": "collection", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], "responses": { - "200": { + "204": { "description": "is returned if the request was executed successfully.\n" }, + "401": { + "description": "is returned if necessary parameters are missing\n" + }, + "404": { + "description": "is returned when the collection or could not be found.\n" + }, "405": { "description": "is returned when an invalid HTTP method is used.\n" }, "500": { "description": "is returned if an error occurred while assembling the response.\n" + }, + "501": { + "description": "is returned if called on a collection which doesn't support sync-by-revision\n" } }, - "summary": "Stop the replication applier", + "summary": "Rebuild the replication revision tree", "tags": [ "Replication" ] } }, - "/_api/replication/batch": { - "post": { - "description": "\u003e **INFO:**\nThis is an internally used endpoint.\n\n\nCreates a new dump batch and returns the batch's id.\n\nThe response is a JSON object with the following attributes:\n\n- `id`: the id 
of the batch\n- `lastTick`: snapshot tick value using when creating the batch\n- `state`: additional leader state information (only present if the\n `state` URL parameter was set to `true` in the request)\n\n\u003e **INFO:**\nOn a Coordinator, this request must have a `DBserver`\nquery parameter which must be an ID of a DB-Server.\nThe very same request is forwarded synchronously to that DB-Server.\nIt is an error if this attribute is not bound in the Coordinator case.\n", - "operationId": "createReplicationBatch", + "/_db/{database-name}/_api/replication/server-id": { + "get": { + "description": "Returns the servers id. The id is also returned by other replication API\nmethods, and this method is an easy means of determining a server's id.\n\nThe body of the response is a JSON object with the attribute `serverId`. The\nserver id is returned as a string.\n", + "operationId": "getReplicationServerId", "parameters": [ { - "description": "setting `state` to true will make the response also contain\na `state` attribute with information about the leader state.\nThis is used only internally during the replication process\nand should not be used by client applications.\n", - "in": "query", - "name": "state", - "required": false, + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "ttl": { - "description": "The time-to-live for the new batch (in seconds).\n", - "type": "integer" - } - }, - "required": [ - "ttl" - ], - "type": "object" - } - } - } - }, "responses": { "200": { - "description": "is returned if the batch was created successfully.\n" - }, - "400": { - "description": "is returned if the TTL value is invalid or if the `DBserver` attribute\nis not specified or illegal on a Coordinator.\n" + "description": "is returned if the request 
was executed successfully.\n" }, "405": { "description": "is returned when an invalid HTTP method is used.\n" + }, + "500": { + "description": "is returned if an error occurred while assembling the response.\n" } }, - "summary": "Create a new dump batch", + "summary": "Get the replication server ID", "tags": [ "Replication" ] } }, - "/_api/replication/batch/{id}": { - "delete": { - "description": "\u003e **INFO:**\nThis is an internally used endpoint.\n\n\nDeletes the existing dump batch, allowing compaction and cleanup to resume.\n\n\u003e **INFO:**\nOn a Coordinator, this request must have a `DBserver`\nquery parameter which must be an ID of a DB-Server.\nThe very same request is forwarded synchronously to that DB-Server.\nIt is an error if this attribute is not bound in the Coordinator case.\n", - "operationId": "deleteReplicationBatch", + "/_db/{database-name}/_api/replication/sync": { + "put": { + "description": "Starts a full data synchronization from a remote endpoint into the local\nArangoDB database.\n\nThe *sync* method can be used by replication clients to connect an ArangoDB database\nto a remote endpoint, fetch the remote list of collections and indexes, and collection\ndata. It will thus create a local backup of the state of data at the remote ArangoDB\ndatabase. *sync* works on a per-database level.\n\n*sync* will first fetch the list of collections and indexes from the remote endpoint.\nIt does so by calling the *inventory* API of the remote database. It will then purge\ndata in the local ArangoDB database, and after start will transfer collection data\nfrom the remote database to the local ArangoDB database. 
It will extract data from the\nremote database by calling the remote database's *dump* API until all data are fetched.\n\nIn case of success, the body of the response is a JSON object with the following\nattributes:\n\n- *collections*: an array of collections that were transferred from the endpoint\n\n- *lastLogTick*: the last log tick on the endpoint at the time the transfer\n was started. Use this value as the *from* value when starting the continuous\n synchronization later.\n\nWARNING: calling this method will synchronize data from the collections found\non the remote endpoint to the local ArangoDB database. All data in the local\ncollections will be purged and replaced with data from the endpoint.\n\nUse with caution!\n\n\u003e **INFO:**\nThis method is not supported on a Coordinator in a cluster deployment.\n", + "operationId": "startReplicationSync", "parameters": [ { - "description": "The id of the batch.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "id", + "name": "database-name", "required": true, "schema": { "type": "string" } } - ], + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "database": { + "description": "the database name on the leader (if not specified, defaults to the\nname of the local current database).\n", + "type": "string" + }, + "endpoint": { + "description": "the leader endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").\n", + "type": "string" + }, + "includeSystem": { + "description": "whether or not system collection operations will be applied\n", + "type": "boolean" + }, + "incremental": { + "description": "if set to *true*, then an incremental synchronization method will be used\nfor synchronizing data in collections. This method is useful when\ncollections already exist locally, and only the remaining differences need\nto be transferred from the remote endpoint. 
In this case, the incremental\nsynchronization can be faster than a full synchronization.\nThe default value is *false*, meaning that the complete data from the remote\ncollection will be transferred.\n", + "type": "boolean" + }, + "initialSyncMaxWaitTime": { + "description": "the maximum wait time (in seconds) that the initial synchronization will\nwait for a response from the leader when fetching initial collection data.\nThis wait time can be used to control after what time the initial synchronization\nwill give up waiting for a response and fail.\nThis value will be ignored if set to *0*.\n", + "type": "integer" + }, + "password": { + "description": "the password to use when connecting to the endpoint.\n", + "type": "string" + }, + "restrictCollections": { + "description": "an optional array of collections for use with\n*restrictType*. If *restrictType* is *include*, only the specified collections\nwill be synchronized. If *restrictType* is *exclude*, all but the specified\ncollections will be synchronized.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "restrictType": { + "description": "an optional string value for collection filtering. 
When\nspecified, the allowed values are *include* or *exclude*.\n", + "type": "string" + }, + "username": { + "description": "an optional ArangoDB username to use when connecting to the endpoint.\n", + "type": "string" + } + }, + "required": [ + "endpoint", + "password" + ], + "type": "object" + } + } + } + }, "responses": { - "204": { - "description": "is returned if the batch was deleted successfully.\n" + "200": { + "description": "is returned if the request was executed successfully.\n" }, "400": { - "description": "is returned if the batch was not found.\n" + "description": "is returned if the configuration is incomplete or malformed.\n" }, "405": { "description": "is returned when an invalid HTTP method is used.\n" + }, + "500": { + "description": "is returned if an error occurred during synchronization.\n" + }, + "501": { + "description": "is returned when this operation is called on a Coordinator in a cluster deployment.\n" } }, - "summary": "Delete an existing dump batch", + "summary": "Start replication from a remote endpoint", "tags": [ "Replication" ] - }, - "put": { - "description": "\u003e **INFO:**\nThis is an internally used endpoint.\n\n\nExtends the time-to-live (TTL) of an existing dump batch, using the batch's ID and\nthe provided TTL value.\n\nIf the batch's TTL can be extended successfully, the response is empty.\n\n\u003e **INFO:**\nOn a Coordinator, this request must have a `DBserver`\nquery parameter which must be an ID of a DB-Server.\nThe very same request is forwarded synchronously to that DB-Server.\nIt is an error if this attribute is not bound in the Coordinator case.\n", - "operationId": "extendReplicationBatch", + } + }, + "/_db/{database-name}/_api/tasks": { + "post": { + "description": "creates a new task with a generated id\n", + "operationId": "createTask", "parameters": [ { - "description": "The id of the batch.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "id", + "name": 
"database-name", "required": true, "schema": { "type": "string" @@ -19203,161 +24669,209 @@ "application/json": { "schema": { "properties": { - "ttl": { - "description": "the time-to-live for the new batch (in seconds)\n", + "command": { + "description": "The JavaScript code to be executed\n", + "type": "string" + }, + "name": { + "description": "The name of the task\n", + "type": "string" + }, + "offset": { + "description": "Number of seconds initial delay\n", + "type": "integer" + }, + "params": { + "description": "The parameters to be passed into command\n", + "type": "string" + }, + "period": { + "description": "number of seconds between the executions\n", "type": "integer" } }, "required": [ - "ttl" + "name", + "command", + "params" ], "type": "object" } } } }, - "responses": { - "204": { - "description": "is returned if the batch's ttl was extended successfully.\n" - }, - "400": { - "description": "is returned if the ttl value is invalid or the batch was not found.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - } - }, - "summary": "Extend the TTL of a dump batch", - "tags": [ - "Replication" - ] - } - }, - "/_api/replication/clusterInventory": { - "get": { - "description": "Returns the array of collections and indexes available on the cluster.\n\nThe response will be an array of JSON objects, one for each collection.\nEach collection contains exactly two keys, `parameters` and `indexes`.\nThis information comes from `Plan/Collections/{DB-Name}/*` in the Agency,\njust that the `indexes` attribute there is relocated to adjust it to\nthe data format of arangodump.\n", - "operationId": "getReplicationClusterInventory", - "parameters": [ - { - "description": "Include system collections in the result. 
The default value is `true`.\n", - "in": "query", - "name": "includeSystem", - "required": false, - "schema": { - "type": "boolean" - } - } - ], "responses": { "200": { - "description": "is returned if the request was executed successfully.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The status code, 200 in this case.\n", + "type": "number" + }, + "command": { + "description": "the javascript function for this task\n", + "type": "string" + }, + "created": { + "description": "The timestamp when this task was created\n", + "type": "number" + }, + "database": { + "description": "the database this task belongs to\n", + "type": "string" + }, + "error": { + "description": "`false` in this case\n", + "type": "boolean" + }, + "id": { + "description": "A string identifying the task\n", + "type": "string" + }, + "offset": { + "description": "time offset in seconds from the created timestamp\n", + "type": "number" + }, + "period": { + "description": "this task should run each `period` seconds\n", + "type": "number" + }, + "type": { + "description": "What type of task is this [ `periodic`, `timed`]\n - periodic are tasks that repeat periodically\n - timed are tasks that execute once at a specific time\n", + "type": "string" + } + }, + "required": [ + "id", + "created", + "type", + "period", + "offset", + "command", + "database", + "code", + "error" + ], + "type": "object" + } + } + }, + "description": "The task was registered\n" }, - "500": { - "description": "is returned if an error occurred while assembling the response.\n" + "400": { + "description": "If the post body is not accurate, a *HTTP 400* is returned.\n" } }, - "summary": "Get the cluster collections and indexes", + "summary": "Create a task", "tags": [ - "Replication" + "Tasks" ] } }, - "/_api/replication/dump": { + "/_db/{database-name}/_api/tasks/": { "get": { - 
"description": "Returns the data from a collection for the requested range.\n\nThe `chunkSize` query parameter can be used to control the size of the result.\nIt must be specified in bytes. The `chunkSize` value will only be honored\napproximately. Otherwise a too low `chunkSize` value could cause the server\nto not be able to put just one entry into the result and return it.\nTherefore, the `chunkSize` value will only be consulted after an entry has\nbeen written into the result. If the result size is then greater than\n`chunkSize`, the server will respond with as many entries as there are\nin the response already. If the result size is still less than `chunkSize`,\nthe server will try to return more data if there's more data left to return.\n\nIf `chunkSize` is not specified, some server-side default value will be used.\n\nThe `Content-Type` of the result is `application/x-arango-dump`. This is an\neasy-to-process format, with all entries going onto separate lines in the\nresponse body.\n\nEach line itself is a JSON object, with at least the following attributes:\n\n- `tick`: the operation's tick attribute\n\n- `key`: the key of the document/edge or the key used in the deletion operation\n\n- `rev`: the revision id of the document/edge or the deletion operation\n\n- `data`: the actual document/edge data for types 2300 and 2301. The full\n document/edge data will be returned even for updates.\n\n- `type`: the type of entry. Possible values for `type` are:\n\n - 2300: document insertion/update\n\n - 2301: edge insertion/update\n\n - 2302: document/edge deletion\n\n\u003e **INFO:**\nThere will be no distinction between inserts and updates when calling this method.\n", - "operationId": "getReplicationDump", + "description": "fetches all existing tasks on the server\n", + "operationId": "listTasks", "parameters": [ { - "description": "The name or id of the collection to dump.\n", - "in": "query", - "name": "collection", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } - }, - { - "description": "Approximate maximum size of the returned result.\n", - "in": "query", - "name": "chunkSize", - "required": false, - "schema": { - "type": "number" - } - }, - { - "description": "The id of the snapshot to use\n", - "in": "query", - "name": "batchId", - "required": true, - "schema": { - "type": "number" - } } ], "responses": { "200": { - "description": "is returned if the request was executed successfully and data was returned. The header\n`x-arango-replication-lastincluded` is set to the tick of the last document returned.\n" - }, - "204": { - "description": "is returned if the request was executed successfully, but there was no content available.\nThe header `x-arango-replication-lastincluded` is `0` in this case.\n" - }, - "404": { - "description": "is returned when the collection could not be found.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if an error occurred while assembling the response.\n" + "content": { + "application/json": { + "schema": { + "description": "a list of all tasks\n", + "items": { + "properties": { + "command": { + "description": "The JavaScript function for this task.\n", + "type": "string" + }, + "created": { + "description": "The timestamp when this task was created.\n", + "type": "number" + }, + "database": { + "description": "The database this task belongs to.\n", + "type": "string" + }, + "id": { + "description": "A string identifying the task.\n", + "type": "string" + }, + "name": { + "description": "A user-friendly name for the task.\n", + "type": "string" + }, + "offset": { + "description": "Time offset in seconds from the `created` timestamp.\n", + "type": "number" + 
}, + "period": { + "description": "This task should run each `period` seconds.\n", + "type": "number" + }, + "type": { + "description": "What type of task is this [ `periodic`, `timed`]\n - periodic are tasks that repeat periodically\n - timed are tasks that execute once at a specific time\n", + "type": "string" + } + }, + "required": [ + "name", + "id", + "created", + "type", + "period", + "offset", + "command", + "database" + ], + "type": "object" + }, + "type": "array" + } + } + }, + "description": "The list of tasks\n" } }, - "summary": "Get a replication dump", + "summary": "List all tasks", "tags": [ - "Replication" + "Tasks" ] } }, - "/_api/replication/inventory": { - "get": { - "description": "Returns the array of collections and their indexes, and the array of Views available. These\narrays can be used by replication clients to initiate an initial synchronization with the\nserver.\nThe response will contain all collections, their indexes and views in the requested database\nif `global` is not set, and all collections, indexes and views in all databases if `global`\nis set.\nIn case `global` is not set, it is possible to restrict the response to a single collection\nby setting the `collection` parameter. In this case the response will contain only information\nabout the requested collection in the `collections` array, and no information about views\n(i.e. the `views` response attribute will be an empty array).\n\nThe response will contain a JSON object with the `collections`, `views`, `state` and\n`tick` attributes.\n\n`collections` is an array of collections with the following sub-attributes:\n\n- `parameters`: the collection properties\n\n- `indexes`: an array of the indexes of the collection. Primary indexes and edge indexes\n are not included in this array.\n\nThe `state` attribute contains the current state of the replication logger. It\ncontains the following sub-attributes:\n\n- `running`: whether or not the replication logger is currently active. 
Note:\n since ArangoDB 2.2, the value will always be `true`\n\n- `lastLogTick`: the value of the last tick the replication logger has written\n\n- `time`: the current time on the server\n\n`views` is an array of available views.\n\nReplication clients should note the `lastLogTick` value returned. They can then\nfetch collections' data using the dump method up to the value of lastLogTick, and\nquery the continuous replication log for log events after this tick value.\n\nTo create a full copy of the collections on the server, a replication client\ncan execute these steps:\n\n- call the `/inventory` API method. This returns the `lastLogTick` value and the\n array of collections and indexes from the server.\n\n- for each collection returned by `/inventory`, create the collection locally and\n call `/dump` to stream the collection data to the client, up to the value of\n `lastLogTick`.\n After that, the client can create the indexes on the collections as they were\n reported by `/inventory`.\n\nIf the clients wants to continuously stream replication log events from the logger\nserver, the following additional steps need to be carried out:\n\n- the client should call `/_api/wal/tail` initially to fetch the first batch of\n replication events that were logged after the client's call to `/inventory`.\n\n The call to `/_api/wal/tail` should use a `from` parameter with the value of the\n `lastLogTick` as reported by `/inventory`. The call to `/_api/wal/tail` will\n return the `x-arango-replication-lastincluded` header which will contain the\n last tick value included in the response.\n\n- the client can then continuously call `/_api/wal/tail` to incrementally fetch new\n replication events that occurred after the last transfer.\n\n Calls should use a `from` parameter with the value of the `x-arango-replication-lastincluded`\n header of the previous response. 
If there are no more replication events, the\n response will be empty and clients can go to sleep for a while and try again\n later.\n\n\u003e **INFO:**\nOn a Coordinator, this request must have a `DBserver`\nquery parameter which must be an ID of a DB-Server.\nThe very same request is forwarded synchronously to that DB-Server.\nIt is an error if this attribute is not bound in the Coordinator case.\n\n\n\u003e **INFO:**\nUsing the `global` parameter the top-level object contains a key `databases`\nunder which each key represents a database name, and the value conforms to the above description.\n", - "operationId": "getReplicationInventory", + "/_db/{database-name}/_api/tasks/{id}": { + "delete": { + "description": "Deletes the task identified by `id` on the server.\n", + "operationId": "deleteTask", "parameters": [ { - "description": "Include system collections in the result. The default value is `true`.\n", - "in": "query", - "name": "includeSystem", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "Include all databases in the response. Only works on `_system` The default value is `false`.\n", - "in": "query", - "name": "global", - "required": false, - "schema": { - "type": "boolean" - } - }, - { - "description": "A valid batchId is required for this API call\n", - "in": "query", - "name": "batchId", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has administrate access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { - "type": "number" + "type": "string" } }, { - "description": "If this parameter is set, the response will be restricted to a single collection (the one\nspecified), and no views will be returned. 
This can be used as an optimization to reduce\nthe size of the response.\n", - "in": "query", - "name": "collection", - "required": false, + "description": "The id of the task to delete.\n", + "in": "path", + "name": "id", + "required": true, "schema": { "type": "string" } @@ -19365,256 +24879,204 @@ ], "responses": { "200": { - "description": "is returned if the request was executed successfully.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if an error occurred while assembling the response.\n" - } - }, - "summary": "Get a replication inventory", - "tags": [ - "Replication" - ] - } - }, - "/_api/replication/logger-first-tick": { - "get": { - "description": "Returns the first available tick value that can be served from the server's\nreplication log. This method can be called by replication clients after to\ndetermine if certain data (identified by a tick value) is still available\nfor replication.\n\nThe result is a JSON object containing the attribute `firstTick`. 
This\nattribute contains the minimum tick value available in the server's\nreplication\nlog.\n\n\u003e **INFO:**\nThis method is not supported on a Coordinator in a cluster deployment.\n", - "operationId": "getReplicationLoggerFirstTick", - "responses": { - "200": { - "description": "is returned if the request was executed successfully.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if an error occurred while assembling the response.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The status code, 200 in this case.\n", + "type": "number" + }, + "error": { + "description": "`false` in this case\n", + "type": "boolean" + } + }, + "required": [ + "code", + "error" + ], + "type": "object" + } + } + }, + "description": "If the task was deleted, *HTTP 200* is returned.\n" }, - "501": { - "description": "is returned when this operation is called on a Coordinator in a cluster deployment.\n" + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The status code, 404 in this case.\n", + "type": "number" + }, + "error": { + "description": "`true` in this case\n", + "type": "boolean" + }, + "errorMessage": { + "description": "A plain text message stating what went wrong.\n", + "type": "string" + } + }, + "required": [ + "code", + "error", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "If the task `id` is unknown, then an *HTTP 404* is returned.\n" } }, - "summary": "Get the first available replication tick value", + "summary": "Delete a task", "tags": [ - "Replication" + "Tasks" ] - } - }, - "/_api/replication/logger-follow": { + }, "get": { - "description": "\u003e **WARNING:**\nThis route should no longer be used.\nIt is considered as deprecated from version 3.4.0 on. 
Client applications\nshould use the REST API endpoint `/_api/wal/tail` instead.\n\n\nReturns data from the server's replication log. This method can be called\nby replication clients after an initial synchronization of data. The method\nwill return all \"recent\" log entries from the logger server, and the clients\ncan replay and apply these entries locally so they get to the same data\nstate as the logger server.\n\nClients can call this method repeatedly to incrementally fetch all changes\nfrom the logger server. In this case, they should provide the `from` value so\nthey will only get returned the log events since their last fetch.\n\nWhen the `from` query parameter is not used, the logger server will return log\nentries starting at the beginning of its replication log. When the `from`\nparameter is used, the logger server will only return log entries which have\nhigher tick values than the specified `from` value (note: the log entry with a\ntick value equal to `from` will be excluded). Use the `from` value when\nincrementally fetching log data.\n\nThe `to` query parameter can be used to optionally restrict the upper bound of\nthe result to a certain tick value. If used, the result will contain only log events\nwith tick values up to (including) `to`. In incremental fetching, there is no\nneed to use the `to` parameter. It only makes sense in special situations,\nwhen only parts of the change log are required.\n\nThe `chunkSize` query parameter can be used to control the size of the result.\nIt must be specified in bytes. The `chunkSize` value will only be honored\napproximately. Otherwise a too low `chunkSize` value could cause the server\nto not be able to put just one log entry into the result and return it.\nTherefore, the `chunkSize` value will only be consulted after a log entry has\nbeen written into the result. If the result size is then greater than\n`chunkSize`, the server will respond with as many log entries as there are\nin the response already. 
If the result size is still less than `chunkSize`,\nthe server will try to return more data if there's more data left to return.\n\nIf `chunkSize` is not specified, some server-side default value will be used.\n\nThe `Content-Type` of the result is `application/x-arango-dump`. This is an\neasy-to-process format, with all log events going onto separate lines in the\nresponse body. Each log event itself is a JSON object, with at least the\nfollowing attributes:\n\n- `tick`: the log event tick value\n\n- `type`: the log event type\n\nIndividual log events will also have additional attributes, depending on the\nevent type. A few common attributes which are used for multiple events types\nare:\n\n- `cid`: id of the collection the event was for\n\n- `tid`: id of the transaction the event was contained in\n\n- `key`: document key\n\n- `rev`: document revision id\n\n- `data`: the original document data\n\nThe response will also contain the following HTTP headers:\n\n- `x-arango-replication-active`: whether or not the logger is active. Clients\n can use this flag as an indication for their polling frequency. If the\n logger is not active and there are no more replication events available, it\n might be sensible for a client to abort, or to go to sleep for a long time\n and try again later to check whether the logger has been activated.\n\n- `x-arango-replication-lastincluded`: the tick value of the last included\n value in the result. In incremental log fetching, this value can be used\n as the `from` value for the following request. **Note** that if the result is\n empty, the value will be `0`. This value should not be used as `from` value\n by clients in the next request (otherwise the server would return the log\n events from the start of the log again).\n\n- `x-arango-replication-lasttick`: the last tick value the logger server has\n logged (not necessarily included in the result). 
By comparing the last\n tick and last included tick values, clients have an approximate indication of\n how many events there are still left to fetch.\n\n- `x-arango-replication-checkmore`: whether or not there already exists more\n log data which the client could fetch immediately. If there is more log data\n available, the client could call `logger-follow` again with an adjusted `from`\n value to fetch remaining log entries until there are no more.\n\n If there isn't any more log data to fetch, the client might decide to go\n to sleep for a while before calling the logger again.\n\n\u003e **INFO:**\nThis method is not supported on a Coordinator in a cluster deployment.\n", - "operationId": "getReplicationLoggerFollow", + "description": "fetches one existing task on the server specified by `id`\n", + "operationId": "getTask", "parameters": [ { - "description": "Exclusive lower bound tick value for results.\n", - "in": "query", - "name": "from", - "required": false, - "schema": { - "type": "number" - } - }, - { - "description": "Inclusive upper bound tick value for results.\n", - "in": "query", - "name": "to", - "required": false, - "schema": { - "type": "number" - } - }, - { - "description": "Approximate maximum size of the returned result.\n", - "in": "query", - "name": "chunkSize", - "required": false, + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, "schema": { - "type": "number" + "type": "string" } }, { - "description": "Include system collections in the result. 
The default value is `true`.\n", - "in": "query", - "name": "includeSystem", - "required": false, + "description": "The id of the task to fetch.\n", + "in": "path", + "name": "id", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], "responses": { "200": { - "description": "is returned if the request was executed successfully, and there are log\nevents available for the requested range. The response body will not be empty\nin this case.\n" - }, - "204": { - "description": "is returned if the request was executed successfully, but there are no log\nevents available for the requested range. The response body will be empty\nin this case.\n" - }, - "400": { - "description": "is returned if either the `from` or `to` values are invalid.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if an error occurred while assembling the response.\n" - }, - "501": { - "description": "is returned when this operation is called on a Coordinator in a cluster deployment.\n" - } - }, - "summary": "Get replication log entries (deprecated)", - "tags": [ - "Replication" - ] - } - }, - "/_api/replication/logger-state": { - "get": { - "description": "Returns the current state of the server's replication logger. The state will\ninclude information about whether the logger is running and about the last\nlogged tick value. 
This tick value is important for incremental fetching of\ndata.\n\nThe body of the response contains a JSON object with the following\nattributes:\n\n- `state`: the current logger state as a JSON object with the following\n sub-attributes:\n\n - `running`: whether or not the logger is running\n\n - `lastLogTick`: the tick value of the latest tick the logger has logged.\n This value can be used for incremental fetching of log data.\n\n - `totalEvents`: total number of events logged since the server was started.\n The value is not reset between multiple stops and re-starts of the logger.\n\n - `time`: the current date and time on the logger server\n\n- `server`: a JSON object with the following sub-attributes:\n\n - `version`: the logger server's version\n\n - `serverId`: the logger server's id\n\n- `clients`: returns the last fetch status by replication clients connected to\n the logger. Each client is returned as a JSON object with the following attributes:\n\n - `syncerId`: id of the client syncer\n\n - `serverId`: server id of client\n\n - `lastServedTick`: last tick value served to this client via the WAL tailing API\n\n - `time`: date and time when this client last called the WAL tailing API\n", - "operationId": "getReplicationLoggerState", - "responses": { - "200": { - "description": "is returned if the logger state could be determined successfully.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if the logger state could not be determined.\n" + "content": { + "application/json": { + "schema": { + "description": "The function in question\n", + "properties": { + "command": { + "description": "The JavaScript function for this task.\n", + "type": "string" + }, + "created": { + "description": "The timestamp when this task was created.\n", + "type": "number" + }, + "database": { + "description": "The database this task belongs to.\n", + "type": "string" + }, + "id": { + 
"description": "A string identifying the task.\n", + "type": "string" + }, + "name": { + "description": "A user-friendly name for the task.\n", + "type": "string" + }, + "offset": { + "description": "Time offset in seconds from the `created` timestamp.\n", + "type": "number" + }, + "period": { + "description": "This task should run each `period` seconds.\n", + "type": "number" + }, + "type": { + "description": "What type of task is this [ `periodic`, `timed`]\n - periodic are tasks that repeat periodically\n - timed are tasks that execute once at a specific time\n", + "type": "string" + } + }, + "required": [ + "name", + "id", + "created", + "type", + "period", + "offset", + "command", + "database" + ], + "type": "object" + } + } + }, + "description": "The requested task\n" } }, - "summary": "Get the replication logger state", + "summary": "Get a task", "tags": [ - "Replication" + "Tasks" ] - } - }, - "/_api/replication/logger-tick-ranges": { - "get": { - "description": "Returns the currently available ranges of tick values for all currently\navailable WAL logfiles. The tick values can be used to determine if certain\ndata (identified by tick value) are still available for replication.\n\nThe body of the response contains a JSON array. Each array member is an\nobject\nthat describes a single logfile. Each object has the following attributes:\n\n- `datafile`: name of the logfile\n\n- `status`: status of the datafile, in textual form (e.g. 
\"sealed\", \"open\")\n\n- `tickMin`: minimum tick value contained in logfile\n\n- `tickMax`: maximum tick value contained in logfile\n", - "operationId": "getReplicationLoggerTickRanges", - "responses": { - "200": { - "description": "is returned if the tick ranges could be determined successfully.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if the logger state could not be determined.\n" + }, + "put": { + "description": "Registers a new task with the specified ID.\n\nNot compatible with load balancers.\n", + "operationId": "createTaskWithId", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } }, - "501": { - "description": "is returned when this operation is called on a Coordinator in a cluster deployment.\n" + { + "description": "The id of the task to create\n", + "in": "path", + "name": "id", + "required": true, + "schema": { + "type": "string" + } } - }, - "summary": "Get the tick ranges available in the WAL logfiles", - "tags": [ - "Replication" - ] - } - }, - "/_api/replication/make-follower": { - "put": { - "description": "\u003e **WARNING:**\nCalling this endpoint will synchronize data from the collections found on the\nremote leader to the local ArangoDB database. All data in the local collections\nwill be purged and replaced with data from the leader. Use with caution!\n\n\n\u003e **INFO:**\nThis command may take a long time to complete and return. 
This is because it\nwill first do a full data synchronization with the leader, which will take time\nroughly proportional to the amount of data.\n\n\nChanges the role to a follower and starts a full data synchronization from a\nremote endpoint into the local ArangoDB database and afterwards starts the\ncontinuous replication.\n\nThe operation works on a per-database level.\n\nAll local database data will be removed prior to the synchronization.\n\nIn case of success, the body of the response is a JSON object with the following\nattributes:\n\n- `state`: a JSON object with the following sub-attributes:\n\n - `running`: whether or not the applier is active and running\n\n - `lastAppliedContinuousTick`: the last tick value from the continuous\n replication log the applier has applied.\n\n - `lastProcessedContinuousTick`: the last tick value from the continuous\n replication log the applier has processed.\n\n Regularly, the last applied and last processed tick values should be\n identical. For transactional operations, the replication applier will first\n process incoming log events before applying them, so the processed tick\n value might be higher than the applied tick value. This will be the case\n until the applier encounters the *transaction commit* log event for the\n transaction.\n\n - `lastAvailableContinuousTick`: the last tick value the remote server can\n provide.\n\n - `ticksBehind`: this attribute will be present only if the applier is currently\n running. It will provide the number of log ticks between what the applier\n has applied/seen and the last log tick value provided by the remote server.\n If this value is zero, then both servers are in sync. 
If this is non-zero,\n then the remote server has additional data that the applier has not yet\n fetched and processed, or the remote server may have more data that is not\n applicable to the applier.\n\n Client applications can use it to determine approximately how far the applier\n is behind the remote server, and can periodically check if the value is\n increasing (applier is falling behind) or decreasing (applier is catching up).\n\n Please note that as the remote server will only keep one last log tick value\n for all of its databases, but replication may be restricted to just certain\n databases on the applier, this value is more meaningful when the global applier\n is used.\n Additionally, the last log tick provided by the remote server may increase\n due to writes into system collections that are not replicated due to replication\n configuration. So the reported value may exaggerate the reality a bit for\n some scenarios.\n\n - `time`: the time on the applier server.\n\n - `totalRequests`: the total number of requests the applier has made to the\n endpoint.\n\n - `totalFailedConnects`: the total number of failed connection attempts the\n applier has made.\n\n - `totalEvents`: the total number of log events the applier has processed.\n\n - `totalOperationsExcluded`: the total number of log events excluded because\n of `restrictCollections`.\n\n - `progress`: a JSON object with details about the replication applier progress.\n It contains the following sub-attributes if there is progress to report:\n\n - `message`: a textual description of the progress\n\n - `time`: the date and time the progress was logged\n\n - `failedConnects`: the current number of failed connection attempts\n\n - `lastError`: a JSON object with details about the last error that happened on\n the applier. 
It contains the following sub-attributes if there was an error:\n\n - `errorNum`: a numerical error code\n\n - `errorMessage`: a textual error description\n\n - `time`: the date and time the error occurred\n\n In case no error has occurred, `lastError` will be empty.\n\n- `server`: a JSON object with the following sub-attributes:\n\n - `version`: the applier server's version\n\n - `serverId`: the applier server's id\n\n- `endpoint`: the endpoint the applier is connected to (if applier is\n active) or will connect to (if applier is currently inactive)\n\n- `database`: the name of the database the applier is connected to (if applier is\n active) or will connect to (if applier is currently inactive)\n\nPlease note that all \"tick\" values returned do not have a specific unit. Tick\nvalues are only meaningful when compared to each other. Higher tick values mean\n\"later in time\" than lower tick values.\n\n\u003e **INFO:**\nThis endpoint is not supported on a Coordinator in a cluster deployment.\n", - "operationId": "makeReplicationFollower", + ], "requestBody": { "content": { "application/json": { "schema": { "properties": { - "adaptivePolling": { - "description": "whether or not the replication applier will use adaptive polling.\n", - "type": "boolean" - }, - "autoResync": { - "description": "whether or not the follower should perform an automatic resynchronization with\nthe leader in case the leader cannot serve log data requested by the follower,\nor when the replication is started and no tick value can be found.\n", - "type": "boolean" - }, - "autoResyncRetries": { - "description": "number of resynchronization retries that will be performed in a row when\nautomatic resynchronization is enabled and kicks in. Setting this to `0` will\neffectively disable `autoResync`. Setting it to some other value will limit\nthe number of retries that are performed. 
This helps preventing endless retries\nin case resynchronizations always fail.\n", - "type": "integer" - }, - "chunkSize": { - "description": "the requested maximum size for log transfer packets that\nis used when the endpoint is contacted.\n", - "type": "integer" - }, - "connectTimeout": { - "description": "the timeout (in seconds) when attempting to connect to the\nendpoint. This value is used for each connection attempt.\n", - "type": "integer" - }, - "connectionRetryWaitTime": { - "description": "the time (in seconds) that the applier will intentionally idle before\nit retries connecting to the leader in case of connection problems.\nThis value will be ignored if set to `0`.\n", - "type": "integer" - }, - "database": { - "description": "the database name on the leader (if not specified, defaults to the\nname of the local current database).\n", + "command": { + "description": "The JavaScript code to be executed\n", "type": "string" }, - "endpoint": { - "description": "the leader endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").\n", + "name": { + "description": "The name of the task\n", "type": "string" }, - "idleMaxWaitTime": { - "description": "the maximum wait time (in seconds) that the applier will intentionally idle\nbefore fetching more log data from the leader in case the leader has\nalready sent all its log data and there have been previous log fetch attempts\nthat resulted in no more log data. This wait time can be used to control the\nmaximum frequency with which the replication applier sends HTTP log fetch\nrequests to the leader in case there is no write activity on the leader for\nlonger periods. 
This configuration value will only be used if the option\n`adaptivePolling` is set to `true`.\nThis value will be ignored if set to `0`.\n", - "type": "integer" - }, - "idleMinWaitTime": { - "description": "the minimum wait time (in seconds) that the applier will intentionally idle\nbefore fetching more log data from the leader in case the leader has\nalready sent all its log data. This wait time can be used to control the\nfrequency with which the replication applier sends HTTP log fetch requests\nto the leader in case there is no write activity on the leader.\nThis value will be ignored if set to `0`.\n", - "type": "integer" - }, - "includeSystem": { - "description": "whether or not system collection operations will be applied\n", - "type": "boolean" - }, - "initialSyncMaxWaitTime": { - "description": "the maximum wait time (in seconds) that the initial synchronization will\nwait for a response from the leader when fetching initial collection data.\nThis wait time can be used to control after what time the initial synchronization\nwill give up waiting for a response and fail. This value is relevant even\nfor continuous replication when `autoResync` is set to `true` because this\nmay re-start the initial synchronization when the leader cannot provide\nlog data the follower requires.\nThis value will be ignored if set to `0`.\n", - "type": "integer" - }, - "maxConnectRetries": { - "description": "the maximum number of connection attempts the applier\nwill make in a row. 
If the applier cannot establish a connection to the\nendpoint in this number of attempts, it will stop itself.\n", + "offset": { + "description": "Number of seconds initial delay\n", "type": "integer" }, - "password": { - "description": "the password to use when connecting to the leader.\n", + "params": { + "description": "The parameters to be passed into command\n", "type": "string" }, - "requestTimeout": { - "description": "the timeout (in seconds) for individual requests to the endpoint.\n", + "period": { + "description": "number of seconds between the executions\n", "type": "integer" - }, - "requireFromPresent": { - "description": "if set to `true`, then the replication applier will check\nat start of its continuous replication if the start tick from the dump phase\nis still present on the leader. If not, then there would be data loss. If\n`requireFromPresent` is `true`, the replication applier will abort with an\nappropriate error message. If set to `false`, then the replication applier will\nstill start, and ignore the data loss.\n", - "type": "boolean" - }, - "restrictCollections": { - "description": "an optional array of collections for use with `restrictType`.\nIf `restrictType` is `include`, only the specified collections\nwill be synchronized. If `restrictType` is `exclude`, all but the specified\ncollections will be synchronized.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "restrictType": { - "description": "an optional string value for collection filtering. When\nspecified, the allowed values are `include` or `exclude`.\n", - "type": "string" - }, - "username": { - "description": "an optional ArangoDB username to use when connecting to the leader.\n", - "type": "string" - }, - "verbose": { - "description": "if set to `true`, then a log line will be emitted for all operations\nperformed by the replication applier. 
This should be used for debugging\nreplication\nproblems only.\n", - "type": "boolean" } }, "required": [ - "endpoint", - "database", - "password", - "includeSystem" + "name", + "command", + "params" ], "type": "object" } @@ -19622,362 +25084,370 @@ } }, "responses": { - "200": { - "description": "is returned if the request was executed successfully.\n" - }, "400": { - "description": "is returned if the configuration is incomplete or malformed.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if an error occurred during synchronization or when starting the\ncontinuous replication.\n" - }, - "501": { - "description": "is returned when this operation is called on a Coordinator in a cluster deployment.\n" + "description": "If the task `id` already exists or the rest body is not accurate, *HTTP 400* is returned.\n" } }, - "summary": "Turn a server into a follower of another", + "summary": "Create a task with ID", "tags": [ - "Replication" + "Tasks" ] } }, - "/_api/replication/revisions/documents": { - "put": { - "description": "\u003e **WARNING:**\nThis revision-based replication endpoint will only work with collections\ncreated in ArangoDB v3.8.0 or later.\n\n\nReturns documents by revision for replication.\n\nThe body of the request should be JSON/VelocyPack and should consist of an\narray of string-encoded revision IDs:\n\n```\n[\n \u003cString, revision\u003e,\n \u003cString, revision\u003e,\n ...\n \u003cString, revision\u003e\n]\n```\n\nIn particular, the revisions should be sorted in ascending order of their\ndecoded values.\n\nThe result will be a JSON/VelocyPack array of document objects. If there is no\ndocument corresponding to a particular requested revision, an empty object will\nbe returned in its place.\n\nThe response may be truncated if it would be very long. 
In this case, the\nresponse array length will be less than the request array length, and\nsubsequent requests can be made for the omitted documents.\n\nEach `\u003cString, revision\u003e` value type is a 64-bit value encoded as a string of\n11 characters, using the same encoding as our document `_rev` values. The\nreason for this is that 64-bit values cannot necessarily be represented in full\nin JavaScript, as it handles all numbers as floating point, and can only\nrepresent up to `2^53-1` faithfully.\n", - "operationId": "listReplicationRevisionDocuments", + "/_db/{database-name}/_api/transaction": { + "get": { + "description": "The result is an object with the `transactions` attribute, which contains\nan array of transactions.\nIn a cluster the array will contain the transactions from all Coordinators.\n\nEach array entry contains an object with the following attributes:\n\n- `id`: the transaction's id\n- `state`: the transaction's status\n", + "operationId": "listStreamTransactions", "parameters": [ { - "description": "The name or id of the collection to query.\n", - "in": "query", - "name": "collection", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } - }, + } + ], + "responses": { + "200": { + "description": "If the list of transactions can be retrieved successfully, *HTTP 200* will be returned.\n" + } + }, + "summary": "List the running Stream Transactions", + "tags": [ + "Transactions" + ] + }, + "post": { + "description": "\u003e **WARNING:**\nJavaScript Transactions are deprecated from v3.12.0 onward and will be\nremoved in a future version.\n\n\nThe transaction description must be passed in the body of the POST request.\n\nIf the transaction is fully executed and committed on the server,\n*HTTP 200* will be returned. 
Additionally, the return value of the\ncode defined in `action` will be returned in the `result` attribute.\n\nFor successfully committed transactions, the returned JSON object has the\nfollowing properties:\n\n- `error`: boolean flag to indicate if an error occurred (`false`\n in this case)\n\n- `code`: the HTTP status code\n\n- `result`: the return value of the transaction\n\nIf the transaction specification is either missing or malformed, the server\nwill respond with *HTTP 400*.\n\nThe body of the response will then contain a JSON object with additional error\ndetails. The object has the following attributes:\n\n- `error`: boolean flag to indicate that an error occurred (`true` in this case)\n\n- `code`: the HTTP status code\n\n- `errorNum`: the server error number\n\n- `errorMessage`: a descriptive error message\n\nIf a transaction fails to commit, either by an exception thrown in the\n`action` code, or by an internal error, the server will respond with\nan error.\nAny other errors will be returned with any of the return codes\n*HTTP 400*, *HTTP 409*, or *HTTP 500*.\n", + "operationId": "executeJavaScriptTransaction", + "parameters": [ { - "description": "The id of the snapshot to use\n", - "in": "query", - "name": "batchId", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { - "type": "number" + "type": "string" } } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "action": { + "description": "the actual transaction operations to be executed, in the\nform of stringified JavaScript code. The code will be executed on server\nside, with late binding. 
It is thus critical that the code specified in\n`action` properly sets up all the variables it needs.\nIf the code specified in `action` ends with a return statement, the\nvalue returned will also be returned by the REST API in the `result`\nattribute if the transaction committed successfully.\n", + "type": "string" + }, + "allowImplicit": { + "description": "Allow reading from undeclared collections.\n", + "type": "boolean" + }, + "collections": { + "description": "`collections` must be a JSON object that can have one or all sub-attributes\n`read`, `write` or `exclusive`, each being an array of collection names or a\nsingle collection name as string. Collections that will be written to in the\ntransaction must be declared with the `write` or `exclusive` attribute or it\nwill fail, whereas non-declared collections from which is solely read will be\nadded lazily. The optional sub-attribute `allowImplicit` can be set to `false`\nto let transactions fail in case of undeclared collections for reading.\nCollections for reading should be fully declared if possible, to avoid\ndeadlocks.\n", + "type": "string" + }, + "lockTimeout": { + "description": "an optional numeric value that can be used to set a\ntimeout in seconds for waiting on collection locks. This option is only\nmeaningful when using exclusive locks. If not specified, a default value of\n900 seconds will be used. 
Setting `lockTimeout` to `0` will make ArangoDB\nnot time out waiting for a lock.\n", + "type": "integer" + }, + "maxTransactionSize": { + "description": "Transaction size limit in bytes.\n", + "type": "integer" + }, + "params": { + "description": "optional arguments passed to `action`.\n", + "type": "string" + }, + "waitForSync": { + "description": "an optional boolean flag that, if set, will force the\ntransaction to write all data to disk before returning.\n", + "type": "boolean" + } + }, + "required": [ + "collections", + "action" + ], + "type": "object" + } + } + } + }, "responses": { "200": { - "description": "is returned if the request was executed successfully and data was returned.\n" - }, - "401": { - "description": "is returned if necessary parameters are missing or incorrect\n" + "description": "If the transaction is fully executed and committed on the server,\n*HTTP 200* will be returned.\n" }, - "404": { - "description": "is returned when the collection or snapshot could not be found.\n" + "400": { + "description": "If the transaction specification is either missing or malformed, the server\nwill respond with *HTTP 400*.\n" }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" + "404": { + "description": "If the transaction specification contains an unknown collection, the server\nwill respond with *HTTP 404*.\n" }, "500": { - "description": "is returned if an error occurred while assembling the response.\n" - }, - "501": { - "description": "is returned if called on a collection which doesn't support sync-by-revision\n" + "description": "Exceptions thrown by users will make the server respond with a return code of\n*HTTP 500*\n" } }, - "summary": "Get documents by revision", + "summary": "Execute a JavaScript Transaction", "tags": [ - "Replication" + "Transactions" ] } }, - "/_api/replication/revisions/ranges": { - "put": { - "description": "\u003e **WARNING:**\nThis revision-based replication endpoint will only work with 
the RocksDB\nengine, and with collections created in ArangoDB v3.8.0 or later.\n\n\nReturns the revision IDs of documents within the requested ranges.\n\nThe body of the request should be JSON/VelocyPack and should consist of an\narray of pairs of string-encoded revision IDs:\n\n```\n[\n [\u003cString, revision\u003e, \u003cString, revision\u003e],\n [\u003cString, revision\u003e, \u003cString, revision\u003e],\n ...\n [\u003cString, revision\u003e, \u003cString, revision\u003e]\n]\n```\n\nIn particular, the pairs should be non-overlapping, and sorted in ascending\norder of their decoded values.\n\nThe result will be JSON/VelocyPack in the following format:\n```\n{\n ranges: [\n [\u003cString, revision\u003e, \u003cString, revision\u003e, ... \u003cString, revision\u003e],\n [\u003cString, revision\u003e, \u003cString, revision\u003e, ... \u003cString, revision\u003e],\n ...,\n [\u003cString, revision\u003e, \u003cString, revision\u003e, ... \u003cString, revision\u003e]\n ]\n resume: \u003cString, revision\u003e\n}\n```\n\nThe `resume` field is optional. If specified, then the response is to be\nconsidered partial, only valid through the revision specified. A subsequent\nrequest should be made with the same request body, but specifying the `resume`\nURL parameter with the value specified. The subsequent response will pick up\nfrom the appropriate request pair, and omit any complete ranges or revisions\nwhich are less than the requested resume revision. As an example (ignoring the\nstring-encoding for a moment), if ranges `[1, 3], [5, 9], [12, 15]` are\nrequested, then a first response may return `[], [5, 6]` with a resume point of\n`7` and a subsequent response might be `[8], [12, 13]`.\n\nIf a requested range contains no revisions, then an empty array is returned.\nEmpty ranges will not be omitted.\n\nEach `\u003cString, revision\u003e` value type is a 64-bit value encoded as a string of\n11 characters, using the same encoding as our document `_rev` values. 
The\nreason for this is that 64-bit values cannot necessarily be represented in full\nin JavaScript, as it handles all numbers as floating point, and can only\nrepresent up to `2^53-1` faithfully.\n", - "operationId": "listReplicationRevisionRanges", + "/_db/{database-name}/_api/transaction/begin": { + "post": { + "description": "Begin a Stream Transaction that allows clients to call selected APIs over a\nshort period of time, referencing the transaction ID, and have the server\nexecute the operations transactionally.\n\nCommitting or aborting a running transaction must be done by the client.\nIt is bad practice to not commit or abort a transaction once you are done\nusing it. It forces the server to keep resources and collection locks\nuntil the entire transaction times out.\n\nThe transaction description must be passed in the body of the POST request.\nIf the transaction can be started on the server, *HTTP 201* will be returned.\n\nFor successfully started transactions, the returned JSON object has the\nfollowing properties:\n\n- `error`: boolean flag to indicate if an error occurred (`false`\n in this case)\n\n- `code`: the HTTP status code\n\n- `result`: result containing\n - `id`: the identifier of the transaction\n - `status`: containing the string 'running'\n\nIf the transaction specification is either missing or malformed, the server\nwill respond with *HTTP 400* or *HTTP 404*.\n\nThe body of the response will then contain a JSON object with additional error\ndetails. 
The object has the following attributes:\n\n- `error`: boolean flag to indicate that an error occurred (`true` in this case)\n\n- `code`: the HTTP status code\n\n- `errorNum`: the server error number\n\n- `errorMessage`: a descriptive error message\n", + "operationId": "beginStreamTransaction", "parameters": [ { - "description": "The name or id of the collection to query.\n", - "in": "query", - "name": "collection", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The id of the snapshot to use\n", - "in": "query", - "name": "batchId", - "required": true, - "schema": { - "type": "number" - } - }, - { - "description": "The revision at which to resume, if a previous request was truncated\n", - "in": "query", - "name": "resume", + "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. This may result in \"dirty reads\".\n\nThis header decides about dirty reads for the entire transaction. Individual\nread operations, that are performed as part of the transaction, cannot override it.\n", + "in": "header", + "name": "x-arango-allow-dirty-read", "required": false, "schema": { - "type": "string" + "type": "boolean" } } ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "allowImplicit": { + "description": "Allow reading from undeclared collections.\n", + "type": "boolean" + }, + "collections": { + "description": "`collections` must be a JSON object that can have one or all sub-attributes\n`read`, `write` or `exclusive`, each being an array of collection names or a\nsingle collection name as string. 
Collections that will be written to in the\ntransaction must be declared with the `write` or `exclusive` attribute or it\nwill fail, whereas non-declared collections from which is solely read will be\nadded lazily.\n", + "type": "object" + }, + "lockTimeout": { + "description": "an optional numeric value that can be used to set a\ntimeout in seconds for waiting on collection locks. This option is only\nmeaningful when using exclusive locks. If not specified, a default\nvalue will be used. Setting `lockTimeout` to `0` will make ArangoDB\nnot time out waiting for a lock.\n", + "type": "integer" + }, + "maxTransactionSize": { + "description": "Transaction size limit in bytes.\n", + "type": "integer" + }, + "skipFastLockRound": { + "default": false, + "description": "Whether to disable fast locking for write operations.\n\nSkipping the fast lock round can be faster overall if there are many concurrent\nStream Transactions queued that all try to lock the same collection exclusively.\nIt avoids deadlocking and retrying which can occur with the fast locking by\nguaranteeing a deterministic locking order at the expense of each actual\nlocking operation taking longer.\n\nFast locking should not be skipped for read-only Stream Transactions because\nit degrades performance if there are no concurrent transactions that use\nexclusive locks on the same collection.\n", + "type": "boolean" + }, + "waitForSync": { + "description": "an optional boolean flag that, if set, will force the\ntransaction to write all data to disk before returning.\n", + "type": "boolean" + } + }, + "required": [ + "collections" + ], + "type": "object" + } + } + } + }, "responses": { - "200": { - "description": "is returned if the request was executed successfully and data was returned.\n" + "201": { + "description": "If the transaction is running on the server,\n*HTTP 201* will be returned.\n" }, - "401": { - "description": "is returned if necessary parameters are missing or incorrect\n" + "400": { + 
"description": "If the transaction specification is either missing or malformed, the server\nwill respond with *HTTP 400*.\n" }, "404": { - "description": "is returned when the collection or snapshot could not be found.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if an error occurred while assembling the response.\n" - }, - "501": { - "description": "is returned if called on a collection which doesn't support sync-by-revision\n" + "description": "If the transaction specification contains an unknown collection, the server\nwill respond with *HTTP 404*.\n" } }, - "summary": "List document revision IDs within requested ranges", + "summary": "Begin a Stream Transaction", "tags": [ - "Replication" + "Transactions" ] } }, - "/_api/replication/revisions/tree": { - "get": { - "description": "\u003e **WARNING:**\nThis revision-based replication endpoint will only work with collections\ncreated in ArangoDB v3.8.0 or later.\n\n\nReturns the Merkle tree associated with the specified collection.\n\nThe result will be JSON/VelocyPack in the following format:\n```\n{\n version: \u003cNumber\u003e,\n branchingFactor: \u003cNumber\u003e\n maxDepth: \u003cNumber\u003e,\n rangeMin: \u003cString, revision\u003e,\n rangeMax: \u003cString, revision\u003e,\n nodes: [\n { count: \u003cNumber\u003e, hash: \u003cString, revision\u003e },\n { count: \u003cNumber\u003e, hash: \u003cString, revision\u003e },\n ...\n { count: \u003cNumber\u003e, hash: \u003cString, revision\u003e }\n ]\n}\n```\n\nAt the moment, there is only one version, 1, so this can safely be ignored for\nnow.\n\nEach `\u003cString, revision\u003e` value type is a 64-bit value encoded as a string of\n11 characters, using the same encoding as our document `_rev` values. 
The\nreason for this is that 64-bit values cannot necessarily be represented in full\nin JavaScript, as it handles all numbers as floating point, and can only\nrepresent up to `2^53-1` faithfully.\n\nThe node count should correspond to a full tree with the given `maxDepth` and\n`branchingFactor`. The nodes are laid out in level-order tree traversal, so the\nroot is at index `0`, its children at indices `[1, branchingFactor]`, and so\non.\n", - "operationId": "getReplicationRevisionTree", + "/_db/{database-name}/_api/transaction/{transaction-id}": { + "delete": { + "description": "Abort a running server-side transaction. Aborting is an idempotent operation.\nIt is not an error to abort a transaction more than once.\n\nIf the transaction can be aborted, *HTTP 200* will be returned.\nThe returned JSON object has the following properties:\n\n- `error`: boolean flag to indicate if an error occurred (`false`\n in this case)\n\n- `code`: the HTTP status code\n\n- `result`: result containing\n - `id`: the identifier of the transaction\n - `status`: containing the string 'aborted'\n\nIf the transaction cannot be found, aborting is not allowed or the\ntransaction was already committed, the server\nwill respond with *HTTP 400*, *HTTP 404* or *HTTP 409*.\n\nThe body of the response will then contain a JSON object with additional error\ndetails. 
The object has the following attributes:\n\n- `error`: boolean flag to indicate that an error occurred (`true` in this case)\n\n- `code`: the HTTP status code\n\n- `errorNum`: the server error number\n\n- `errorMessage`: a descriptive error message\n",
        "operationId": "abortStreamTransaction",
        "parameters": [
          {
            "description": "The name of the database.\n",
            "example": "_system",
            "in": "path",
            "name": "database-name",
            "required": true,
            "schema": {
              "type": "string"
            }
          },
          {
            "description": "The transaction identifier.\n",
            "in": "path",
            "name": "transaction-id",
            "required": true,
            "schema": {
              "type": "string"
            }
          }
        ],
        "responses": {
          "200": {
            "description": "If the transaction was aborted,\n*HTTP 200* will be returned.\n"
          },
          "400": {
            "description": "If the transaction cannot be aborted, the server\nwill respond with *HTTP 400*.\n"
          },
          "404": {
            "description": "If the transaction was not found, the server\nwill respond with *HTTP 404*.\n"
          },
          "409": {
            "description": "If the transaction was already committed, the server\nwill respond with *HTTP 409*.\n"
          }
        },
        "summary": "Abort a Stream Transaction",
        "tags": [
          "Transactions"
        ]
}, - "post": { - "description": "\u003e **WARNING:**\nThis revision-based replication endpoint will only work with collections\ncreated in ArangoDB v3.8.0 or later.\n\n\nRebuilds the Merkle tree for a collection.\n\nIf successful, there will be no return body.\n", - "operationId": "rebuildReplicationRevisionTree", + "get": { + "description": "The result is an object describing the status of the transaction.\nIt has at least the following attributes:\n\n- `id`: the identifier of the transaction\n\n- `status`: the status of the transaction. One of \"running\", \"committed\" or \"aborted\".\n", + "operationId": "getStreamTransaction", "parameters": [ { - "description": "The name or id of the collection to query.\n", - "in": "query", - "name": "collection", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "204": { - "description": "is returned if the request was executed successfully.\n" - }, - "401": { - "description": "is returned if necessary parameters are missing\n" - }, - "404": { - "description": "is returned when the collection or could not be found.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if an error occurred while assembling the response.\n" - }, - "501": { - "description": "is returned if called on a collection which doesn't support sync-by-revision\n" - } - }, - "summary": "Rebuild the replication revision tree", - "tags": [ - "Replication" - ] - } - }, - "/_api/replication/server-id": { - "get": { - "description": "Returns the servers id. The id is also returned by other replication API\nmethods, and this method is an easy means of determining a server's id.\n\nThe body of the response is a JSON object with the attribute `serverId`. 
The\nserver id is returned as a string.\n", - "operationId": "getReplicationServerId", - "responses": { - "200": { - "description": "is returned if the request was executed successfully.\n" - }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" - }, - "500": { - "description": "is returned if an error occurred while assembling the response.\n" - } - }, - "summary": "Get the replication server ID", - "tags": [ - "Replication" - ] - } - }, - "/_api/replication/sync": { - "put": { - "description": "Starts a full data synchronization from a remote endpoint into the local\nArangoDB database.\n\nThe *sync* method can be used by replication clients to connect an ArangoDB database\nto a remote endpoint, fetch the remote list of collections and indexes, and collection\ndata. It will thus create a local backup of the state of data at the remote ArangoDB\ndatabase. *sync* works on a per-database level.\n\n*sync* will first fetch the list of collections and indexes from the remote endpoint.\nIt does so by calling the *inventory* API of the remote database. It will then purge\ndata in the local ArangoDB database, and after start will transfer collection data\nfrom the remote database to the local ArangoDB database. It will extract data from the\nremote database by calling the remote database's *dump* API until all data are fetched.\n\nIn case of success, the body of the response is a JSON object with the following\nattributes:\n\n- *collections*: an array of collections that were transferred from the endpoint\n\n- *lastLogTick*: the last log tick on the endpoint at the time the transfer\n was started. Use this value as the *from* value when starting the continuous\n synchronization later.\n\nWARNING: calling this method will synchronize data from the collections found\non the remote endpoint to the local ArangoDB database. 
All data in the local\ncollections will be purged and replaced with data from the endpoint.\n\nUse with caution!\n\n\u003e **INFO:**\nThis method is not supported on a Coordinator in a cluster deployment.\n", - "operationId": "startReplicationSync", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "database": { - "description": "the database name on the leader (if not specified, defaults to the\nname of the local current database).\n", - "type": "string" - }, - "endpoint": { - "description": "the leader endpoint to connect to (e.g. \"tcp://192.168.173.13:8529\").\n", - "type": "string" - }, - "includeSystem": { - "description": "whether or not system collection operations will be applied\n", - "type": "boolean" - }, - "incremental": { - "description": "if set to *true*, then an incremental synchronization method will be used\nfor synchronizing data in collections. This method is useful when\ncollections already exist locally, and only the remaining differences need\nto be transferred from the remote endpoint. In this case, the incremental\nsynchronization can be faster than a full synchronization.\nThe default value is *false*, meaning that the complete data from the remote\ncollection will be transferred.\n", - "type": "boolean" - }, - "initialSyncMaxWaitTime": { - "description": "the maximum wait time (in seconds) that the initial synchronization will\nwait for a response from the leader when fetching initial collection data.\nThis wait time can be used to control after what time the initial synchronization\nwill give up waiting for a response and fail.\nThis value will be ignored if set to *0*.\n", - "type": "integer" - }, - "password": { - "description": "the password to use when connecting to the endpoint.\n", - "type": "string" - }, - "restrictCollections": { - "description": "an optional array of collections for use with\n*restrictType*. 
If *restrictType* is *include*, only the specified collections\nwill be synchronized. If *restrictType* is *exclude*, all but the specified\ncollections will be synchronized.\n", - "items": { - "type": "string" - }, - "type": "array" - }, - "restrictType": { - "description": "an optional string value for collection filtering. When\nspecified, the allowed values are *include* or *exclude*.\n", - "type": "string" - }, - "username": { - "description": "an optional ArangoDB username to use when connecting to the endpoint.\n", - "type": "string" - } - }, - "required": [ - "endpoint", - "password" - ], - "type": "object" - } + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The transaction identifier.\n", + "in": "path", + "name": "transaction-id", + "required": true, + "schema": { + "type": "string" } } - }, + ], "responses": { "200": { - "description": "is returned if the request was executed successfully.\n" + "description": "If the transaction is fully executed and committed on the server,\n*HTTP 200* will be returned.\n" }, "400": { - "description": "is returned if the configuration is incomplete or malformed.\n" + "description": "If the transaction identifier specified is either missing or malformed, the server\nwill respond with *HTTP 400*.\n" }, - "405": { - "description": "is returned when an invalid HTTP method is used.\n" + "404": { + "description": "If the transaction was not found with the specified identifier, the server\nwill respond with *HTTP 404*.\n" + } + }, + "summary": "Get the status of a Stream Transaction", + "tags": [ + "Transactions" + ] + }, + "put": { + "description": "Commit a running server-side transaction. 
Committing is an idempotent operation.\nIt is not an error to commit a transaction more than once.\n\nIf the transaction can be committed, *HTTP 200* will be returned.\nThe returned JSON object has the following properties:\n\n- `error`: boolean flag to indicate if an error occurred (`false`\n in this case)\n\n- `code`: the HTTP status code\n\n- `result`: result containing\n  - `id`: the identifier of the transaction\n  - `status`: containing the string 'committed'\n\nIf the transaction cannot be found, committing is not allowed or the\ntransaction was aborted, the server\nwill respond with *HTTP 400*, *HTTP 404* or *HTTP 409*.\n\nThe body of the response will then contain a JSON object with additional error\ndetails. The object has the following attributes:\n\n- `error`: boolean flag to indicate that an error occurred (`true` in this case)\n\n- `code`: the HTTP status code\n\n- `errorNum`: the server error number\n\n- `errorMessage`: a descriptive error message\n",
        "operationId": "commitStreamTransaction",
        "parameters": [
          {
            "description": "The name of the database.\n",
            "example": "_system",
            "in": "path",
            "name": "database-name",
            "required": true,
            "schema": {
              "type": "string"
            }
          },
          {
            "description": "The transaction identifier.\n",
            "in": "path",
            "name": "transaction-id",
            "required": true,
            "schema": {
              "type": "string"
            }
          }
        ],
        "responses": {
          "200": {
            "description": "If the transaction was committed,\n*HTTP 200* will be returned.\n"
          },
          "400": {
            "description": "If the transaction cannot be committed, the server\nwill respond with *HTTP 400*.\n"
          },
          "404": {
            "description": "If the transaction was not found, the server\nwill respond with *HTTP 404*.\n"
          },
          "409": {
            "description": "If the transaction was already 
aborted, the server\nwill respond with *HTTP 409*.\n" } }, - "summary": "Start replication from a remote endpoint", + "summary": "Commit a Stream Transaction", "tags": [ - "Replication" + "Transactions" ] } }, - "/_api/tasks": { + "/_db/{database-name}/_api/user": { "post": { - "description": "creates a new task with a generated id\n", - "operationId": "createTask", + "description": "Create a new user. You need server access level *Administrate* in order to\nexecute this REST call.\n", + "operationId": "createUser", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], "requestBody": { "content": { "application/json": { "schema": { "properties": { - "command": { - "description": "The JavaScript code to be executed\n", - "type": "string" - }, - "name": { - "description": "The name of the task\n", - "type": "string" + "active": { + "description": "An optional flag that specifies whether the user is active. If not\nspecified, this will default to `true`.\n", + "type": "boolean" }, - "offset": { - "description": "Number of seconds initial delay\n", - "type": "integer" + "extra": { + "description": "A JSON object with extra user information. It is used by the web interface\nto store graph viewer settings and saved queries. Should not be set or\nmodified by end users, as custom attributes will not be preserved.\n", + "type": "object" }, - "params": { - "description": "The parameters to be passed into command\n", + "passwd": { + "description": "The user password as a string. 
If not specified, it will default to an empty\nstring.\n", "type": "string" }, - "period": { - "description": "number of seconds between the executions\n", - "type": "integer" + "user": { + "description": "The name of the user as a string. This is mandatory.\n", + "type": "string" } }, "required": [ - "name", - "command", - "params" + "user", + "passwd" ], "type": "object" } @@ -19985,154 +25455,80 @@ } }, "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The status code, 200 in this case.\n", - "type": "number" - }, - "command": { - "description": "the javascript function for this task\n", - "type": "string" - }, - "created": { - "description": "The timestamp when this task was created\n", - "type": "number" - }, - "database": { - "description": "the database this task belongs to\n", - "type": "string" - }, - "error": { - "description": "`false` in this case\n", - "type": "boolean" - }, - "id": { - "description": "A string identifying the task\n", - "type": "string" - }, - "offset": { - "description": "time offset in seconds from the created timestamp\n", - "type": "number" - }, - "period": { - "description": "this task should run each `period` seconds\n", - "type": "number" - }, - "type": { - "description": "What type of task is this [ `periodic`, `timed`]\n - periodic are tasks that repeat periodically\n - timed are tasks that execute once at a specific time\n", - "type": "string" - } - }, - "required": [ - "id", - "created", - "type", - "period", - "offset", - "command", - "database", - "code", - "error" - ], - "type": "object" - } - } - }, - "description": "The task was registered\n" + "201": { + "description": "Returned if the user can be added by the server\n" }, "400": { - "description": "If the post body is not accurate, a *HTTP 400* is returned.\n" + "description": "If the JSON representation is malformed or mandatory data is missing\nfrom the request.\n" + }, + "401": { + 
"description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" + }, + "403": { + "description": "Returned if you have *No access* server access level.\n" + }, + "409": { + "description": "Returned if a user with the same name already exists.\n" } }, - "summary": "Create a task", + "summary": "Create a user", "tags": [ - "Tasks" + "Users" ] } }, - "/_api/tasks/": { + "/_db/{database-name}/_api/user/": { "get": { - "description": "fetches all existing tasks on the server\n", - "operationId": "listTasks", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "description": "a list of all tasks\n", - "items": { - "properties": { - "command": { - "description": "The JavaScript function for this task.\n", - "type": "string" - }, - "created": { - "description": "The timestamp when this task was created.\n", - "type": "number" - }, - "database": { - "description": "The database this task belongs to.\n", - "type": "string" - }, - "id": { - "description": "A string identifying the task.\n", - "type": "string" - }, - "name": { - "description": "A user-friendly name for the task.\n", - "type": "string" - }, - "offset": { - "description": "Time offset in seconds from the `created` timestamp.\n", - "type": "number" - }, - "period": { - "description": "This task should run each `period` seconds.\n", - "type": "number" - }, - "type": { - "description": "What type of task is this [ `periodic`, `timed`]\n - periodic are tasks that repeat periodically\n - timed are tasks that execute once at a specific time\n", - "type": "string" - } - }, - "required": [ - "name", - "id", - "created", - "type", - "period", - "offset", - "command", - "database" - ], - "type": "object" - }, - "type": "array" - } - } - }, - "description": "The list of tasks\n" + "description": "Fetches data about all users. You need the *Administrate* server access level\nin order to execute this REST call. 
Otherwise, you will only get information\nabout yourself.\n\nThe call will return a JSON object with at least the following\nattributes on success:\n\n- `user`: The name of the user as a string.\n- `active`: An optional flag that specifies whether the user is active.\n- `extra`: A JSON object with extra user information. It is used by the web\n interface to store graph viewer settings and saved queries.\n", + "operationId": "listUsers", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "description": "The users that were found.\n" + }, + "401": { + "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" + }, + "403": { + "description": "Returned if you have *No access* server access level.\n" } }, - "summary": "List all tasks", + "summary": "List available users", "tags": [ - "Tasks" + "Users" ] } }, - "/_api/tasks/{id}": { + "/_db/{database-name}/_api/user/{user}": { "delete": { - "description": "Deletes the task identified by `id` on the server.\n", - "operationId": "deleteTask", + "description": "Removes an existing user, identified by `user`.\n\nYou need *Administrate* permissions for the server access level in order to\nexecute this REST call.\n", + "operationId": "deleteUser", "parameters": [ { - "description": "The id of the task to delete.\n", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", "in": "path", - "name": "id", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user\n", + "in": "path", + "name": "user", "required": true, "schema": { "type": "string" @@ -20140,73 +25536,42 @@ } ], "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The status code, 200 in this case.\n", - "type": "number" - }, - "error": { - "description": "`false` in this case\n", - "type": "boolean" - } - }, - "required": [ - "code", - "error" - ], - "type": "object" - } - } - }, - "description": "If the task was deleted, *HTTP 200* is returned.\n" + "202": { + "description": "Is returned if the user was removed by the server\n" + }, + "401": { + "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" + }, + "403": { + "description": "Returned if you have *No access* server access level.\n" }, "404": { - "content": { - "application/json": { - "schema": { - "properties": { - "code": { - "description": "The status code, 404 in this case.\n", - "type": "number" - }, - "error": { - "description": "`true` in this case\n", - "type": "boolean" - }, - "errorMessage": { - "description": "A plain text message stating what went wrong.\n", - "type": "string" - } - }, - "required": [ - "code", - "error", - "errorMessage" - ], - "type": "object" - } - } - }, - "description": "If the task `id` is unknown, then an *HTTP 404* is returned.\n" + "description": "The specified user does not exist\n" } }, - "summary": "Delete a task", + "summary": "Remove a user", "tags": [ - "Tasks" + "Users" ] }, "get": { - "description": "fetches one existing task on the server specified by `id`\n", - 
"operationId": "getTask", + "description": "Fetches data about the specified user. You can fetch information about\nyourself or you need the *Administrate* server access level in order to\nexecute this REST call.\n", + "operationId": "getUser", "parameters": [ { - "description": "The id of the task to fetch.\n", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", "in": "path", - "name": "id", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user\n", + "in": "path", + "name": "user", "required": true, "schema": { "type": "string" @@ -20215,74 +25580,113 @@ ], "responses": { "200": { - "content": { - "application/json": { - "schema": { - "description": "The function in question\n", - "properties": { - "command": { - "description": "The JavaScript function for this task.\n", - "type": "string" - }, - "created": { - "description": "The timestamp when this task was created.\n", - "type": "number" - }, - "database": { - "description": "The database this task belongs to.\n", - "type": "string" - }, - "id": { - "description": "A string identifying the task.\n", - "type": "string" - }, - "name": { - "description": "A user-friendly name for the task.\n", - "type": "string" - }, - "offset": { - "description": "Time offset in seconds from the `created` timestamp.\n", - "type": "number" - }, - "period": { - "description": "This task should run each `period` seconds.\n", - "type": "number" - }, - "type": { - "description": "What type of task is this [ `periodic`, `timed`]\n - periodic are tasks that repeat periodically\n - timed are tasks that execute once at a specific time\n", - "type": "string" - } + "description": "The user was found.\n" + }, + "401": { + "description": "Returned if you have *No 
access* database access level to the `_system`\ndatabase.\n" + }, + "403": { + "description": "Returned if you have *No access* server access level.\n" + }, + "404": { + "description": "The user with the specified name does not exist.\n" + } + }, + "summary": "Get a user", + "tags": [ + "Users" + ] + }, + "patch": { + "description": "Partially modifies the data of an existing user. You need server access level\n*Administrate* in order to execute this REST call. Additionally, users can\nchange their own data.\n", + "operationId": "updateUserData", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user.\n", + "in": "path", + "name": "user", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "active": { + "description": "An optional flag that specifies whether the user is active.\n", + "type": "boolean" }, - "required": [ - "name", - "id", - "created", - "type", - "period", - "offset", - "command", - "database" - ], - "type": "object" - } + "extra": { + "description": "A JSON object with extra user information. It is used by the web interface\nto store graph viewer settings and saved queries. 
Should not be set or\nmodified by end users, as custom attributes will not be preserved.\n", + "type": "object" + }, + "passwd": { + "description": "The user password as a string.\n", + "type": "string" + } + }, + "required": [ + "passwd" + ], + "type": "object" } - }, - "description": "The requested task\n" + } } }, - "summary": "Get a task", + "responses": { + "200": { + "description": "Is returned if the user data can be replaced by the server.\n" + }, + "400": { + "description": "The JSON representation is malformed or mandatory data is missing from the request.\n" + }, + "401": { + "description": "Returned if you have *No access* database access level to the *_system*\ndatabase.\n" + }, + "403": { + "description": "Returned if you have *No access* server access level.\n" + }, + "404": { + "description": "The specified user does not exist\n" + } + }, + "summary": "Update a user", "tags": [ - "Tasks" + "Users" ] }, "put": { - "description": "Registers a new task with the specified ID.\n\nNot compatible with load balancers.\n", - "operationId": "createTaskWithId", + "description": "Replaces the data of an existing user. You need server access level\n*Administrate* in order to execute this REST call. Additionally, users can\nchange their own data.\n", + "operationId": "replaceUserData", "parameters": [ { - "description": "The id of the task to create\n", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user.\n", "in": "path", - "name": "id", + "name": "user", "required": true, "schema": { "type": "string" @@ -20294,31 +25698,21 @@ "application/json": { "schema": { "properties": { - "command": { - "description": "The JavaScript code to be executed\n", - "type": "string" - }, - "name": { - "description": "The name of the task\n", - "type": "string" + "active": { + "description": "An optional flag that specifies whether the user is active. If not\nspecified, this will default to *true*.\n", + "type": "boolean" }, - "offset": { - "description": "Number of seconds initial delay\n", - "type": "integer" + "extra": { + "description": "A JSON object with extra user information. It is used by the web interface\nto store graph viewer settings and saved queries. Should not be set or\nmodified by end users, as custom attributes will not be preserved.\n", + "type": "object" }, - "params": { - "description": "The parameters to be passed into command\n", + "passwd": { + "description": "The user password as a string. 
If not specified, it will default to an empty\nstring.\n", "type": "string" - }, - "period": { - "description": "number of seconds between the executions\n", - "type": "integer" } }, "required": [ - "name", - "command", - "params" + "passwd" ], "type": "object" } @@ -20326,108 +25720,211 @@ } }, "responses": { + "200": { + "description": "Is returned if the user data can be replaced by the server.\n" + }, "400": { - "description": "If the task `id` already exists or the rest body is not accurate, *HTTP 400* is returned.\n" + "description": "The JSON representation is malformed or mandatory data is missing from the request\n" + }, + "401": { + "description": "Returned if you have *No access* database access level to the *_system*\ndatabase.\n" + }, + "403": { + "description": "Returned if you have *No access* server access level.\n" + }, + "404": { + "description": "The specified user does not exist\n" } }, - "summary": "Create a task with ID", + "summary": "Replace a user", "tags": [ - "Tasks" + "Users" ] } }, - "/_api/transaction": { + "/_db/{database-name}/_api/user/{user}/database/": { "get": { - "description": "The result is an object with the `transactions` attribute, which contains\nan array of transactions.\nIn a cluster the array will contain the transactions from all Coordinators.\n\nEach array entry contains an object with the following attributes:\n\n- `id`: the transaction's id\n- `state`: the transaction's status\n", - "operationId": "listStreamTransactions", + "description": "Fetch the list of databases available to the specified `user`.\n\nYou need *Administrate* permissions for the server access level in order to\nexecute this REST call.\n\nThe call will return a JSON object with the per-database access\nprivileges for the specified user. 
The `result` object will contain\nthe databases names as object keys, and the associated privileges\nfor the database as values.\n\nIn case you specified `full`, the result will contain the permissions\nfor the databases as well as the permissions for the collections.\n", + "operationId": "listUserDatabases", + "parameters": [ + { + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user for which you want to query the databases.\n", + "in": "path", + "name": "user", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "Return the full set of access levels for all databases and all collections.\n", + "in": "query", + "name": "full", + "required": false, + "schema": { + "type": "boolean" + } + } + ], "responses": { "200": { - "description": "If the list of transactions can be retrieved successfully, *HTTP 200* will be returned.\n" + "description": "Returned if the list of available databases can be returned.\n" + }, + "400": { + "description": "If the access privileges are not right etc.\n" + }, + "401": { + "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" + }, + "403": { + "description": "Returned if you have *No access* server access level.\n" } }, - "summary": "List the running Stream Transactions", + "summary": "List a user\u0026rsquo;s accessible databases", "tags": [ - "Transactions" + "Users" ] - }, - "post": { - "description": "The transaction description must be passed in the body of the POST request.\n\nIf the transaction is fully executed and committed on the server,\n*HTTP 200* will be returned. 
Additionally, the return value of the\ncode defined in `action` will be returned in the `result` attribute.\n\nFor successfully committed transactions, the returned JSON object has the\nfollowing properties:\n\n- `error`: boolean flag to indicate if an error occurred (`false`\n in this case)\n\n- `code`: the HTTP status code\n\n- `result`: the return value of the transaction\n\nIf the transaction specification is either missing or malformed, the server\nwill respond with *HTTP 400*.\n\nThe body of the response will then contain a JSON object with additional error\ndetails. The object has the following attributes:\n\n- `error`: boolean flag to indicate that an error occurred (`true` in this case)\n\n- `code`: the HTTP status code\n\n- `errorNum`: the server error number\n\n- `errorMessage`: a descriptive error message\n\nIf a transaction fails to commit, either by an exception thrown in the\n`action` code, or by an internal error, the server will respond with\nan error.\nAny other errors will be returned with any of the return codes\n*HTTP 400*, *HTTP 409*, or *HTTP 500*.\n", - "operationId": "executeJavaScriptTransaction", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "action": { - "description": "the actual transaction operations to be executed, in the\nform of stringified JavaScript code. The code will be executed on server\nside, with late binding. 
It is thus critical that the code specified in\n`action` properly sets up all the variables it needs.\nIf the code specified in `action` ends with a return statement, the\nvalue returned will also be returned by the REST API in the `result`\nattribute if the transaction committed successfully.\n", - "type": "string" - }, - "allowImplicit": { - "description": "Allow reading from undeclared collections.\n", - "type": "boolean" - }, - "collections": { - "description": "`collections` must be a JSON object that can have one or all sub-attributes\n`read`, `write` or `exclusive`, each being an array of collection names or a\nsingle collection name as string. Collections that will be written to in the\ntransaction must be declared with the `write` or `exclusive` attribute or it\nwill fail, whereas non-declared collections from which is solely read will be\nadded lazily. The optional sub-attribute `allowImplicit` can be set to `false`\nto let transactions fail in case of undeclared collections for reading.\nCollections for reading should be fully declared if possible, to avoid\ndeadlocks.\n", - "type": "string" - }, - "lockTimeout": { - "description": "an optional numeric value that can be used to set a\ntimeout in seconds for waiting on collection locks. This option is only\nmeaningful when using exclusive locks. If not specified, a default value of\n900 seconds will be used. 
Setting `lockTimeout` to `0` will make ArangoDB\nnot time out waiting for a lock.\n", - "type": "integer" - }, - "maxTransactionSize": { - "description": "Transaction size limit in bytes.\n", - "type": "integer" - }, - "params": { - "description": "optional arguments passed to `action`.\n", - "type": "string" - }, - "waitForSync": { - "description": "an optional boolean flag that, if set, will force the\ntransaction to write all data to disk before returning.\n", - "type": "boolean" - } - }, - "required": [ - "collections", - "action" - ], - "type": "object" - } + } + }, + "/_db/{database-name}/_api/user/{user}/database/{dbname}": { + "delete": { + "description": "Clears the database access level for the database `dbname` of user `user`. As\nconsequence, the default database access level is used. If there is no defined\ndefault database access level, it defaults to *No access*.\n\nYou need write permissions (*Administrate* access level) for the `_system`\ndatabase in order to execute this REST call.\n", + "operationId": "deleteUserDatabasePermissions", + "parameters": [ + { + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user.\n", + "in": "path", + "name": "user", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the database to clear the access level for.\n", + "in": "path", + "name": "dbname", + "required": true, + "schema": { + "type": "string" } } + ], + "responses": { + "202": { + "description": "Returned if the access permissions were changed successfully.\n" + }, + "400": { + "description": "If the JSON representation is malformed or mandatory data is missing\nfrom the request.\n" + } }, + "summary": "Clear a user\u0026rsquo;s database access level", + "tags": [ + "Users" + ] + }, + "get": { + "description": "Fetch the database access level for a specific database\n", + "operationId": "getUserDatabasePermissions", + "parameters": [ + { + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user for which you want to query the databases.\n", + "in": "path", + "name": "user", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the database to query the access level of.\n", + "in": "path", + "name": "dbname", + "required": true, + "schema": { + "type": "string" + } + } + ], "responses": { "200": { - "description": "If the transaction is fully executed and committed on the server,\n*HTTP 200* will be returned.\n" + "description": "Returned if the access level can be returned\n" }, "400": { - "description": "If the transaction specification is either missing or malformed, the server\nwill respond with *HTTP 400*.\n" + "description": "If the access privileges are not right etc.\n" }, - "404": { - "description": "If the transaction specification contains an unknown collection, the server\nwill respond with *HTTP 404*.\n" + "401": { + "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" }, - "500": { - "description": "Exceptions thrown by users will make the server respond with a return code of\n*HTTP 500*\n" + "403": { + "description": "Returned if you have *No access* server access level.\n" } }, - "summary": "Execute a JavaScript Transaction", + "summary": "Get a user\u0026rsquo;s database access level", "tags": [ - "Transactions" + "Users" ] - } - }, - "/_api/transaction/begin": { - "post": { - "description": "Begin a Stream Transaction that allows clients to call selected APIs over a\nshort period of time, referencing the transaction ID, and have the server\nexecute the operations transactionally.\n\nCommitting or 
aborting a running transaction must be done by the client.\nIt is bad practice to not commit or abort a transaction once you are done\nusing it. It forces the server to keep resources and collection locks\nuntil the entire transaction times out.\n\nThe transaction description must be passed in the body of the POST request.\nIf the transaction can be started on the server, *HTTP 201* will be returned.\n\nFor successfully started transactions, the returned JSON object has the\nfollowing properties:\n\n- `error`: boolean flag to indicate if an error occurred (`false`\n in this case)\n\n- `code`: the HTTP status code\n\n- `result`: result containing\n - `id`: the identifier of the transaction\n - `status`: containing the string 'running'\n\nIf the transaction specification is either missing or malformed, the server\nwill respond with *HTTP 400* or *HTTP 404*.\n\nThe body of the response will then contain a JSON object with additional error\ndetails. The object has the following attributes:\n\n- `error`: boolean flag to indicate that an error occurred (`true` in this case)\n\n- `code`: the HTTP status code\n\n- `errorNum`: the server error number\n\n- `errorMessage`: a descriptive error message\n", - "operationId": "beginStreamTransaction", + }, + "put": { + "description": "Sets the database access levels for the database `dbname` of user `user`. You\nneed the *Administrate* server access level in order to execute this REST\ncall.\n", + "operationId": "setUserDatabasePermissions", "parameters": [ { - "description": "Set this header to `true` to allow the Coordinator to ask any shard replica for\nthe data, not only the shard leader. This may result in \"dirty reads\".\n\nThis header decides about dirty reads for the entire transaction. Individual\nread operations, that are performed as part of the transaction, cannot override it.\n", - "in": "header", - "name": "x-arango-allow-dirty-read", - "required": false, + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user.\n", + "in": "path", + "name": "user", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the database to set the access level for.\n", + "in": "path", + "name": "dbname", + "required": true, "schema": { - "type": "boolean" + "type": "string" } } ], @@ -20436,29 +25933,13 @@ "application/json": { "schema": { "properties": { - "allowImplicit": { - "description": "Allow reading from undeclared collections.\n", - "type": "boolean" - }, - "collections": { - "description": "`collections` must be a JSON object that can have one or all sub-attributes\n`read`, `write` or `exclusive`, each being an array of collection names or a\nsingle collection name as string. Collections that will be written to in the\ntransaction must be declared with the `write` or `exclusive` attribute or it\nwill fail, whereas non-declared collections from which is solely read will be\nadded lazily.\n", + "grant": { + "description": "- Use \"rw\" to set the database access level to *Administrate*.\n- Use \"ro\" to set the database access level to *Access*.\n- Use \"none\" to set the database access level to *No access*.\n", "type": "string" - }, - "lockTimeout": { - "description": "an optional numeric value that can be used to set a\ntimeout in seconds for waiting on collection locks. This option is only\nmeaningful when using exclusive locks. If not specified, a default\nvalue will be used. 
Setting `lockTimeout` to `0` will make ArangoDB\nnot time out waiting for a lock.\n", - "type": "integer" - }, - "maxTransactionSize": { - "description": "Transaction size limit in bytes.\n", - "type": "integer" - }, - "waitForSync": { - "description": "an optional boolean flag that, if set, will force the\ntransaction to write all data to disk before returning.\n", - "type": "boolean" } }, "required": [ - "collections" + "grant" ], "type": "object" } @@ -20466,31 +25947,62 @@ } }, "responses": { - "201": { - "description": "If the transaction is running on the server,\n*HTTP 201* will be returned.\n" + "200": { + "description": "Returned if the access level was changed successfully.\n" }, "400": { - "description": "If the transaction specification is either missing or malformed, the server\nwill respond with *HTTP 400*.\n" + "description": "If the JSON representation is malformed or mandatory data is missing\nfrom the request.\n" }, - "404": { - "description": "If the transaction specification contains an unknown collection, the server\nwill respond with *HTTP 404*.\n" + "401": { + "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" + }, + "403": { + "description": "Returned if you have *No access* server access level.\n" } }, - "summary": "Begin a Stream Transaction", + "summary": "Set a user\u0026rsquo;s database access level", "tags": [ - "Transactions" + "Users" ] } }, - "/_api/transaction/{transaction-id}": { + "/_db/{database-name}/_api/user/{user}/database/{dbname}/{collection}": { "delete": { - "description": "Abort a running server-side transaction. 
Aborting is an idempotent operation.\nIt is not an error to abort a transaction more than once.\n\nIf the transaction can be aborted, *HTTP 200* will be returned.\nThe returned JSON object has the following properties:\n\n- `error`: boolean flag to indicate if an error occurred (`false`\n in this case)\n\n- `code`: the HTTP status code\n\n- `result`: result containing\n - `id`: the identifier of the transaction\n - `status`: containing the string 'aborted'\n\nIf the transaction cannot be found, aborting is not allowed or the\ntransaction was already committed, the server\nwill respond with *HTTP 400*, *HTTP 404* or *HTTP 409*.\n\nThe body of the response will then contain a JSON object with additional error\ndetails. The object has the following attributes:\n\n- `error`: boolean flag to indicate that an error occurred (`true` in this case)\n\n- `code`: the HTTP status code\n\n- `errorNum`: the server error number\n\n- `errorMessage`: a descriptive error message\n", - "operationId": "abortStreamTransaction", + "description": "Clears the collection access level for the collection `collection` in the\ndatabase `dbname` of user `user`. As consequence, the default collection\naccess level is used. If there is no defined default collection access level,\nit defaults to *No access*.\n\nYou need write permissions (*Administrate* access level) for the `_system`\ndatabase in order to execute this REST call.\n", + "operationId": "deleteUserCollectionPermissions", "parameters": [ { - "description": "The transaction identifier,\n", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", "in": "path", - "name": "transaction-id", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user.\n", + "in": "path", + "name": "user", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the database to clear the access level for.\n", + "in": "path", + "name": "dbname", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the collection to clear the access level for.\n", + "in": "path", + "name": "collection", "required": true, "schema": { "type": "string" @@ -20498,32 +26010,54 @@ } ], "responses": { - "200": { - "description": "If the transaction was aborted,\n*HTTP 200* will be returned.\n" + "202": { + "description": "Returned if the access permissions were changed successfully.\n" }, "400": { - "description": "If the transaction cannot be aborted, the server\nwill respond with *HTTP 400*.\n" - }, - "404": { - "description": "If the transaction was not found, the server\nwill respond with *HTTP 404*.\n" - }, - "409": { - "description": "If the transaction was already committed, the server\nwill respond with *HTTP 409*.\n" + "description": "If there was an error\n" } }, - "summary": "Abort a Stream Transaction", + "summary": "Clear a user\u0026rsquo;s collection access level", "tags": [ - "Transactions" + "Users" ] }, "get": { - "description": "The result is an object describing the status of the transaction.\nIt has at least the following attributes:\n\n- `id`: the identifier of the transaction\n\n- `status`: the status of the transaction. 
One of \"running\", \"committed\" or \"aborted\".\n", - "operationId": "getStreamTransaction", + "description": "Returns the collection access level for a specific collection\n", + "operationId": "getUserCollectionPermissions", "parameters": [ { - "description": "The transaction identifier.\n", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", "in": "path", - "name": "transaction-id", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the user for which you want to query the databases.\n", + "in": "path", + "name": "user", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the database to query the access level of.\n", + "in": "path", + "name": "dbname", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the collection to query the access level of.\n", + "in": "path", + "name": "collection", "required": true, "schema": { "type": "string" @@ -20532,83 +26066,77 @@ ], "responses": { "200": { - "description": "If the transaction is fully executed and committed on the server,\n*HTTP 200* will be returned.\n" + "description": "Returned if the access level can be returned\n" }, "400": { - "description": "If the transaction identifier specified is either missing or malformed, the server\nwill respond with *HTTP 400*.\n" + "description": "If the access privileges are not right etc.\n" }, - "404": { - "description": "If the transaction was not found with the specified identifier, the server\nwill respond with *HTTP 404*.\n" + "401": { + "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" + }, + "403": { + "description": "Returned if you have *No access* server access 
level.\n" } }, - "summary": "Get the status of a Stream Transaction", + "summary": "Get a user\u0026rsquo;s collection access level", "tags": [ - "Transactions" + "Users" ] }, "put": { - "description": "Commit a running server-side transaction. Committing is an idempotent operation.\nIt is not an error to commit a transaction more than once.\n\nIf the transaction can be committed, *HTTP 200* will be returned.\nThe returned JSON object has the following properties:\n\n- `error`: boolean flag to indicate if an error occurred (`false`\n in this case)\n\n- `code`: the HTTP status code\n\n- `result`: result containing\n - `id`: the identifier of the transaction\n - `status`: containing the string 'committed'\n\nIf the transaction cannot be found, committing is not allowed or the\ntransaction was aborted, the server\nwill respond with *HTTP 400*, *HTTP 404* or *HTTP 409*.\n\nThe body of the response will then contain a JSON object with additional error\ndetails. The object has the following attributes:\n\n- `error`: boolean flag to indicate that an error occurred (`true` in this case)\n\n- `code`: the HTTP status code\n\n- `errorNum`: the server error number\n\n- `errorMessage`: a descriptive error message\n", - "operationId": "commitStreamTransaction", + "description": "Sets the collection access level for the `collection` in the database `dbname`\nfor user `user`. You need the *Administrate* server access level in order to\nexecute this REST call.\n", + "operationId": "setUserCollectionPermissions", "parameters": [ { - "description": "The transaction identifier,\n", + "description": "The name of a database. 
Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database and administrate access to the `_system` database.\n", + "example": "_system", "in": "path", - "name": "transaction-id", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "responses": { - "200": { - "description": "If the transaction was committed,\n*HTTP 200* will be returned.\n" }, - "400": { - "description": "If the transaction cannot be committed, the server\nwill respond with *HTTP 400*.\n" + { + "description": "The name of the user.\n", + "in": "path", + "name": "user", + "required": true, + "schema": { + "type": "string" + } }, - "404": { - "description": "If the transaction was not found, the server\nwill respond with *HTTP 404*.\n" + { + "description": "The name of the database to set the access level for.\n", + "in": "path", + "name": "dbname", + "required": true, + "schema": { + "type": "string" + } }, - "409": { - "description": "If the transaction was already aborted, the server\nwill respond with *HTTP 409*.\n" + { + "description": "The name of the collection to set the access level for.\n", + "in": "path", + "name": "collection", + "required": true, + "schema": { + "type": "string" + } } - }, - "summary": "Commit a Stream Transaction", - "tags": [ - "Transactions" - ] - } - }, - "/_api/user": { - "post": { - "description": "Create a new user. You need server access level *Administrate* in order to\nexecute this REST call.\n", - "operationId": "createUser", + ], "requestBody": { "content": { "application/json": { "schema": { "properties": { - "active": { - "description": "An optional flag that specifies whether the user is active. If not\nspecified, this will default to `true`.\n", - "type": "boolean" - }, - "extra": { - "description": "A JSON object with extra user information. It is used by the web interface\nto store graph viewer settings and saved queries. 
Should not be set or\nmodified by end users, as custom attributes will not be preserved.\n", - "type": "object" - }, - "passwd": { - "description": "The user password as a string. If not specified, it will default to an empty\nstring.\n", - "type": "string" - }, - "user": { - "description": "The name of the user as a string. This is mandatory.\n", + "grant": { + "description": "Use \"rw\" to set the collection level access to *Read/Write*.\n\nUse \"ro\" to set the collection level access to *Read Only*.\n\nUse \"none\" to set the collection level access to *No access*.\n", "type": "string" } }, "required": [ - "user", - "passwd" + "grant" ], "type": "object" } @@ -20616,8 +26144,8 @@ } }, "responses": { - "201": { - "description": "Returned if the user can be added by the server\n" + "200": { + "description": "Returned if the access permissions were changed successfully.\n" }, "400": { "description": "If the JSON representation is malformed or mandatory data is missing\nfrom the request.\n" @@ -20627,80 +26155,220 @@ }, "403": { "description": "Returned if you have *No access* server access level.\n" - }, - "409": { - "description": "Returned if a user with the same name already exists.\n" } }, - "summary": "Create a user", + "summary": "Set a user\u0026rsquo;s collection access level", "tags": [ "Users" ] } }, - "/_api/user/": { + "/_db/{database-name}/_api/version": { "get": { - "description": "Fetches data about all users. You need the *Administrate* server access level\nin order to execute this REST call. Otherwise, you will only get information\nabout yourself.\n\nThe call will return a JSON object with at least the following\nattributes on success:\n\n- `user`: The name of the user as a string.\n- `active`: An optional flag that specifies whether the user is active.\n- `extra`: A JSON object with extra user information. 
It is used by the web\n interface to store graph viewer settings and saved queries.\n", - "operationId": "listUsers", - "responses": { - "200": { - "description": "The users that were found.\n" - }, - "401": { - "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" - }, - "403": { - "description": "Returned if you have *No access* server access level.\n" - } - }, - "summary": "List available users", - "tags": [ - "Users" - ] - } - }, - "/_api/user/{user}": { - "delete": { - "description": "Removes an existing user, identified by `user`.\n\nYou need *Administrate* permissions for the server access level in order to\nexecute this REST call.\n", - "operationId": "deleteUser", + "description": "Returns the server name and version number. The response is a JSON object\nwith the following attributes:\n", + "operationId": "getVersion", "parameters": [ { - "description": "The name of the user\n", + "description": "The name of a database. Which database you use doesn't matter as long\nas the user account you authenticate with has at least read access\nto this database.\n", + "example": "_system", "in": "path", - "name": "user", + "name": "database-name", "required": true, "schema": { - "type": "string" + "type": "string" + } + }, + { + "description": "If set to `true` and if the user account you authenticate with has\nadministrate access to the `_system` database, the response contains\na `details` attribute with additional information about included\ncomponents and their versions. 
The attribute names and internals of\nthe `details` object may vary depending on platform and ArangoDB version.\n", + "in": "query", + "name": "details", + "required": false, + "schema": { + "type": "boolean" } } ], "responses": { - "202": { - "description": "Is returned if the user was removed by the server\n" - }, - "401": { - "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" - }, - "403": { - "description": "Returned if you have *No access* server access level.\n" - }, - "404": { - "description": "The specified user does not exist\n" + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "details": { + "description": "an optional JSON object with additional details. This is\nreturned only if the `details` query parameter is set to `true` in the\nrequest.\n", + "properties": { + "architecture": { + "description": "The CPU architecture, i.e. `64bit`\n", + "type": "string" + }, + "arm": { + "description": "`false` - this is not running on an ARM cpu\n", + "type": "string" + }, + "asan": { + "description": "has this been compiled with the asan address sanitizer turned on? 
(should be false)\n", + "type": "string" + }, + "assertions": { + "description": "do we have assertions compiled in (=\u003e developer version)\n", + "type": "string" + }, + "boost-version": { + "description": "which boost version do we bind\n", + "type": "string" + }, + "build-date": { + "description": "the date when this binary was created\n", + "type": "string" + }, + "build-repository": { + "description": "reference to the git-ID this was compiled from\n", + "type": "string" + }, + "compiler": { + "description": "which compiler did we use\n", + "type": "string" + }, + "cplusplus": { + "description": "C++ standards version\n", + "type": "string" + }, + "debug": { + "description": "`false` for production binaries\n", + "type": "string" + }, + "endianness": { + "description": "currently only `little` is supported\n", + "type": "string" + }, + "failure-tests": { + "description": "`false` for production binaries (the facility to invoke fatal errors is disabled)\n", + "type": "string" + }, + "fd-client-event-handler": { + "description": "which method do we use to handle fd-sets, `poll` should be here on linux.\n", + "type": "string" + }, + "fd-setsize": { + "description": "if not `poll` the fd setsize is valid for the maximum number of file descriptors\n", + "type": "string" + }, + "full-version-string": { + "description": "The full version string\n", + "type": "string" + }, + "host": { + "description": "the host ID\n", + "type": "string" + }, + "icu-version": { + "description": "Which version of ICU do we bundle\n", + "type": "string" + }, + "jemalloc": { + "description": "`true` if we use jemalloc\n", + "type": "string" + }, + "maintainer-mode": { + "description": "`false` if this is a production binary\n", + "type": "string" + }, + "mode": { + "description": "The mode arangod runs in.\n", + "enum": [ + "server", + "console", + "script" + ], + "type": "string" + }, + "openssl-version": { + "description": "which openssl version do we link?\n", + "type": "string" + 
}, + "platform": { + "description": "the host operating system, always `linux`\n", + "type": "string" + }, + "reactor-type": { + "description": "`epoll`\n", + "type": "string" + }, + "rocksdb-version": { + "description": "the rocksdb version this release bundles\n", + "type": "string" + }, + "server-version": { + "description": "the ArangoDB release version\n", + "type": "string" + }, + "sizeof int": { + "description": "number of bytes for integers\n", + "type": "string" + }, + "sizeof void*": { + "description": "number of bytes for void pointers\n", + "type": "string" + }, + "sse42": { + "description": "do we have a SSE 4.2 enabled cpu?\n", + "type": "string" + }, + "unaligned-access": { + "description": "does this system support unaligned memory access?\n", + "type": "string" + }, + "v8-version": { + "description": "the bundled V8 javascript engine version\n", + "type": "string" + }, + "vpack-version": { + "description": "the version of the used velocypack implementation\n", + "type": "string" + }, + "zlib-version": { + "description": "the version of the bundled zlib\n", + "type": "string" + } + }, + "type": "object" + }, + "server": { + "description": "will always contain `arango`\n", + "type": "string" + }, + "version": { + "description": "the server version string. The string has the format\n`major.minor.sub`. `major` and `minor` will be numeric, and `sub`\nmay contain a number or a textual version.\n", + "type": "string" + } + }, + "required": [ + "server", + "version" + ], + "type": "object" + } + } + }, + "description": "is returned in all cases.\n" } }, - "summary": "Remove a user", + "summary": "Get the server version", "tags": [ - "Users" + "Administration" ] - }, + } + }, + "/_db/{database-name}/_api/view": { "get": { - "description": "Fetches data about the specified user. 
You can fetch information about\nyourself or you need the *Administrate* server access level in order to\nexecute this REST call.\n", - "operationId": "getUser", + "description": "Returns an object containing a listing of all Views in the current database,\nregardless of their type.\n", + "operationId": "listViews", "parameters": [ { - "description": "The name of the user\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "user", + "name": "database-name", "required": true, "schema": { "type": "string" @@ -20709,31 +26377,83 @@ ], "responses": { "200": { - "description": "The user was found.\n" - }, - "401": { - "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" - }, - "403": { - "description": "Returned if you have *No access* server access level.\n" - }, - "404": { - "description": "The user with the specified name does not exist.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "result": { + "description": "The result object.\n", + "items": { + "properties": { + "globallyUniqueId": { + "description": "A unique identifier of the View. 
This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the View (deprecated).\n", + "type": "string" + }, + "name": { + "description": "The name of the View.\n", + "example": "coll", + "type": "string" + }, + "type": { + "description": "The type of the View.\n", + "enum": [ + "arangosearch", + "search-alias" + ], + "type": "string" + } + }, + "required": [ + "name", + "type", + "id", + "globallyUniqueId" + ], + "type": "object" + }, + "type": "array" + } + }, + "required": [ + "error", + "code", + "result" + ], + "type": "object" + } + } + }, + "description": "The list of Views.\n" } }, - "summary": "Get a user", + "summary": "List all Views", "tags": [ - "Users" + "Views" ] }, - "patch": { - "description": "Partially modifies the data of an existing user. You need server access level\n*Administrate* in order to execute this REST call. Additionally, users can\nchange their own data.\n", - "operationId": "updateUserData", + "post": { + "description": "Creates a new View with a given name and properties if it does not\nalready exist.\n", + "operationId": "createView", "parameters": [ { - "description": "The name of the user.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "user", + "name": "database-name", "required": true, "schema": { "type": "string" @@ -20745,57 +26465,471 @@ "application/json": { "schema": { "properties": { - "active": { - "description": "An optional flag that specifies whether the user is active.\n", - "type": "boolean" + "cleanupIntervalStep": { + "default": 2, + "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (`0` = disable).\nFor the case where the consolidation policies merge segments often (i.e. a lot\nof commit+consolidate), a lower value causes a lot of disk space to be\nwasted.\nFor the case where the consolidation policies rarely merge segments (i.e. 
few\ninserts/deletes), a higher value impacts performance without any added\nbenefits.\n\n_Background:_\n With every \"commit\" or \"consolidate\" operation, a new state of the View's\n internal data structures is created on disk.\n Old states/snapshots are released once there are no longer any users\n remaining.\n However, the files for the released states/snapshots are left on disk, and\n only removed by \"cleanup\" operation.\n", + "type": "integer" }, - "extra": { - "description": "A JSON object with extra user information. It is used by the web interface\nto store graph viewer settings and saved queries. Should not be set or\nmodified by end users, as custom attributes will not be preserved.\n", + "commitIntervalMsec": { + "default": 1000, + "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (`0` = disable).\nFor the case where there are a lot of inserts/updates, a higher value causes the\nindex not to account for them and memory usage continues to grow until the commit.\nA lower value impacts performance, including the case where there are no or only a\nfew inserts/updates because of synchronous locking, and it wastes disk space for\neach commit call.\n\n_Background:_\n For data retrieval, ArangoSearch follows the concept of\n \"eventually-consistent\", i.e. 
eventually all the data in ArangoDB will be\n matched by corresponding query expressions.\n The concept of ArangoSearch \"commit\" operations is introduced to\n control the upper-bound on the time until document addition/removals are\n actually reflected by corresponding query expressions.\n Once a \"commit\" operation is complete, all documents added/removed prior to\n the start of the \"commit\" operation will be reflected by queries invoked in\n subsequent ArangoDB transactions, in-progress ArangoDB transactions will\n still continue to return a repeatable-read state.\n", + "type": "integer" + }, + "consolidationIntervalMsec": { + "default": 10000, + "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(`0` = disable).\nFor the case where there are a lot of data modification operations, a higher\nvalue could potentially have the data store consume more space and file handles.\nFor the case where there are a few data modification operations, a lower value\nimpacts performance due to no segment candidates being available for\nconsolidation.\n\n_Background:_\n For data modification, ArangoSearch follows the concept of a\n \"versioned data store\". Thus old versions of data may be removed once there\n are no longer any users of the old data. 
The frequency of the cleanup and\n compaction operations are governed by `consolidationIntervalMsec` and the\n candidates for compaction are selected via `consolidationPolicy`.\n", + "type": "integer" + }, + "consolidationPolicy": { + "description": "The consolidation policy to apply for selecting which segments should be merged.\n\n- If the `tier` type is used, then the `segments*` and `minScore` properties are available.\n- If the `bytes_accum` type is used, then the `threshold` property is available.\n\n_Background:_\n With each ArangoDB transaction that inserts documents, one or more\n ArangoSearch-internal segments get created.\n Similarly, for removed documents, the segments that contain such documents\n have these documents marked as 'deleted'.\n Over time, this approach causes a lot of small and sparse segments to be\n created.\n A \"consolidation\" operation selects one or more segments and copies all of\n their valid documents into a single new segment, thereby allowing the\n search algorithm to perform more optimally and for extra file handles to be\n released once old segments are no longer used.\n", + "properties": { + "minScore": { + "default": 0, + "description": "Filter out consolidation candidates with a score less than this.\n", + "type": "integer" + }, + "segmentsBytesFloor": { + "default": 2097152, + "description": "Defines the value (in bytes) to treat all smaller segments\nas equal for consolidation selection.\n", + "type": "integer" + }, + "segmentsBytesMax": { + "default": 5368709120, + "description": "Maximum allowed size of all consolidated segments in bytes.\n", + "type": "integer" + }, + "segmentsMax": { + "default": 10, + "description": "The maximum number of segments that are evaluated as\ncandidates for consolidation.\n", + "type": "integer" + }, + "segmentsMin": { + "default": 1, + "description": "The minimum number of segments that are\nevaluated as candidates for consolidation\n", + "type": "integer" + }, + "threshold": { + 
"default": 0, + "description": "A value in the range `[0.0, 1.0]`.\n", + "maximum": 1, + "minimum": 0, + "type": "number" + }, + "type": { + "default": "tier", + "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe currently supported types are:\n- `\"tier\"`: consolidate based on segment byte size and live\n document count as dictated by the customization attributes. \n- `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`.\n", + "enum": [ + "tier", + "bytes_accum" + ], + "type": "string" + } + }, + "required": [ + "type" + ], "type": "object" }, - "passwd": { - "description": "The user password as a string.\n", + "links": { + "description": "Expects an object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. See\n[`arangosearch` View Link Properties](https://docs.arangodb.com/3.12/index-and-search/arangosearch/arangosearch-views-reference/#link-properties)\nfor details.\n", + "type": "object" + }, + "name": { + "description": "The name of the View.\n", + "type": "string" + }, + "optimizeTopK": { + "description": "An array of strings defining sort expressions that you want to optimize.\nThis is also known as _WAND optimization_ (introduced in v3.12.0).\n\nThis option is immutable.\n\nIf you query a View with the `SEARCH` operation in combination with a\n`SORT` and `LIMIT` operation, search results can be retrieved faster if the\n`SORT` expression matches one of the optimized expressions.\n\nOnly sorting by highest rank is supported, that is, sorting by the result\nof a scoring function in descending order (`DESC`). 
Use `@doc` in the expression\nwhere you would normally pass the document variable emitted by the `SEARCH`\noperation to the scoring function.\n\nYou can define up to 64 expressions per View.\n\nExample: `[\"BM25(@doc) DESC\", \"TFIDF(@doc, true) DESC\"]`\n\nThis property is available in the Enterprise Edition only.\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "primaryKeyCache": { + "description": "If you enable this option, then the primary key columns are always cached in\nmemory (introduced in v3.9.6, Enterprise Edition only). This can improve the\nperformance of queries that return many documents. Otherwise, these values are\nmemory-mapped and it is up to the operating system to load them from disk into\nmemory and to evict them from memory.\n\nThis option is immutable.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", + "type": "boolean" + }, + "primarySort": { + "description": "You can define a primary sort order to enable an AQL\noptimization. If a query iterates over all documents of a View,\nwants to sort them by attribute values and the (left-most)\nfields to sort by as well as their sorting direction match\nwith the `primarySort` definition, then the `SORT` operation is\noptimized away. This option is immutable.\n\nExpects an array of objects, each specifying a field\n(attribute path) and a sort direction:\n`[ { \"field\": \"attr\", \"direction\": \"asc\"}, … ]`\n", + "items": { + "properties": { + "direction": { + "description": "The sort direction.\n\n- `\"asc\"` for ascending\n- `\"desc\"` for descending\n", + "enum": [ + "asc", + "desc" + ], + "type": "string" + }, + "field": { + "description": "An attribute path. 
The `.` character denotes sub-attributes.\n", + "type": "string" + } + }, + "required": [ + "field", + "direction" + ], + "type": "object" + }, + "type": "array" + }, + "primarySortCache": { + "description": "If you enable this option, then the primary sort columns are always cached in\nmemory (Enterprise Edition only). This can improve the\nperformance of queries that utilize the primary sort order. Otherwise, these\nvalues are memory-mapped and it is up to the operating system to load them from\ndisk into memory and to evict them from memory.\n\nThis option is immutable.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option.\n", + "type": "boolean" + }, + "primarySortCompression": { + "default": "lz4", + "description": "Defines how to compress the primary sort data.\n\n- `\"lz4\"`: use LZ4 fast compression.\n- `\"none\"`: disable compression to trade space for speed.\n\nThis option is immutable.\n", + "enum": [ + "lz4", + "none" + ], + "type": "string" + }, + "storedValues": { + "description": "An array of objects to describe which document attributes to store in the View\nindex. It can then cover search queries, which means the\ndata can be taken from the index directly and accessing the storage engine can\nbe avoided.\n\nThis option is immutable.\n\nEach object is expected in the following form:\n\n`{ \"fields\": [ \"attr1\", \"attr2\", ... \"attrN\" ], \"compression\": \"none\", \"cache\": false }`\n\nYou may use the following shorthand notations on View creation instead of\nan array of objects as described above. 
The default compression and cache\nsettings are used in this case:\n\n- An array of strings, like `[\"attr1\", \"attr2\"]`, to place each attribute into\n a separate column of the index.\n\n- An array of arrays of strings, like `[[\"attr1\", \"attr2\"]]`, to place the\n attributes into a single column of the index, or `[[\"attr1\"], [\"attr2\"]]`\n to place each attribute into a separate column. You can also mix it with the\n full form:\n\n ```json\n [\n [\"attr1\"],\n [\"attr2\", \"attr3\"],\n { \"fields\": [\"attr4\", \"attr5\"], \"cache\": true }\n ]\n ```\n\nThe `storedValues` option is not to be confused with the `storeValues` option,\nwhich allows to store meta data about attribute values in the View index.\n", + "items": { + "properties": { + "cache": { + "default": false, + "description": "Whether to always cache stored values in memory (Enterprise Edition only).\nThis can improve the query performance if stored values are involved.\nOtherwise, these values are memory-mapped and it is up to the operating system\nto load them from disk into memory and to evict them from memory.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the\ncolumn cache in cluster deployments by only using the cache for leader shards,\nsee the `--arangosearch.columns-cache-only-leader` startup option.\n", + "type": "boolean" + }, + "compression": { + "default": "lz4", + "description": "Defines the compression type used for the internal column-store.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n", + "enum": [ + "lz4", + "none" + ], + "type": "string" + }, + "fields": { + "description": "An array of strings with one or more document attribute paths.\nThe specified attributes are placed into a single column of the index.\nA column with all fields that are involved in common search queries is\nideal for performance. 
The column should not include too many unneeded\nfields, however.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "fields" + ], + "type": "object" + }, + "type": "array" + }, + "type": { + "description": "The type of the View. Must be equal to `\"arangosearch\"`.\nThis option is immutable.\n", "type": "string" + }, + "writebufferActive": { + "default": 0, + "description": "Maximum number of concurrent active writers (segments) that perform a\ntransaction. Other writers (segments) wait till current active writers\n(segments) finish (immutable, `0` = disable).\n", + "type": "integer" + }, + "writebufferIdle": { + "default": 64, + "description": "Maximum number of writers (segments) cached in the pool\n(immutable, `0` = disable).\n", + "type": "integer" + }, + "writebufferSizeMax": { + "default": 33554432, + "description": "Maximum memory byte size per writer (segment) before a writer (segment) flush\nis triggered. The value `0` turns off this limit for any writer (buffer) and data\nis flushed periodically based on the value defined for the flush thread\n(ArangoDB server startup option). 
This should be used carefully due to\nhigh potential memory consumption (immutable, `0` = disable).\n", + "type": "integer" } }, "required": [ - "passwd" + "name", + "type" ], "type": "object" } - } - } - }, - "responses": { - "200": { - "description": "Is returned if the user data can be replaced by the server.\n" - }, - "400": { - "description": "The JSON representation is malformed or mandatory data is missing from the request.\n" - }, - "401": { - "description": "Returned if you have *No access* database access level to the *_system*\ndatabase.\n" - }, - "403": { - "description": "Returned if you have *No access* server access level.\n" + } + } + }, + "responses": { + "201": { + "content": { + "application/json": { + "schema": { + "properties": { + "cleanupIntervalStep": { + "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (`0` = disabled).\n", + "type": "integer" + }, + "commitIntervalMsec": { + "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (`0` = disabled).\n", + "type": "integer" + }, + "consolidationIntervalMsec": { + "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(`0` = disabled).\n", + "type": "integer" + }, + "consolidationPolicy": { + "description": "The consolidation policy to apply for selecting which segments should be merged.\n\n- If the `tier` type is used, then the `segments*` and `minScore` properties are available.\n- If the `bytes_accum` type is used, then the `threshold` property is available.\n", + "properties": { + "minScore": { + "description": "Filter out consolidation candidates with a score less than this.\n", + "type": "integer" + }, + "segmentsBytesFloor": { + "description": "Defines the value (in bytes) to treat all smaller segments\nas equal for 
consolidation selection.\n", + "type": "integer" + }, + "segmentsBytesMax": { + "description": "Maximum allowed size of all consolidated segments in bytes.\n", + "type": "integer" + }, + "segmentsMax": { + "description": "The maximum number of segments that are evaluated as\ncandidates for consolidation.\n", + "type": "integer" + }, + "segmentsMin": { + "description": "The minimum number of segments that are\nevaluated as candidates for consolidation\n", + "type": "integer" + }, + "threshold": { + "description": "A value in the range `[0.0, 1.0]`\n", + "maximum": 1, + "minimum": 0, + "type": "number" + }, + "type": { + "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe currently supported types are:\n- `\"tier\"`: consolidate based on segment byte size and live\n document count as dictated by the customization attributes.\n- `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`.\n", + "enum": [ + "tier", + "bytes_accum" + ], + "type": "string" + } + }, + "type": "object" + }, + "globallyUniqueId": { + "description": "A unique identifier of the View. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the View (deprecated).\n", + "type": "string" + }, + "links": { + "description": "An object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. 
See\n[`arangosearch` View Link Properties](https://docs.arangodb.com/3.12/index-and-search/arangosearch/arangosearch-views-reference/#link-properties)\nfor details.\n", + "type": "object" + }, + "name": { + "description": "The name of the View.\n", + "example": "coll", + "type": "string" + }, + "optimizeTopK": { + "description": "An array of strings defining sort expressions that can be optimized.\nThis is also known as _WAND optimization_ (Enterprise Edition only, introduced in v3.12.0).\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "primaryKeyCache": { + "description": "Whether the primary key columns are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "primarySort": { + "description": "The primary sort order, described by an array of objects, each specifying\na field (attribute path) and a sort direction.\n", + "items": { + "properties": { + "asc": { + "description": "The sort direction.\n\n- `true` for ascending\n- `false` for descending\n", + "type": "boolean" + }, + "field": { + "description": "An attribute path. 
The `.` character denotes sub-attributes.\n", + "type": "string" + } + }, + "required": [ + "field", + "asc" + ], + "type": "object" + }, + "type": "array" + }, + "primarySortCache": { + "description": "Whether the primary sort columns are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "primarySortCompression": { + "description": "Defines how the primary sort data is compressed.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n", + "enum": [ + "lz4", + "none" + ], + "type": "string" + }, + "storedValues": { + "description": "An array of objects that describes which document attributes are stored\nin the View index for covering search queries, which means the data can\nbe taken from the index directly and accessing the storage engine can\nbe avoided.\n", + "items": { + "properties": { + "cache": { + "description": "Whether stored values are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "compression": { + "description": "The compression type used for the internal column-store.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n", + "enum": [ + "lz4", + "none" + ], + "type": "string" + }, + "fields": { + "description": "An array of strings with one or more document attribute paths.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "fields" + ], + "type": "object" + }, + "type": "array" + }, + "type": { + "description": "The type of the View (`\"arangosearch\"`).\n", + "example": "arangosearch", + "type": "integer" + }, + "writebufferActive": { + "description": "Maximum number of concurrent active writers (segments) that perform a\ntransaction. 
Other writers (segments) wait till current active writers\n(segments) finish (`0` = disabled).\n", + "type": "integer" + }, + "writebufferIdle": { + "description": "Maximum number of writers (segments) cached in the pool (`0` = disabled).\n", + "type": "integer" + }, + "writebufferSizeMax": { + "description": "Maximum memory byte size per writer (segment) before a writer (segment) flush\nis triggered. `0` value turns off this limit for any writer (buffer) and data\nis flushed periodically based on the value defined for the flush thread\n(`0` = disabled).\n", + "type": "integer" + } + }, + "required": [ + "links", + "primarySort", + "primarySortCompression", + "optimizeTopK", + "storedValues", + "cleanupIntervalStep", + "commitIntervalMsec", + "consolidationIntervalMsec", + "consolidationPolicy", + "writebufferIdle", + "writebufferActive", + "writebufferSizeMax", + "id", + "name", + "type", + "globallyUniqueId" + ], + "type": "object" + } + } + }, + "description": "The View has been created.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `name` or `type` attribute is missing or invalid.\n" }, - "404": { - "description": "The specified user does not exist\n" + "409": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 409, + "type": "integer" + }, + "error": { + 
"description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A View called `name` already exists.\n" } }, - "summary": "Update a user", + "summary": "Create an arangosearch View", "tags": [ - "Users" + "Views" ] - }, - "put": { - "description": "Replaces the data of an existing user. You need server access level\n*Administrate* in order to execute this REST call. Additionally, users can\nchange their own data.\n", - "operationId": "replaceUserData", + } + }, + "/_db/{database-name}/_api/view#searchalias": { + "post": { + "description": "Creates a new View with a given name and properties if it does not\nalready exist.\n", + "operationId": "createViewSearchAlias", "parameters": [ { - "description": "The name of the user.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "user", + "name": "database-name", "required": true, "schema": { "type": "string" @@ -20807,21 +26941,39 @@ "application/json": { "schema": { "properties": { - "active": { - "description": "An optional flag that specifies whether the user is active. 
If not\nspecified, this will default to *true*.\n", - "type": "boolean" + "indexes": { + "description": "A list of inverted indexes to add to the View.\n", + "items": { + "properties": { + "collection": { + "description": "The name of a collection.\n", + "type": "string" + }, + "index": { + "description": "The name of an inverted index of the `collection`, or the index ID without\nthe `\u003ccollection\u003e/` prefix.\n", + "type": "string" + } + }, + "required": [ + "collection", + "index" + ], + "type": "object" + }, + "type": "array" }, - "extra": { - "description": "A JSON object with extra user information. It is used by the web interface\nto store graph viewer settings and saved queries. Should not be set or\nmodified by end users, as custom attributes will not be preserved.\n", - "type": "object" + "name": { + "description": "The name of the View.\n", + "type": "string" }, - "passwd": { - "description": "The user password as a string. If not specified, it will default to an empty\nstring.\n", + "type": { + "description": "The type of the View. 
Must be equal to `\"search-alias\"`.\nThis option is immutable.\n", "type": "string" } }, "required": [ - "passwd" + "name", + "type" ], "type": "object" } @@ -20829,126 +26981,160 @@ } }, "responses": { - "200": { - "description": "Is returned if the user data can be replaced by the server.\n" - }, - "400": { - "description": "The JSON representation is malformed or mandatory data is missing from the request\n" - }, - "401": { - "description": "Returned if you have *No access* database access level to the *_system*\ndatabase.\n" - }, - "403": { - "description": "Returned if you have *No access* server access level.\n" - }, - "404": { - "description": "The specified user does not exist\n" - } - }, - "summary": "Replace a user", - "tags": [ - "Users" - ] - } - }, - "/_api/user/{user}/database/": { - "get": { - "description": "Fetch the list of databases available to the specified `user`.\n\nYou need *Administrate* permissions for the server access level in order to\nexecute this REST call.\n\nThe call will return a JSON object with the per-database access\nprivileges for the specified user. 
The `result` object will contain\nthe databases names as object keys, and the associated privileges\nfor the database as values.\n\nIn case you specified `full`, the result will contain the permissions\nfor the databases as well as the permissions for the collections.\n", - "operationId": "listUserDatabases", - "parameters": [ - { - "description": "The name of the user for which you want to query the databases.\n", - "in": "path", - "name": "user", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "Return the full set of access levels for all databases and all collections.\n", - "in": "query", - "name": "full", - "required": false, - "schema": { - "type": "boolean" - } - } - ], - "responses": { - "200": { - "description": "Returned if the list of available databases can be returned.\n" + "201": { + "content": { + "application/json": { + "schema": { + "properties": { + "globallyUniqueId": { + "description": "A unique identifier of the View. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the View (deprecated).\n", + "type": "string" + }, + "indexes": { + "description": "The list of the View's inverted indexes.\n", + "items": { + "properties": { + "collection": { + "description": "The name of a collection.\n", + "type": "string" + }, + "index": { + "description": "The name of an inverted index of the `collection`.\n", + "type": "string" + } + }, + "required": [ + "collection", + "index" + ], + "type": "object" + }, + "type": "array" + }, + "name": { + "description": "The name of the View.\n", + "type": "string" + }, + "type": { + "description": "The type of the View (`\"search-alias\"`).\n", + "type": "string" + } + }, + "required": [ + "name", + "type", + "id", + "globallyUniqueId", + "indexes" + ], + "type": "object" + } + } + }, + "description": "The View has been created.\n" }, "400": { - "description": "If the access privileges are not right etc.\n" - }, - "401": { - 
"description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `name` or `type` attribute or one of the `collection` or `index`\nattributes is missing or invalid.\nerror is returned.\n" }, - "403": { - "description": "Returned if you have *No access* server access level.\n" + "409": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 409, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A View called `name` already exists.\n" } }, - "summary": "List a user\u0026rsquo;s accessible databases", + "summary": "Create a search-alias View", "tags": [ - "Users" + "Views" ] } }, - "/_api/user/{user}/database/{dbname}": { + "/_db/{database-name}/_api/view/{view-name}": { "delete": { - "description": "Clears the database access level for the database `dbname` of user `user`. 
As\nconsequence, the default database access level is used. If there is no defined\ndefault database access level, it defaults to *No access*.\n\nYou need write permissions (*Administrate* access level) for the `_system`\ndatabase in order to execute this REST call.\n", - "operationId": "deleteUserDatabasePermissions", + "description": "Deletes the View identified by `view-name`.\n", + "operationId": "deleteView", "parameters": [ - { - "description": "The name of the user.\n", - "in": "path", - "name": "user", - "required": true, - "schema": { - "type": "string" - } - }, { "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "dbname", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "202": { - "description": "Returned if the access permissions were changed successfully.\n" - }, - "400": { - "description": "If the JSON representation is malformed or mandatory data is missing\nfrom the request.\n" - } - }, - "summary": "Clear a user\u0026rsquo;s database access level", - "tags": [ - "Users" - ] - }, - "get": { - "description": "Fetch the database access level for a specific database\n", - "operationId": "getUserDatabasePermissions", - "parameters": [ - { - "description": "The name of the user for which you want to query the databases.\n", - "in": "path", - "name": "user", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the database to query\n", + "description": "The name of the View to drop.\n", "in": "path", - "name": "dbname", + "name": "view-name", "required": true, "schema": { "type": "string" @@ -20957,111 +27143,133 @@ ], "responses": { "200": { - "description": "Returned if the access level can be returned\n" - }, - "400": { - "description": "If the access privileges are not right etc.\n" - }, - "401": { - "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" - }, - "403": { 
- "description": "Returned if you have *No access* server access level.\n" - } - }, - "summary": "Get a user\u0026rsquo;s database access level", - "tags": [ - "Users" - ] - }, - "put": { - "description": "Sets the database access levels for the database `dbname` of user `user`. You\nneed the *Administrate* server access level in order to execute this REST\ncall.\n", - "operationId": "setUserDatabasePermissions", - "parameters": [ - { - "description": "The name of the user.\n", - "in": "path", - "name": "user", - "required": true, - "schema": { - "type": "string" - } - }, - { - "description": "The name of the database.\n", - "in": "path", - "name": "dbname", - "required": true, - "schema": { - "type": "string" - } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "grant": { - "description": "- Use \"rw\" to set the database access level to *Administrate*.\n- Use \"ro\" to set the database access level to *Access*.\n- Use \"none\" to set the database access level to *No access*.\n", - "type": "string" - } - }, - "required": [ - "grant" - ], - "type": "object" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "result": { + "description": "The value `true`.\n", + "example": true, + "type": "boolean" + } + }, + "required": [ + "error", + "code", + "result" + ], + "type": "object" + } } - } - } - }, - "responses": { - "200": { - "description": "Returned if the access level was changed successfully.\n" + }, + "description": "The View has been dropped successfully.\n" }, "400": { - "description": "If the JSON representation is malformed or mandatory data is missing\nfrom the request.\n" - }, - "401": { - "description": "Returned if you have *No access* database access level 
to the `_system`\ndatabase.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `view-name` path parameter is missing or invalid.\n" }, - "403": { - "description": "Returned if you have *No access* server access level.\n" + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A View called `view-name` could not be found.\n" } }, - "summary": "Set a user\u0026rsquo;s database access level", + "summary": "Drop a View", "tags": [ - "Users" + "Views" ] - } - }, - "/_api/user/{user}/database/{dbname}/{collection}": { - "delete": { - "description": "Clears the collection access level for the collection `collection` in the\ndatabase `dbname` of user `user`. As consequence, the default collection\naccess level is used. 
If there is no defined default collection access level,\nit defaults to *No access*.\n\nYou need write permissions (*Administrate* access level) for the `_system`\ndatabase in order to execute this REST call.\n", - "operationId": "deleteUserCollectionPermissions", - "parameters": [ - { - "description": "The name of the user.\n", - "in": "path", - "name": "user", - "required": true, - "schema": { - "type": "string" - } - }, + }, + "get": { + "description": "Returns the basic information about a specific View.\n", + "operationId": "getView", + "parameters": [ { "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "dbname", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the collection.\n", + "description": "The name of the View.\n", "in": "path", - "name": "collection", + "name": "view-name", "required": true, "schema": { "type": "string" @@ -21069,44 +27277,116 @@ } ], "responses": { - "202": { - "description": "Returned if the access permissions were changed successfully.\n" + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "globallyUniqueId": { + "description": "A unique identifier of the View. 
This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the View (deprecated).\n", + "type": "string" + }, + "name": { + "description": "The name of the View.\n", + "example": "coll", + "type": "string" + }, + "type": { + "description": "The type of the View (`\"arangosearch\"`).\n", + "example": "arangosearch", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "name", + "type", + "id", + "globallyUniqueId" + ], + "type": "object" + } + } + }, + "description": "The basic information about the View.\n" }, - "400": { - "description": "If there was an error\n" + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A View called `view-name` could not be found.\n" } }, - "summary": "Clear a user\u0026rsquo;s collection access level", + "summary": "Get information about a View", "tags": [ - "Users" + "Views" ] - }, + } + }, + "/_db/{database-name}/_api/view/{view-name}/properties": { "get": { - "description": "Returns the collection access level for a specific collection\n", - "operationId": "getUserCollectionPermissions", + "description": "Returns an object containing the definition of the View identified by `view-name`.\n", + "operationId": "getViewProperties", "parameters": [ { - "description": "The name of the user for which you want to query the databases.\n", - "in": "path", - "name": "user", - "required": true, - 
"schema": { - "type": "string" - } - }, - { - "description": "The name of the database to query\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "dbname", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the collection\n", + "description": "The name of the View.\n", "in": "path", - "name": "collection", + "name": "view-name", "required": true, "schema": { "type": "string" @@ -21115,49 +27395,304 @@ ], "responses": { "200": { - "description": "Returned if the access level can be returned\n" + "content": { + "application/json": { + "schema": { + "properties": { + "cleanupIntervalStep": { + "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (`0` = disabled).\n", + "type": "integer" + }, + "code": { + "description": "The HTTP response status code.\n", + "example": 200, + "type": "integer" + }, + "commitIntervalMsec": { + "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (`0` = disabled).\n", + "type": "integer" + }, + "consolidationIntervalMsec": { + "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(`0` = disabled).\n", + "type": "integer" + }, + "consolidationPolicy": { + "description": "The consolidation policy to apply for selecting which segments should be merged.\n\n- If the `tier` type is used, then the `segments*` and `minScore` properties are available.\n- If the `bytes_accum` type is used, then the `threshold` property is available.\n", + "properties": { + "minScore": { + "description": "Filter out consolidation candidates with a score less than this.\n", + "type": "integer" + }, + "segmentsBytesFloor": { + "description": "Defines the value (in bytes) to treat all smaller 
segments\nas equal for consolidation selection.\n", + "type": "integer" + }, + "segmentsBytesMax": { + "description": "Maximum allowed size of all consolidated segments in bytes.\n", + "type": "integer" + }, + "segmentsMax": { + "description": "The maximum number of segments that are evaluated as\ncandidates for consolidation.\n", + "type": "integer" + }, + "segmentsMin": { + "description": "The minimum number of segments that are\nevaluated as candidates for consolidation\n", + "type": "integer" + }, + "threshold": { + "description": "A value in the range `[0.0, 1.0]`\n", + "maximum": 1, + "minimum": 0, + "type": "number" + }, + "type": { + "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe currently supported types are:\n- `\"tier\"`: consolidate based on segment byte size and live\n document count as dictated by the customization attributes.\n- `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`.\n", + "enum": [ + "tier", + "bytes_accum" + ], + "type": "string" + } + }, + "type": "object" + }, + "error": { + "description": "A flag indicating that no error occurred.\n", + "example": false, + "type": "boolean" + }, + "globallyUniqueId": { + "description": "A unique identifier of the View. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the View (deprecated).\n", + "type": "string" + }, + "links": { + "description": "An object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. 
See\n[`arangosearch` View Link Properties](https://docs.arangodb.com/3.12/index-and-search/arangosearch/arangosearch-views-reference/#link-properties)\nfor details.\n", + "type": "object" + }, + "name": { + "description": "The name of the View.\n", + "example": "coll", + "type": "string" + }, + "optimizeTopK": { + "description": "An array of strings defining sort expressions that can be optimized.\nThis is also known as _WAND optimization_ (Enterprise Edition only, introduced in v3.12.0).\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "primaryKeyCache": { + "description": "Whether the primary key columns are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "primarySort": { + "description": "The primary sort order, described by an array of objects, each specifying\na field (attribute path) and a sort direction.\n", + "items": { + "properties": { + "asc": { + "description": "The sort direction.\n\n- `true` for ascending\n- `false` for descending\n", + "type": "boolean" + }, + "field": { + "description": "An attribute path. 
The `.` character denotes sub-attributes.\n", + "type": "string" + } + }, + "required": [ + "field", + "asc" + ], + "type": "object" + }, + "type": "array" + }, + "primarySortCache": { + "description": "Whether the primary sort columns are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "primarySortCompression": { + "description": "Defines how the primary sort data is compressed.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n", + "enum": [ + "lz4", + "none" + ], + "type": "string" + }, + "storedValues": { + "description": "An array of objects that describes which document attributes are stored\nin the View index for covering search queries, which means the data can\nbe taken from the index directly and accessing the storage engine can\nbe avoided.\n", + "items": { + "properties": { + "cache": { + "description": "Whether stored values are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "compression": { + "description": "The compression type used for the internal column-store.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n", + "enum": [ + "lz4", + "none" + ], + "type": "string" + }, + "fields": { + "description": "An array of strings with one or more document attribute paths.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "fields" + ], + "type": "object" + }, + "type": "array" + }, + "type": { + "description": "The type of the View (`\"arangosearch\"`).\n", + "example": "arangosearch", + "type": "integer" + }, + "writebufferActive": { + "description": "Maximum number of concurrent active writers (segments) that perform a\ntransaction. 
Other writers (segments) wait till current active writers\n(segments) finish (`0` = disabled).\n", + "type": "integer" + }, + "writebufferIdle": { + "description": "Maximum number of writers (segments) cached in the pool (`0` = disabled).\n", + "type": "integer" + }, + "writebufferSizeMax": { + "description": "Maximum memory byte size per writer (segment) before a writer (segment) flush\nis triggered. `0` value turns off this limit for any writer (buffer) and data\nis flushed periodically based on the value defined for the flush thread\n(`0` = disabled).\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "links", + "primarySort", + "primarySortCompression", + "optimizeTopK", + "storedValues", + "cleanupIntervalStep", + "commitIntervalMsec", + "consolidationIntervalMsec", + "consolidationPolicy", + "writebufferIdle", + "writebufferActive", + "writebufferSizeMax", + "id", + "name", + "type", + "globallyUniqueId" + ], + "type": "object" + } + } + }, + "description": "An object with a full description of the specified View, including\n`arangosearch` View type-dependent properties.\n" }, "400": { - "description": "If the access privileges are not right etc.\n" - }, - "401": { - "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `view-name` path parameter is missing or 
invalid.\n" }, - "403": { - "description": "Returned if you have *No access* server access level.\n" + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A View called `view-name` could not be found.\n" } }, - "summary": "Get a user\u0026rsquo;s collection access level", + "summary": "Get the properties of a View", "tags": [ - "Users" + "Views" ] }, - "put": { - "description": "Sets the collection access level for the `collection` in the database `dbname`\nfor user `user`. 
You need the *Administrate* server access level in order to\nexecute this REST call.\n", - "operationId": "setUserCollectionPermissions", + "patch": { + "description": "Partially changes the properties of a View by updating the specified attributes.\n", + "operationId": "updateViewProperties", "parameters": [ - { - "description": "The name of the user.\n", - "in": "path", - "name": "user", - "required": true, - "schema": { - "type": "string" - } - }, { "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "dbname", + "name": "database-name", "required": true, "schema": { "type": "string" } }, { - "description": "The name of the collection.\n", + "description": "The name of the View.\n", "in": "path", - "name": "collection", + "name": "view-name", "required": true, "schema": { "type": "string" @@ -21169,459 +27704,737 @@ "application/json": { "schema": { "properties": { - "grant": { - "description": "Use \"rw\" to set the collection level access to *Read/Write*.\n\nUse \"ro\" to set the collection level access to *Read Only*.\n\nUse \"none\" to set the collection level access to *No access*.\n", - "type": "string" + "cleanupIntervalStep": { + "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (`0` = disable).\nFor the case where the consolidation policies merge segments often (i.e. a lot\nof commit+consolidate), a lower value causes a lot of disk space to be\nwasted.\nFor the case where the consolidation policies rarely merge segments (i.e. 
few\ninserts/deletes), a higher value impacts performance without any added\nbenefits.\n\n_Background:_\n With every \"commit\" or \"consolidate\" operation, a new state of the View's\n internal data structures is created on disk.\n Old states/snapshots are released once there are no longer any users\n remaining.\n However, the files for the released states/snapshots are left on disk, and\n only removed by \"cleanup\" operation.\n", + "type": "integer" + }, + "commitIntervalMsec": { + "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (`0` = disable).\nFor the case where there are a lot of inserts/updates, a higher value causes the\nindex not to account for them and memory usage continues to grow until the commit.\nA lower value impacts performance, including the case where there are no or only a\nfew inserts/updates because of synchronous locking, and it wastes disk space for\neach commit call.\n\n_Background:_\n For data retrieval, ArangoSearch follows the concept of\n \"eventually-consistent\", i.e. 
eventually all the data in ArangoDB will be\n matched by corresponding query expressions.\n The concept of ArangoSearch \"commit\" operations is introduced to\n control the upper-bound on the time until document addition/removals are\n actually reflected by corresponding query expressions.\n Once a \"commit\" operation is complete, all documents added/removed prior to\n the start of the \"commit\" operation will be reflected by queries invoked in\n subsequent ArangoDB transactions, in-progress ArangoDB transactions will\n still continue to return a repeatable-read state.\n", + "type": "integer" + }, + "consolidationIntervalMsec": { + "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(`0` = disable).\nFor the case where there are a lot of data modification operations, a higher\nvalue could potentially have the data store consume more space and file handles.\nFor the case where there are a few data modification operations, a lower value\nimpacts performance due to no segment candidates being available for\nconsolidation.\n\n_Background:_\n For data modification, ArangoSearch follows the concept of a\n \"versioned data store\". Thus old versions of data may be removed once there\n are no longer any users of the old data. 
The frequency of the cleanup and\n compaction operations are governed by `consolidationIntervalMsec` and the\n candidates for compaction are selected via `consolidationPolicy`.\n", + "type": "integer" + }, + "consolidationPolicy": { + "description": "The consolidation policy to apply for selecting which segments should be merged.\n\n- If the `tier` type is used, then the `segments*` and `minScore` properties are available.\n- If the `bytes_accum` type is used, then the `threshold` property is available.\n\n_Background:_\n With each ArangoDB transaction that inserts documents, one or more\n ArangoSearch-internal segments get created.\n Similarly, for removed documents, the segments that contain such documents\n have these documents marked as 'deleted'.\n Over time, this approach causes a lot of small and sparse segments to be\n created.\n A \"consolidation\" operation selects one or more segments and copies all of\n their valid documents into a single new segment, thereby allowing the\n search algorithm to perform more optimally and for extra file handles to be\n released once old segments are no longer used.\n", + "properties": { + "minScore": { + "default": 0, + "description": "Filter out consolidation candidates with a score less than this.\n", + "type": "integer" + }, + "segmentsBytesFloor": { + "default": 2097152, + "description": "Defines the value (in bytes) to treat all smaller segments\nas equal for consolidation selection.\n", + "type": "integer" + }, + "segmentsBytesMax": { + "default": 5368709120, + "description": "Maximum allowed size of all consolidated segments in bytes.\n", + "type": "integer" + }, + "segmentsMax": { + "default": 10, + "description": "The maximum number of segments that are evaluated as\ncandidates for consolidation.\n", + "type": "integer" + }, + "segmentsMin": { + "default": 1, + "description": "The minimum number of segments that are\nevaluated as candidates for consolidation\n", + "type": "integer" + }, + "threshold": { + 
"default": 0, + "description": "A value in the range `[0.0, 1.0]`.\n", + "maximum": 1, + "minimum": 0, + "type": "number" + }, + "type": { + "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe currently supported types are:\n- `\"tier\"`: consolidate based on segment byte size and live\n document count as dictated by the customization attributes. \n- `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`.\n", + "enum": [ + "tier", + "bytes_accum" + ], + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "links": { + "description": "Expects an object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. See\n[`arangosearch` View Link Properties](https://docs.arangodb.com/3.12/index-and-search/arangosearch/arangosearch-views-reference/#link-properties)\nfor details.\n", + "type": "object" } }, - "required": [ - "grant" - ], "type": "object" } } } }, - "responses": { - "200": { - "description": "Returned if the access permissions were changed successfully.\n" - }, - "400": { - "description": "If the JSON representation is malformed or mandatory data is missing\nfrom the request.\n" - }, - "401": { - "description": "Returned if you have *No access* database access level to the `_system`\ndatabase.\n" - }, - "403": { - "description": "Returned if you have *No access* server access level.\n" - } - }, - "summary": "Set a user\u0026rsquo;s collection access level", - "tags": [ - "Users" - ] - } - }, - "/_api/version": { - "get": { - "description": "Returns the server name and version number. 
The response is a JSON object\nwith the following attributes:\n", - "operationId": "getVersion", - "parameters": [ - { - "description": "If set to `true`, the response will contain a `details` attribute with\nadditional information about included components and their versions. The\nattribute names and internals of the `details` object may vary depending on\nplatform and ArangoDB version.\n", - "in": "query", - "name": "details", - "required": false, - "schema": { - "type": "boolean" - } - } - ], "responses": { "200": { "content": { "application/json": { "schema": { "properties": { - "details": { - "description": "an optional JSON object with additional details. This is\nreturned only if the `details` query parameter is set to `true` in the\nrequest.\n", - "properties": { - "architecture": { - "description": "The CPU architecture, i.e. `64bit`\n", - "type": "string" - }, - "arm": { - "description": "`false` - this is not running on an ARM cpu\n", - "type": "string" - }, - "asan": { - "description": "has this been compiled with the asan address sanitizer turned on? 
(should be false)\n", - "type": "string" - }, - "assertions": { - "description": "do we have assertions compiled in (=\u003e developer version)\n", - "type": "string" - }, - "boost-version": { - "description": "which boost version do we bind\n", - "type": "string" - }, - "build-date": { - "description": "the date when this binary was created\n", - "type": "string" - }, - "build-repository": { - "description": "reference to the git-ID this was compiled from\n", - "type": "string" - }, - "compiler": { - "description": "which compiler did we use\n", - "type": "string" - }, - "cplusplus": { - "description": "C++ standards version\n", - "type": "string" - }, - "debug": { - "description": "`false` for production binaries\n", - "type": "string" - }, - "endianness": { - "description": "currently only `little` is supported\n", - "type": "string" - }, - "failure-tests": { - "description": "`false` for production binaries (the facility to invoke fatal errors is disabled)\n", - "type": "string" - }, - "fd-client-event-handler": { - "description": "which method do we use to handle fd-sets, `poll` should be here on linux.\n", - "type": "string" - }, - "fd-setsize": { - "description": "if not `poll` the fd setsize is valid for the maximum number of file descriptors\n", - "type": "string" - }, - "full-version-string": { - "description": "The full version string\n", - "type": "string" - }, - "host": { - "description": "the host ID\n", - "type": "string" - }, - "icu-version": { - "description": "Which version of ICU do we bundle\n", - "type": "string" - }, - "jemalloc": { - "description": "`true` if we use jemalloc\n", - "type": "string" - }, - "maintainer-mode": { - "description": "`false` if this is a production binary\n", - "type": "string" - }, - "mode": { - "description": "The mode arangod runs in. 
Possible values: `server`, `console`, `script`\n", - "type": "string" - }, - "openssl-version": { - "description": "which openssl version do we link?\n", - "type": "string" - }, - "platform": { - "description": "the host os - `linux`, `windows` or `darwin`\n", - "type": "string" - }, - "reactor-type": { - "description": "`epoll`\n", - "type": "string" - }, - "rocksdb-version": { - "description": "the rocksdb version this release bundles\n", - "type": "string" - }, - "server-version": { - "description": "the ArangoDB release version\n", - "type": "string" - }, - "sizeof int": { - "description": "number of bytes for integers\n", - "type": "string" + "cleanupIntervalStep": { + "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (`0` = disabled).\n", + "type": "integer" + }, + "commitIntervalMsec": { + "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (`0` = disabled).\n", + "type": "integer" + }, + "consolidationIntervalMsec": { + "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(`0` = disabled).\n", + "type": "integer" + }, + "consolidationPolicy": { + "description": "The consolidation policy to apply for selecting which segments should be merged.\n\n- If the `tier` type is used, then the `segments*` and `minScore` properties are available.\n- If the `bytes_accum` type is used, then the `threshold` property is available.\n", + "properties": { + "minScore": { + "description": "Filter out consolidation candidates with a score less than this.\n", + "type": "integer" }, - "sizeof void*": { - "description": "number of bytes for void pointers\n", - "type": "string" + "segmentsBytesFloor": { + "description": "Defines the value (in bytes) to treat all smaller segments\nas equal for consolidation 
selection.\n", + "type": "integer" }, - "sse42": { - "description": "do we have a SSE 4.2 enabled cpu?\n", - "type": "string" + "segmentsBytesMax": { + "description": "Maximum allowed size of all consolidated segments in bytes.\n", + "type": "integer" }, - "unaligned-access": { - "description": "does this system support unaligned memory access?\n", - "type": "string" + "segmentsMax": { + "description": "The maximum number of segments that are evaluated as\ncandidates for consolidation.\n", + "type": "integer" }, - "v8-version": { - "description": "the bundled V8 javascript engine version\n", - "type": "string" + "segmentsMin": { + "description": "The minimum number of segments that are\nevaluated as candidates for consolidation\n", + "type": "integer" }, - "vpack-version": { - "description": "the version of the used velocypack implementation\n", - "type": "string" + "threshold": { + "description": "A value in the range `[0.0, 1.0]`\n", + "maximum": 1, + "minimum": 0, + "type": "number" }, - "zlib-version": { - "description": "the version of the bundled zlib\n", + "type": { + "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe currently supported types are:\n- `\"tier\"`: consolidate based on segment byte size and live\n document count as dictated by the customization attributes.\n- `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`.\n", + "enum": [ + "tier", + "bytes_accum" + ], "type": "string" } }, "type": "object" }, - "server": { - "description": "will always contain `arango`\n", + "globallyUniqueId": { + "description": "A unique identifier of the View. 
This is an internal property.\n", "type": "string" }, - "version": { - "description": "the server version string. The string has the format\n`major.minor.sub`. `major` and `minor` will be numeric, and `sub`\nmay contain a number or a textual version.\n", + "id": { + "description": "A unique identifier of the View (deprecated).\n", + "type": "string" + }, + "links": { + "description": "An object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. See\n[`arangosearch` View Link Properties](https://docs.arangodb.com/3.12/index-and-search/arangosearch/arangosearch-views-reference/#link-properties)\nfor details.\n", + "type": "object" + }, + "name": { + "description": "The name of the View.\n", + "example": "coll", + "type": "string" + }, + "optimizeTopK": { + "description": "An array of strings defining sort expressions that can be optimized.\nThis is also known as _WAND optimization_ (Enterprise Edition only, introduced in v3.12.0).\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "primaryKeyCache": { + "description": "Whether the primary key columns are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "primarySort": { + "description": "The primary sort order, described by an array of objects, each specifying\na field (attribute path) and a sort direction.\n", + "items": { + "properties": { + "asc": { + "description": "The sort direction.\n\n- `true` for ascending\n- `false` for descending\n", + "type": "boolean" + }, + "field": { + "description": "An attribute path. 
The `.` character denotes sub-attributes.\n", + "type": "string" + } + }, + "required": [ + "field", + "asc" + ], + "type": "object" + }, + "type": "array" + }, + "primarySortCache": { + "description": "Whether the primary sort columns are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "primarySortCompression": { + "description": "Defines how the primary sort data is compressed.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n", + "enum": [ + "lz4", + "none" + ], "type": "string" + }, + "storedValues": { + "description": "An array of objects that describes which document attributes are stored\nin the View index for covering search queries, which means the data can\nbe taken from the index directly and accessing the storage engine can\nbe avoided.\n", + "items": { + "properties": { + "cache": { + "description": "Whether stored values are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "compression": { + "description": "The compression type used for the internal column-store.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n", + "enum": [ + "lz4", + "none" + ], + "type": "string" + }, + "fields": { + "description": "An array of strings with one or more document attribute paths.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "fields" + ], + "type": "object" + }, + "type": "array" + }, + "type": { + "description": "The type of the View (`\"arangosearch\"`).\n", + "example": "arangosearch", + "type": "integer" + }, + "writebufferActive": { + "description": "Maximum number of concurrent active writers (segments) that perform a\ntransaction. 
Other writers (segments) wait till current active writers\n(segments) finish (`0` = disabled).\n", + "type": "integer" + }, + "writebufferIdle": { + "description": "Maximum number of writers (segments) cached in the pool (`0` = disabled).\n", + "type": "integer" + }, + "writebufferSizeMax": { + "description": "Maximum memory byte size per writer (segment) before a writer (segment) flush\nis triggered. `0` value turns off this limit for any writer (buffer) and data\nis flushed periodically based on the value defined for the flush thread\n(`0` = disabled).\n", + "type": "integer" } }, "required": [ - "server", - "version" + "links", + "primarySort", + "primarySortCompression", + "optimizeTopK", + "storedValues", + "cleanupIntervalStep", + "commitIntervalMsec", + "consolidationIntervalMsec", + "consolidationPolicy", + "writebufferIdle", + "writebufferActive", + "writebufferSizeMax", + "id", + "name", + "type", + "globallyUniqueId" ], "type": "object" } } }, - "description": "is returned in all cases.\n" - } - }, - "summary": "Get the server version", - "tags": [ - "Administration" - ] - } - }, - "/_api/view": { - "get": { - "description": "Returns an object containing a listing of all Views in a database, regardless\nof their type. It is an array of objects with the following attributes:\n- `id`\n- `name`\n- `type`\n", - "operationId": "listViews", - "responses": { - "200": { - "description": "The list of Views\n" - } - }, - "summary": "List all Views", - "tags": [ - "Views" - ] - }, - "post": { - "description": "Creates a new View with a given name and properties if it does not\nalready exist.\n", - "operationId": "createView", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "cleanupIntervalStep": { - "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (default: 2, to disable use: 0).\nFor the case where the consolidation policies merge segments often (i.e. 
a lot\nof commit+consolidate), a lower value causes a lot of disk space to be\nwasted.\nFor the case where the consolidation policies rarely merge segments (i.e. few\ninserts/deletes), a higher value impacts performance without any added\nbenefits.\n\n_Background:_\n With every \"commit\" or \"consolidate\" operation a new state of the View's\n internal data structures is created on disk.\n Old states/snapshots are released once there are no longer any users\n remaining.\n However, the files for the released states/snapshots are left on disk, and\n only removed by \"cleanup\" operation.\n", - "type": "integer" - }, - "commitIntervalMsec": { - "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (default: 1000, to disable\nuse: 0).\nFor the case where there are a lot of inserts/updates, a higher value causes the\nindex not to account for them and memory usage continues to grow until the commit.\nA lower value impacts performance, including the case where there are no or only a\nfew inserts/updates because of synchronous locking, and it wastes disk space for\neach commit call.\n\n_Background:_\n For data retrieval, ArangoSearch follows the concept of\n \"eventually-consistent\", i.e. 
eventually all the data in ArangoDB will be\n matched by corresponding query expressions.\n The concept of ArangoSearch \"commit\" operations is introduced to\n control the upper-bound on the time until document addition/removals are\n actually reflected by corresponding query expressions.\n Once a \"commit\" operation is complete, all documents added/removed prior to\n the start of the \"commit\" operation will be reflected by queries invoked in\n subsequent ArangoDB transactions, in-progress ArangoDB transactions will\n still continue to return a repeatable-read state.\n", - "type": "integer" - }, - "consolidationIntervalMsec": { - "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(default: 10000, to disable use: 0).\nFor the case where there are a lot of data modification operations, a higher\nvalue could potentially have the data store consume more space and file handles.\nFor the case where there are a few data modification operations, a lower value\nimpacts performance due to no segment candidates being available for\nconsolidation.\n\n_Background:_\n For data modification, ArangoSearch follows the concept of a\n \"versioned data store\". Thus old versions of data may be removed once there\n are no longer any users of the old data. 
The frequency of the cleanup and\n compaction operations are governed by `consolidationIntervalMsec` and the\n candidates for compaction are selected via `consolidationPolicy`.\n", - "type": "integer" - }, - "consolidationPolicy": { - "description": "The consolidation policy to apply for selecting which segments should be merged\n(default: {})\n\n_Background:_\n With each ArangoDB transaction that inserts documents, one or more\n ArangoSearch-internal segments get created.\n Similarly, for removed documents, the segments that contain such documents\n have these documents marked as 'deleted'.\n Over time, this approach causes a lot of small and sparse segments to be\n created.\n A \"consolidation\" operation selects one or more segments and copies all of\n their valid documents into a single new segment, thereby allowing the\n search algorithm to perform more optimally and for extra file handles to be\n released once old segments are no longer used.\n\nSub-properties:\n - `type` (string, _optional_):\n The segment candidates for the \"consolidation\" operation are selected based\n upon several possible configurable formulas as defined by their types.\n The currently supported types are:\n - `\"tier\"` (default): consolidate based on segment byte size and live\n document count as dictated by the customization attributes. If this type\n is used, then below `segments*` and `minScore` properties are available.\n - `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`. 
If this type is used,\n then below `threshold` property is available.\n - `threshold` (number, _optional_): value in the range `[0.0, 1.0]`\n - `segmentsBytesFloor` (number, _optional_): Defines the value (in bytes) to\n treat all smaller segments as equal for consolidation selection\n (default: 2097152)\n - `segmentsBytesMax` (number, _optional_): Maximum allowed size of all\n consolidated segments in bytes (default: 5368709120)\n - `segmentsMax` (number, _optional_): The maximum number of segments that are\n evaluated as candidates for consolidation (default: 10)\n - `segmentsMin` (number, _optional_): The minimum number of segments that are\n evaluated as candidates for consolidation (default: 1)\n - `minScore` (number, _optional_): (default: 0)\n", - "type": "object" - }, - "links": { - "description": "Expects an object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. See\n[`arangosearch` View Link Properties](../../../index-and-search/arangosearch/arangosearch-views-reference.md#link-properties)\nfor details.\n", - "type": "object" - }, - "name": { - "description": "The name of the View.\n", - "type": "string" - }, - "optimizeTopK": { - "description": "An array of strings defining sort expressions that you want to optimize.\nThis is also known as _WAND optimization_ (introduced in v3.12.0).\n\nIf you query a View with the `SEARCH` operation in combination with a\n`SORT` and `LIMIT` operation, search results can be retrieved faster if the\n`SORT` expression matches one of the optimized expressions.\n\nOnly sorting by highest rank is supported, that is, sorting by the result\nof a scoring function in descending order (`DESC`). 
Use `@doc` in the expression\nwhere you would normally pass the document variable emitted by the `SEARCH`\noperation to the scoring function.\n\nYou can define up to 64 expressions per View.\n\nExample: `[\"BM25(@doc) DESC\", \"TFIDF(@doc, true) DESC\"]`\n\nDefault: `[]`\n\nThis property is available in the Enterprise Edition only.\n", - "items": { + "description": "The View has been updated successfully.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", "type": "string" }, - "type": "array" - }, - "primaryKeyCache": { - "description": "If you enable this option, then the primary key columns are always cached in\nmemory (introduced in v3.9.6, Enterprise Edition only). This can improve the\nperformance of queries that return many documents. Otherwise, these values are\nmemory-mapped and it is up to the operating system to load them from disk into\nmemory and to evict them from memory.\n\nThis option is immutable.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", - "type": "boolean" + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } }, - "primarySort": { - "description": "A primary sort order can be defined to enable an AQL optimization. 
If a query\niterates over all documents of a View, wants to sort them by attribute values\nand the (left-most) fields to sort by as well as their sorting direction match\nwith the `primarySort` definition, then the `SORT` operation is optimized away.\nThis option is immutable.\n\nExpects an array of objects, each specifying a field (attribute path) and a\nsort direction (`\"asc` for ascending, `\"desc\"` for descending):\n`[ { \"field\": \"attr\", \"direction\": \"asc\"}, … ]`", - "items": { - "type": "object" + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `view-name` path parameter is missing or invalid.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" }, - "type": "array" - }, - "primarySortCache": { - "description": "If you enable this option, then the primary sort columns are always cached in\nmemory (introduced in v3.9.6, Enterprise Edition only). This can improve the\nperformance of queries that utilize the primary sort order. Otherwise, these\nvalues are memory-mapped and it is up to the operating system to load them from\ndisk into memory and to evict them from memory.\n\nThis option is immutable.\n\nSee the `--arangosearch.columns-cache-limit` startup option to control the\nmemory consumption of this cache. 
You can reduce the memory usage of the column\ncache in cluster deployments by only using the cache for leader shards, see the\n`--arangosearch.columns-cache-only-leader` startup option (introduced in v3.10.6).\n", - "type": "boolean" - }, - "primarySortCompression": { - "description": "Defines how to compress the primary sort data.\n\nThis option is immutable.\n\n- `\"lz4\"` (default): use LZ4 fast compression.\n- `\"none\"`: disable compression to trade space for speed.\n", - "type": "string" - }, - "storedValues": { - "description": "An array of objects to describe which document attributes to store in the View\nindex. It can then cover search queries, which means the\ndata can be taken from the index directly and accessing the storage engine can\nbe avoided.\n\nThis option is immutable.\n\nEach object is expected in the following form:\n\n`{ \"fields\": [ \"attr1\", \"attr2\", ... \"attrN\" ], \"compression\": \"none\", \"cache\": false }`\n\n- The required `fields` attribute is an array of strings with one or more\n document attribute paths. The specified attributes are placed into a single\n column of the index. A column with all fields that are involved in common\n search queries is ideal for performance. The column should not include too\n many unneeded fields, however.\n\n- The optional `compression` attribute defines the compression type used for\n the internal column-store, which can be `\"lz4\"` (LZ4 fast compression, default)\n or `\"none\"` (no compression).\n\n- The optional `cache` attribute allows you to always cache stored values in\n memory (introduced in v3.9.5, Enterprise Edition only). This can improve\n the query performance if stored values are involved. Otherwise, these values\n are memory-mapped and it is up to the operating system to load them from disk\n into memory and to evict them from memory.\n\n See the `--arangosearch.columns-cache-limit` startup option\n to control the memory consumption of this cache. 
You can reduce the memory\n usage of the column cache in cluster deployments by only using the cache for\n leader shards, see the `--arangosearch.columns-cache-only-leader` startup\n option (introduced in v3.10.6).\n\n You may use the following shorthand notations on View creation instead of\n an array of objects as described above. The default compression and cache\n settings are used in this case:\n\n - An array of strings, like `[\"attr1\", \"attr2\"]`, to place each attribute into\n a separate column of the index (introduced in v3.10.3).\n\n - An array of arrays of strings, like `[[\"attr1\", \"attr2\"]]`, to place the\n attributes into a single column of the index, or `[[\"attr1\"], [\"attr2\"]]`\n to place each attribute into a separate column. You can also mix it with the\n full form:\n\n ```json\n [\n [\"attr1\"],\n [\"attr2\", \"attr3\"],\n { \"fields\": [\"attr4\", \"attr5\"], \"cache\": true }\n ]\n ```\n\nThe `storedValues` option is not to be confused with the `storeValues` option,\nwhich allows to store meta data about attribute values in the View index.\n", - "items": { - "type": "object" + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" }, - "type": "array" - }, - "type": { - "description": "The type of the View. Must be equal to `\"arangosearch\"`.\nThis option is immutable.\n", - "type": "string" + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } }, - "writebufferActive": { - "description": "Maximum number of concurrent active writers (segments) that perform a\ntransaction. 
Other writers (segments) wait till current active writers\n(segments) finish (default: 0, use 0 to disable, immutable)\n", + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A View called `view-name` could not be found.\n" + } + }, + "summary": "Update the properties of an arangosearch View", + "tags": [ + "Views" + ] + }, + "put": { + "description": "Changes all properties of a View by replacing them, except for immutable properties.\n", + "operationId": "replaceViewProperties", + "parameters": [ + { + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the View.\n", + "in": "path", + "name": "view-name", + "required": true, + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "cleanupIntervalStep": { + "default": 2, + "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (`0` = disable).\nFor the case where the consolidation policies merge segments often (i.e. a lot\nof commit+consolidate), a lower value causes a lot of disk space to be\nwasted.\nFor the case where the consolidation policies rarely merge segments (i.e. 
few\ninserts/deletes), a higher value impacts performance without any added\nbenefits.\n\n_Background:_\n With every \"commit\" or \"consolidate\" operation, a new state of the View's\n internal data structures is created on disk.\n Old states/snapshots are released once there are no longer any users\n remaining.\n However, the files for the released states/snapshots are left on disk, and\n only removed by \"cleanup\" operation.\n", "type": "integer" }, - "writebufferIdle": { - "description": "Maximum number of writers (segments) cached in the pool\n(default: 64, use 0 to disable, immutable)\n", + "commitIntervalMsec": { + "default": 1000, + "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (`0` = disable).\nFor the case where there are a lot of inserts/updates, a higher value causes the\nindex not to account for them and memory usage continues to grow until the commit.\nA lower value impacts performance, including the case where there are no or only a\nfew inserts/updates because of synchronous locking, and it wastes disk space for\neach commit call.\n\n_Background:_\n For data retrieval, ArangoSearch follows the concept of\n \"eventually-consistent\", i.e. 
eventually all the data in ArangoDB will be\n matched by corresponding query expressions.\n The concept of ArangoSearch \"commit\" operations is introduced to\n control the upper-bound on the time until document addition/removals are\n actually reflected by corresponding query expressions.\n Once a \"commit\" operation is complete, all documents added/removed prior to\n the start of the \"commit\" operation will be reflected by queries invoked in\n subsequent ArangoDB transactions, in-progress ArangoDB transactions will\n still continue to return a repeatable-read state.\n", "type": "integer" }, - "writebufferSizeMax": { - "description": "Maximum memory byte size per writer (segment) before a writer (segment) flush\nis triggered. `0` value turns off this limit for any writer (buffer) and data\nis flushed periodically based on the value defined for the flush thread\n(ArangoDB server startup option). `0` value should be used carefully due to\nhigh potential memory consumption\n(default: 33554432, use 0 to disable, immutable)\n", + "consolidationIntervalMsec": { + "default": 10000, + "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(`0` = disable).\nFor the case where there are a lot of data modification operations, a higher\nvalue could potentially have the data store consume more space and file handles.\nFor the case where there are a few data modification operations, a lower value\nimpacts performance due to no segment candidates being available for\nconsolidation.\n\n_Background:_\n For data modification, ArangoSearch follows the concept of a\n \"versioned data store\". Thus old versions of data may be removed once there\n are no longer any users of the old data. 
The frequency of the cleanup and\n compaction operations are governed by `consolidationIntervalMsec` and the\n candidates for compaction are selected via `consolidationPolicy`.\n", "type": "integer" + }, + "consolidationPolicy": { + "description": "The consolidation policy to apply for selecting which segments should be merged.\n\n- If the `tier` type is used, then the `segments*` and `minScore` properties are available.\n- If the `bytes_accum` type is used, then the `threshold` property is available.\n\n_Background:_\n With each ArangoDB transaction that inserts documents, one or more\n ArangoSearch-internal segments get created.\n Similarly, for removed documents, the segments that contain such documents\n have these documents marked as 'deleted'.\n Over time, this approach causes a lot of small and sparse segments to be\n created.\n A \"consolidation\" operation selects one or more segments and copies all of\n their valid documents into a single new segment, thereby allowing the\n search algorithm to perform more optimally and for extra file handles to be\n released once old segments are no longer used.\n", + "properties": { + "minScore": { + "default": 0, + "description": "Filter out consolidation candidates with a score less than this.\n", + "type": "integer" + }, + "segmentsBytesFloor": { + "default": 2097152, + "description": "Defines the value (in bytes) to treat all smaller segments\nas equal for consolidation selection.\n", + "type": "integer" + }, + "segmentsBytesMax": { + "default": 5368709120, + "description": "Maximum allowed size of all consolidated segments in bytes.\n", + "type": "integer" + }, + "segmentsMax": { + "default": 10, + "description": "The maximum number of segments that are evaluated as\ncandidates for consolidation.\n", + "type": "integer" + }, + "segmentsMin": { + "default": 1, + "description": "The minimum number of segments that are\nevaluated as candidates for consolidation\n", + "type": "integer" + }, + "threshold": { + 
"default": 0, + "description": "A value in the range `[0.0, 1.0]`.\n", + "maximum": 1, + "minimum": 0, + "type": "number" + }, + "type": { + "default": "tier", + "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe currently supported types are:\n- `\"tier\"`: consolidate based on segment byte size and live\n document count as dictated by the customization attributes. \n- `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`.\n", + "enum": [ + "tier", + "bytes_accum" + ], + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "links": { + "description": "Expects an object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. 
See\n[`arangosearch` View Link Properties](https://docs.arangodb.com/3.12/index-and-search/arangosearch/arangosearch-views-reference/#link-properties)\nfor details.\n", + "type": "object" } }, - "required": [ - "name", - "type" - ], "type": "object" } } } }, "responses": { - "400": { - "description": "If the *name* or *type* attribute are missing or invalid, then an *HTTP 400*\nerror is returned.\n" - }, - "409": { - "description": "If a View called *name* already exists, then an *HTTP 409* error is returned.\n" - } - }, - "summary": "Create an arangosearch View", - "tags": [ - "Views" - ] - } - }, - "/_api/view#searchalias": { - "post": { - "description": "Creates a new View with a given name and properties if it does not\nalready exist.\n", - "operationId": "createViewSearchAlias", - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "indexes": { - "description": "A list of inverted indexes to add to the View.\n", - "items": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "cleanupIntervalStep": { + "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (`0` = disabled).\n", + "type": "integer" + }, + "commitIntervalMsec": { + "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (`0` = disabled).\n", + "type": "integer" + }, + "consolidationIntervalMsec": { + "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(`0` = disabled).\n", + "type": "integer" + }, + "consolidationPolicy": { + "description": "The consolidation policy to apply for selecting which segments should be merged.\n\n- If the `tier` type is used, then the `segments*` and `minScore` properties are available.\n- If the `bytes_accum` type is used, then the 
`threshold` property is available.\n", "properties": { - "collection": { - "description": "The name of a collection.\n", - "type": "string" + "minScore": { + "description": "Filter out consolidation candidates with a score less than this.\n", + "type": "integer" }, - "index": { - "description": "The name of an inverted index of the `collection`, or the index ID without\nthe `\u003ccollection\u003e/` prefix.\n", + "segmentsBytesFloor": { + "description": "Defines the value (in bytes) to treat all smaller segments\nas equal for consolidation selection.\n", + "type": "integer" + }, + "segmentsBytesMax": { + "description": "Maximum allowed size of all consolidated segments in bytes.\n", + "type": "integer" + }, + "segmentsMax": { + "description": "The maximum number of segments that are evaluated as\ncandidates for consolidation.\n", + "type": "integer" + }, + "segmentsMin": { + "description": "The minimum number of segments that are\nevaluated as candidates for consolidation\n", + "type": "integer" + }, + "threshold": { + "description": "A value in the range `[0.0, 1.0]`\n", + "maximum": 1, + "minimum": 0, + "type": "number" + }, + "type": { + "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe currently supported types are:\n- `\"tier\"`: consolidate based on segment byte size and live\n document count as dictated by the customization attributes.\n- `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`.\n", + "enum": [ + "tier", + "bytes_accum" + ], "type": "string" } }, - "required": [ - "collection", - "index" - ], "type": "object" }, - "type": "array" + "globallyUniqueId": { + "description": "A unique identifier of the View. 
This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the View (deprecated).\n", + "type": "string" + }, + "links": { + "description": "An object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. See\n[`arangosearch` View Link Properties](https://docs.arangodb.com/3.12/index-and-search/arangosearch/arangosearch-views-reference/#link-properties)\nfor details.\n", + "type": "object" + }, + "name": { + "description": "The name of the View.\n", + "example": "coll", + "type": "string" + }, + "optimizeTopK": { + "description": "An array of strings defining sort expressions that can be optimized.\nThis is also known as _WAND optimization_ (Enterprise Edition only, introduced in v3.12.0).\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "primaryKeyCache": { + "description": "Whether the primary key columns are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "primarySort": { + "description": "The primary sort order, described by an array of objects, each specifying\na field (attribute path) and a sort direction.\n", + "items": { + "properties": { + "asc": { + "description": "The sort direction.\n\n- `true` for ascending\n- `false` for descending\n", + "type": "boolean" + }, + "field": { + "description": "An attribute path. 
The `.` character denotes sub-attributes.\n",
+                              "type": "string"
+                            }
+                          },
+                          "required": [
+                            "field",
+                            "asc"
+                          ],
+                          "type": "object"
+                        },
+                        "type": "array"
+                      },
+                      "primarySortCache": {
+                        "description": "Whether the primary sort columns are always cached in memory\n(Enterprise Edition only).\n",
+                        "type": "boolean"
+                      },
+                      "primarySortCompression": {
+                        "description": "Defines how the primary sort data is compressed.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n",
+                        "enum": [
+                          "lz4",
+                          "none"
+                        ],
+                        "type": "string"
+                      },
+                      "storedValues": {
+                        "description": "An array of objects that describes which document attributes are stored\nin the View index for covering search queries, which means the data can\nbe taken from the index directly and accessing the storage engine can\nbe avoided.\n",
+                        "items": {
+                          "properties": {
+                            "cache": {
+                              "description": "Whether stored values are always cached in memory\n(Enterprise Edition only).\n",
+                              "type": "boolean"
+                            },
+                            "compression": {
+                              "description": "The compression type used for the internal column-store.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n",
+                              "enum": [
+                                "lz4",
+                                "none"
+                              ],
+                              "type": "string"
+                            },
+                            "fields": {
+                              "description": "An array of strings with one or more document attribute paths.\n",
+                              "items": {
+                                "type": "string"
+                              },
+                              "type": "array"
+                            }
+                          },
+                          "required": [
+                            "fields"
+                          ],
+                          "type": "object"
+                        },
+                        "type": "array"
+                      },
+                      "type": {
+                        "description": "The type of the View (`\"arangosearch\"`).\n",
+                        "example": "arangosearch",
+                        "type": "string"
+                      },
+                      "writebufferActive": {
+                        "description": "Maximum number of concurrent active writers (segments) that perform a\ntransaction. 
Other writers (segments) wait till current active writers\n(segments) finish (`0` = disabled).\n", + "type": "integer" + }, + "writebufferIdle": { + "description": "Maximum number of writers (segments) cached in the pool (`0` = disabled).\n", + "type": "integer" + }, + "writebufferSizeMax": { + "description": "Maximum memory byte size per writer (segment) before a writer (segment) flush\nis triggered. `0` value turns off this limit for any writer (buffer) and data\nis flushed periodically based on the value defined for the flush thread\n(`0` = disabled).\n", + "type": "integer" + } }, - "name": { - "description": "The name of the View.\n", - "type": "string" + "required": [ + "links", + "primarySort", + "primarySortCompression", + "optimizeTopK", + "storedValues", + "cleanupIntervalStep", + "commitIntervalMsec", + "consolidationIntervalMsec", + "consolidationPolicy", + "writebufferIdle", + "writebufferActive", + "writebufferSizeMax", + "id", + "name", + "type", + "globallyUniqueId" + ], + "type": "object" + } + } + }, + "description": "The View has been updated successfully.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } }, - "type": { - "description": "The type of the View. 
Must be equal to `\"search-alias\"`.\nThis option is immutable.\n", - "type": "string" - } - }, - "required": [ - "name", - "type" - ], - "type": "object" + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `view-name` path parameter is missing or invalid.\n" + }, + "404": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } } - } - } - }, - "responses": { - "400": { - "description": "If the `name` or `type` attribute are missing or invalid, then an *HTTP 400*\nerror is returned.\n" - }, - "409": { - "description": "If a View called `name` already exists, then an *HTTP 409* error is returned.\n" + }, + "description": "A View called `view-name` could not be found.\n" } }, - "summary": "Create a search-alias View", + "summary": "Replace the properties of an arangosearch View", "tags": [ "Views" ] } }, - "/_api/view/{view-name}": { - "delete": { - "description": "Drops the View identified by `view-name`.\n\nIf the View was successfully dropped, an object is returned with\nthe following attributes:\n- `error`: `false`\n- `id`: The identifier of the dropped View\n", - "operationId": "deleteView", - "parameters": [ - { - "description": "The name of the View to drop.\n", - "in": "path", - "name": "view-name", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "400": { - "description": "If the `view-name` is missing, 
then a *HTTP 400* is returned.\n" - }, - "404": { - "description": "If the `view-name` is unknown, then a *HTTP 404* is returned.\n" - } - }, - "summary": "Drop a View", - "tags": [ - "Views" - ] - }, + "/_db/{database-name}/_api/view/{view-name}/properties#searchalias": { "get": { - "description": "The result is an object briefly describing the View with the following attributes:\n- `id`: The identifier of the View\n- `name`: The name of the View\n- `type`: The type of the View as string\n", - "operationId": "getView", + "description": "Returns an object containing the definition of the View identified by `view-name`.\n", + "operationId": "getViewPropertiesSearchAlias", "parameters": [ { - "description": "The name of the View.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "view-name", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "responses": { - "404": { - "description": "If the `view-name` is unknown, then a *HTTP 404* is returned.\n" - } - }, - "summary": "Get information about a View", - "tags": [ - "Views" - ] - } - }, - "/_api/view/{view-name}/properties": { - "get": { - "description": "Returns an object containing the definition of the View identified by `view-name`.\n\nThe result is an object with a full description of a specific View, including\nView type dependent properties.\n", - "operationId": "getViewProperties", - "parameters": [ + }, { "description": "The name of the View.\n", "in": "path", @@ -21633,114 +28446,198 @@ } ], "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "globallyUniqueId": { + "description": "A unique identifier of the View. 
This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "A unique identifier of the View (deprecated).\n", + "type": "string" + }, + "indexes": { + "description": "The list of the View's inverted indexes.\n", + "items": { + "properties": { + "collection": { + "description": "The name of a collection.\n", + "type": "string" + }, + "index": { + "description": "The name of an inverted index of the `collection`.\n", + "type": "string" + } + }, + "required": [ + "collection", + "index" + ], + "type": "object" + }, + "type": "array" + }, + "name": { + "description": "The name of the View.\n", + "type": "string" + }, + "type": { + "description": "The type of the View (`\"search-alias\"`).\n", + "type": "string" + } + }, + "required": [ + "name", + "type", + "id", + "globallyUniqueId", + "indexes" + ], + "type": "object" + } + } + }, + "description": "An object with a full description of the specified View, including\n`search-alias` View type-dependent properties.\n" + }, "400": { - "description": "If the `view-name` is missing, then a *HTTP 400* is returned.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `view-name` parameter is missing or invalid.\n" }, "404": { - "description": "If the `view-name` is unknown, then a *HTTP 404* is returned.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP 
response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A View called `view-name` could not be found.\n" } }, - "summary": "Get the properties of a View", + "summary": "Read properties of a View", "tags": [ "Views" ] }, "patch": { - "description": "Partially changes the properties of a View by updating the specified attributes.\n\nOn success an object with the following attributes is returned:\n- `id`: The identifier of the View\n- `name`: The name of the View\n- `type`: The View type\n- all additional `arangosearch` View implementation-specific properties\n", - "operationId": "updateViewProperties", + "description": "Updates the list of indexes of a `search-alias` View.\n", + "operationId": "updateViewPropertiesSearchAlias", "parameters": [ { - "description": "The name of the View.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "view-name", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "cleanupIntervalStep": { - "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (default: 2, to disable use: 0).\nFor the case where the consolidation policies merge segments often (i.e. a lot\nof commit+consolidate), a lower value causes a lot of disk space to be\nwasted.\nFor the case where the consolidation policies rarely merge segments (i.e. 
few\ninserts/deletes), a higher value impacts performance without any added\nbenefits.\n\n_Background:_\n With every \"commit\" or \"consolidate\" operation, a new state of the View's\n internal data structures is created on disk.\n Old states/snapshots are released once there are no longer any users\n remaining.\n However, the files for the released states/snapshots are left on disk, and\n only removed by \"cleanup\" operation.\n", - "type": "integer" - }, - "commitIntervalMsec": { - "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (default: 1000, to disable\nuse: 0).\nFor the case where there are a lot of inserts/updates, a higher value causes the\nindex not to account for them and memory usage continues to grow until the commit.\nA lower value impacts performance, including the case where there are no or only a\nfew inserts/updates because of synchronous locking, and it wastes disk space for\neach commit call.\n\n_Background:_\n For data retrieval, ArangoSearch follows the concept of\n \"eventually-consistent\", i.e. 
eventually all the data in ArangoDB will be\n matched by corresponding query expressions.\n The concept of ArangoSearch \"commit\" operations is introduced to\n control the upper-bound on the time until document addition/removals are\n actually reflected by corresponding query expressions.\n Once a \"commit\" operation is complete, all documents added/removed prior to\n the start of the \"commit\" operation will be reflected by queries invoked in\n subsequent ArangoDB transactions, in-progress ArangoDB transactions will\n still continue to return a repeatable-read state.\n", - "type": "integer" - }, - "consolidationIntervalMsec": { - "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(default: 10000, to disable use: 0).\nFor the case where there are a lot of data modification operations, a higher\nvalue could potentially have the data store consume more space and file handles.\nFor the case where there are a few data modification operations, a lower value\nimpacts performance due to no segment candidates being available for\nconsolidation.\n\n_Background:_\n For data modification, ArangoSearch follows the concept of a\n \"versioned data store\". Thus old versions of data may be removed once there\n are no longer any users of the old data. 
The frequency of the cleanup and\n compaction operations are governed by `consolidationIntervalMsec` and the\n candidates for compaction are selected via `consolidationPolicy`.\n", - "type": "integer" - }, - "consolidationPolicy": { - "description": "The consolidation policy to apply for selecting which segments should be merged\n(default: {})\n\n_Background:_\n With each ArangoDB transaction that inserts documents, one or more\n ArangoSearch-internal segments get created.\n Similarly, for removed documents, the segments that contain such documents\n have these documents marked as 'deleted'.\n Over time, this approach causes a lot of small and sparse segments to be\n created.\n A \"consolidation\" operation selects one or more segments and copies all of\n their valid documents into a single new segment, thereby allowing the\n search algorithm to perform more optimally and for extra file handles to be\n released once old segments are no longer used.\n\nSub-properties:\n - `type` (string, _optional_):\n The segment candidates for the \"consolidation\" operation are selected based\n upon several possible configurable formulas as defined by their types.\n The currently supported types are:\n - `\"tier\"` (default): consolidate based on segment byte size and live\n document count as dictated by the customization attributes. If this type\n is used, then below `segments*` and `minScore` properties are available.\n - `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`. 
If this type is used,\n then below `threshold` property is available.\n - `threshold` (number, _optional_): value in the range `[0.0, 1.0]`\n - `segmentsBytesFloor` (number, _optional_): Defines the value (in bytes) to\n treat all smaller segments as equal for consolidation selection\n (default: 2097152)\n - `segmentsBytesMax` (number, _optional_): Maximum allowed size of all\n consolidated segments in bytes (default: 5368709120)\n - `segmentsMax` (number, _optional_): The maximum number of segments that are\n evaluated as candidates for consolidation (default: 10)\n - `segmentsMin` (number, _optional_): The minimum number of segments that are\n evaluated as candidates for consolidation (default: 1)\n - `minScore` (number, _optional_): (default: 0)\n", - "type": "object" - }, - "links": { - "description": "Expects an object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. See\n[`arangosearch` View Link Properties](../../../index-and-search/arangosearch/arangosearch-views-reference.md#link-properties)\nfor details.\n", - "type": "object" - } - }, - "type": "object" - } - } - } - }, - "responses": { - "400": { - "description": "If the `view-name` is missing, then a *HTTP 400* is returned.\n" }, - "404": { - "description": "If the `view-name` is unknown, then a *HTTP 404* is returned.\n" - } - }, - "summary": "Update the properties of an arangosearch View", - "tags": [ - "Views" - ] - }, - "put": { - "description": "Changes all properties of a View by replacing them.\n\nOn success an object with the following attributes is returned:\n- `id`: The identifier of the View\n- `name`: The name of the View\n- `type`: The View type\n- all additional `arangosearch` View implementation-specific properties\n", - "operationId": "replaceViewProperties", - "parameters": [ { "description": "The name of the View.\n", "in": "path", "name": "view-name", "required": true, - "schema": { - "type": "string" - } - } - ], - 
"requestBody": { - "content": { - "application/json": { - "schema": { - "properties": { - "cleanupIntervalStep": { - "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (default: 2, to disable use: 0).\nFor the case where the consolidation policies merge segments often (i.e. a lot\nof commit+consolidate), a lower value causes a lot of disk space to be\nwasted.\nFor the case where the consolidation policies rarely merge segments (i.e. few\ninserts/deletes), a higher value impacts performance without any added\nbenefits.\n\n_Background:_\n With every \"commit\" or \"consolidate\" operation, a new state of the View'\n internal data structures is created on disk.\n Old states/snapshots are released once there are no longer any users\n remaining.\n However, the files for the released states/snapshots are left on disk, and\n only removed by \"cleanup\" operation.\n", - "type": "integer" - }, - "commitIntervalMsec": { - "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (default: 1000, to disable\nuse: 0).\nFor the case where there are a lot of inserts/updates, a higher value causes the\nindex not to account for them and memory usage continues to grow until the commit.\nA lower value impacts performance, including the case where there are no or only a\nfew inserts/updates because of synchronous locking, and it wastes disk space for\neach commit call.\n\n_Background:_\n For data retrieval, ArangoSearch follows the concept of\n \"eventually-consistent\", i.e. 
eventually all the data in ArangoDB will be\n matched by corresponding query expressions.\n The concept of ArangoSearch \"commit\" operations is introduced to\n control the upper-bound on the time until document addition/removals are\n actually reflected by corresponding query expressions.\n Once a \"commit\" operation is complete, all documents added/removed prior to\n the start of the \"commit\" operation will be reflected by queries invoked in\n subsequent ArangoDB transactions, in-progress ArangoDB transactions will\n still continue to return a repeatable-read state.\n", - "type": "integer" - }, - "consolidationIntervalMsec": { - "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(default: 10000, to disable use: 0).\nFor the case where there are a lot of data modification operations, a higher\nvalue could potentially have the data store consume more space and file handles.\nFor the case where there are a few data modification operations, a lower value\nimpacts performance due to no segment candidates being available for\nconsolidation.\n\n_Background:_\n For data modification, ArangoSearch follows the concept of a\n \"versioned data store\". Thus old versions of data may be removed once there\n are no longer any users of the old data. 
The frequency of the cleanup and\n compaction operations are governed by `consolidationIntervalMsec` and the\n candidates for compaction are selected via `consolidationPolicy`.\n", - "type": "integer" - }, - "consolidationPolicy": { - "description": "The consolidation policy to apply for selecting which segments should be merged\n(default: {})\n\n_Background:_\n With each ArangoDB transaction that inserts documents, one or more\n ArangoSearch-internal segments get created.\n Similarly, for removed documents, the segments that contain such documents\n have these documents marked as 'deleted'.\n Over time, this approach causes a lot of small and sparse segments to be\n created.\n A \"consolidation\" operation selects one or more segments and copies all of\n their valid documents into a single new segment, thereby allowing the\n search algorithm to perform more optimally and for extra file handles to be\n released once old segments are no longer used.\n\nSub-properties:\n - `type` (string, _optional_):\n The segment candidates for the \"consolidation\" operation are selected based\n upon several possible configurable formulas as defined by their types.\n The currently supported types are:\n - `\"tier\"` (default): consolidate based on segment byte size and live\n document count as dictated by the customization attributes. If this type\n is used, then below `segments*` and `minScore` properties are available.\n - `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`. 
If this type is used,\n then below `threshold` property is available.\n - `threshold` (number, _optional_): value in the range `[0.0, 1.0]`\n - `segmentsBytesFloor` (number, _optional_): Defines the value (in bytes) to\n treat all smaller segments as equal for consolidation selection\n (default: 2097152)\n - `segmentsBytesMax` (number, _optional_): Maximum allowed size of all\n consolidated segments in bytes (default: 5368709120)\n - `segmentsMax` (number, _optional_): The maximum number of segments that are\n evaluated as candidates for consolidation (default: 10)\n - `segmentsMin` (number, _optional_): The minimum number of segments that are\n evaluated as candidates for consolidation (default: 1)\n - `minScore` (number, _optional_): (default: 0)\n", - "type": "object" - }, - "links": { - "description": "Expects an object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. See\n[`arangosearch` View Link Properties](../../../index-and-search/arangosearch/arangosearch-views-reference.md#link-properties)\nfor details.\n", - "type": "object" + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "application/json": { + "schema": { + "properties": { + "indexes": { + "description": "A list of inverted indexes to add to or remove from the View.\n", + "items": { + "properties": { + "collection": { + "description": "The name of a collection.\n", + "type": "string" + }, + "index": { + "description": "The name of an inverted index of the `collection`, or the index ID without\nthe `\u003ccollection\u003e/` prefix.\n", + "type": "string" + }, + "operation": { + "default": "add", + "description": "Whether to add or remove the index to the stored `indexes` property of the View.\n", + "enum": [ + "add", + "del" + ], + "type": "string" + } + }, + "required": [ + "collection", + "index" + ], + "type": "object" + }, + "type": "array" } }, "type": "object" @@ -21749,51 +28646,154 @@ } }, 
"responses": { + "200": { + "content": { + "application/json": { + "schema": { + "properties": { + "globallyUniqueId": { + "description": "A unique identifier of the View. This is an internal property.\n", + "type": "string" + }, + "id": { + "description": "The identifier of the View.\n", + "type": "string" + }, + "indexes": { + "description": "The list of inverted indexes that are part of the View.\n", + "items": { + "properties": { + "collection": { + "description": "The name of a collection.\n", + "type": "string" + }, + "index": { + "description": "The name of an inverted index of the `collection`.\n", + "type": "string" + } + }, + "required": [ + "collection", + "index" + ], + "type": "object" + }, + "type": "array" + }, + "name": { + "description": "The name of the View.\n", + "type": "string" + }, + "type": { + "description": "The View type (`\"search-alias\"`).\n", + "type": "string" + } + }, + "required": [ + "name", + "type", + "id", + "globallyUniqueId", + "indexes" + ], + "type": "object" + } + } + }, + "description": "The View has been updated successfully.\n" + }, "400": { - "description": "If the `view-name` is missing, then a *HTTP 400* is returned.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `view-name` parameter is missing or invalid.\n" }, "404": { - "description": "If the `view-name` is unknown, then a *HTTP 404* is returned.\n" + "content": { + 
"application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string." + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A View called `view-name` could not be found.\n" } }, - "summary": "Replace the properties of an arangosearch View", + "summary": "Update the properties of a search-alias View", "tags": [ "Views" ] - } - }, - "/_api/view/{view-name}/properties#searchalias": { - "get": { - "description": "Returns an object containing the definition of the View identified by *view-name*.\n\nThe result is an object with a full description of a specific View, including\nView type dependent properties.\n", - "operationId": "getViewPropertiesSearchAlias", + }, + "put": { + "description": "Replaces the list of indexes of a `search-alias` View.\n", + "operationId": "replaceViewPropertiesSearchAlias", "parameters": [ { - "description": "The name of the View.\n", + "description": "The name of the database.\n", + "example": "_system", "in": "path", - "name": "view-name", + "name": "database-name", "required": true, "schema": { "type": "string" } - } - ], - "responses": { - "400": { - "description": "If the *view-name* is missing, then a *HTTP 400* is returned.\n" }, - "404": { - "description": "If the *view-name* is unknown, then a *HTTP 404* is returned.\n" - } - }, - "summary": "Read properties of a View", - "tags": [ - "Views" - ] - }, - "patch": { - "description": "Updates the list of indexes of a `search-alias` View.\n", - "operationId": "updateViewPropertiesSearchAlias", - 
"parameters": [ { "description": "The name of the View.\n", "in": "path", @@ -21810,7 +28810,7 @@ "schema": { "properties": { "indexes": { - "description": "A list of inverted indexes to add to or remove from the View.\n", + "description": "A list of inverted indexes for the View.\n", "items": { "properties": { "collection": { @@ -21820,10 +28820,6 @@ "index": { "description": "The name of an inverted index of the `collection`, or the index ID without\nthe `\u003ccollection\u003e/` prefix.\n", "type": "string" - }, - "operation": { - "description": "Whether to add or remove the index to the stored `indexes` property of the View.\nPossible values: `\"add\"`, `\"del\"`. The default is `\"add\"`.\n", - "type": "string" } }, "required": [ @@ -21846,6 +28842,10 @@ "application/json": { "schema": { "properties": { + "globallyUniqueId": { + "description": "A unique identifier of the View. This is an internal property.\n", + "type": "string" + }, "id": { "description": "The identifier of the View.\n", "type": "string" @@ -21881,35 +28881,114 @@ } }, "required": [ - "id", - "name", - "type", - "indexes" + "name", + "type", + "id", + "globallyUniqueId", + "indexes" + ], + "type": "object" + } + } + }, + "description": "The View has been updated successfully.\n" + }, + "400": { + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `view-name` parameter is missing or invalid.\n" + }, + "404": { + 
"content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" ], "type": "object" } } }, - "description": "On success, an object with the following attributes is returned:\n" - }, - "400": { - "description": "If the `view-name` is missing, then a *HTTP 400* is returned.\n" - }, - "404": { - "description": "If the `view-name` is unknown, then a *HTTP 404* is returned.\n" + "description": "A View called `view-name` could not be found.\n" } }, - "summary": "Update the properties of a search-alias View", + "summary": "Replace the properties of a search-alias View", "tags": [ "Views" ] - }, + } + }, + "/_db/{database-name}/_api/view/{view-name}/rename": { "put": { - "description": "Replaces the list of indexes of a `search-alias` View.\n", - "operationId": "replaceViewPropertiesSearchAlias", + "description": "Renames a View.\n\n\u003e **INFO:**\nRenaming Views is not supported in cluster deployments.\n", + "operationId": "renameView", "parameters": [ { - "description": "The name of the View.\n", + "description": "The name of the database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, + { + "description": "The name of the View to rename.\n", "in": "path", "name": "view-name", "required": true, @@ -21923,28 +29002,14 @@ "application/json": { "schema": { "properties": { - "indexes": { - "description": "A list of inverted indexes for the View.\n", - "items": { - "properties": { - 
"collection": { - "description": "The name of a collection.\n", - "type": "string" - }, - "index": { - "description": "The name of an inverted index of the `collection`, or the index ID without\nthe `\u003ccollection\u003e/` prefix.\n", - "type": "string" - } - }, - "required": [ - "collection", - "index" - ], - "type": "object" - }, - "type": "array" + "name": { + "description": "The new name for the View.\n", + "type": "string" } }, + "required": [ + "name" + ], "type": "object" } } @@ -21956,98 +29021,287 @@ "application/json": { "schema": { "properties": { + "cleanupIntervalStep": { + "description": "Wait at least this many commits between removing unused files in the\nArangoSearch data directory (`0` = disabled).\n", + "type": "integer" + }, + "commitIntervalMsec": { + "description": "Wait at least this many milliseconds between committing View data store\nchanges and making documents visible to queries (`0` = disabled).\n", + "type": "integer" + }, + "consolidationIntervalMsec": { + "description": "Wait at least this many milliseconds between applying `consolidationPolicy` to\nconsolidate the View data store and possibly release space on the filesystem\n(`0` = disabled).\n", + "type": "integer" + }, + "consolidationPolicy": { + "description": "The consolidation policy to apply for selecting which segments should be merged.\n\n- If the `tier` type is used, then the `segments*` and `minScore` properties are available.\n- If the `bytes_accum` type is used, then the `threshold` property is available.\n", + "properties": { + "minScore": { + "description": "Filter out consolidation candidates with a score less than this.\n", + "type": "integer" + }, + "segmentsBytesFloor": { + "description": "Defines the value (in bytes) to treat all smaller segments\nas equal for consolidation selection.\n", + "type": "integer" + }, + "segmentsBytesMax": { + "description": "Maximum allowed size of all consolidated segments in bytes.\n", + "type": "integer" + }, + "segmentsMax": { 
+ "description": "The maximum number of segments that are evaluated as\ncandidates for consolidation.\n", + "type": "integer" + }, + "segmentsMin": { + "description": "The minimum number of segments that are\nevaluated as candidates for consolidation\n", + "type": "integer" + }, + "threshold": { + "description": "A value in the range `[0.0, 1.0]`\n", + "maximum": 1, + "minimum": 0, + "type": "number" + }, + "type": { + "description": "The segment candidates for the \"consolidation\" operation are selected based\nupon several possible configurable formulas as defined by their types.\nThe currently supported types are:\n- `\"tier\"`: consolidate based on segment byte size and live\n document count as dictated by the customization attributes.\n- `\"bytes_accum\"`: consolidate if and only if\n `{threshold} \u003e (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes`\n i.e. the sum of all candidate segment byte size is less than the total\n segment byte size multiplied by the `{threshold}`.\n", + "enum": [ + "tier", + "bytes_accum" + ], + "type": "string" + } + }, + "type": "object" + }, + "globallyUniqueId": { + "description": "A unique identifier of the View. This is an internal property.\n", + "type": "string" + }, "id": { - "description": "The identifier of the View.\n", + "description": "A unique identifier of the View (deprecated).\n", "type": "string" }, - "indexes": { - "description": "The list of inverted indexes that are part of the View.\n", + "links": { + "description": "An object with the attribute keys being names of to be linked collections,\nand the link properties as attribute values. 
See\n[`arangosearch` View Link Properties](https://docs.arangodb.com/3.12/index-and-search/arangosearch/arangosearch-views-reference/#link-properties)\nfor details.\n", + "type": "object" + }, + "name": { + "description": "The name of the View.\n", + "example": "coll", + "type": "string" + }, + "optimizeTopK": { + "description": "An array of strings defining sort expressions that can be optimized.\nThis is also known as _WAND optimization_ (Enterprise Edition only, introduced in v3.12.0).\n", + "items": { + "type": "string" + }, + "type": "array" + }, + "primaryKeyCache": { + "description": "Whether the primary key columns are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "primarySort": { + "description": "The primary sort order, described by an array of objects, each specifying\na field (attribute path) and a sort direction.\n", "items": { "properties": { - "collection": { - "description": "The name of a collection.\n", - "type": "string" + "asc": { + "description": "The sort direction.\n\n- `true` for ascending\n- `false` for descending\n", + "type": "boolean" }, - "index": { - "description": "The name of an inverted index of the `collection`.\n", + "field": { + "description": "An attribute path. 
The `.` character denotes sub-attributes.\n", "type": "string" } }, "required": [ - "collection", - "index" + "field", + "asc" ], "type": "object" }, "type": "array" }, - "name": { - "description": "The name of the View.\n", + "primarySortCache": { + "description": "Whether the primary sort columns are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "primarySortCompression": { + "description": "Defines how the primary sort data is compressed.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n", + "enum": [ + "lz4", + "none" + ], "type": "string" }, + "storedValues": { + "description": "An array of objects that describes which document attributes are stored\nin the View index for covering search queries, which means the data can\nbe taken from the index directly and accessing the storage engine can\nbe avoided.\n", + "items": { + "properties": { + "cache": { + "description": "Whether stored values are always cached in memory\n(Enterprise Edition only).\n", + "type": "boolean" + }, + "compression": { + "description": "The compression type used for the internal column-store.\n\n- `\"lz4\"`: LZ4 fast compression\n- `\"none\"`: no compression\n", + "enum": [ + "lz4", + "none" + ], + "type": "string" + }, + "fields": { + "description": "An array of strings with one or more document attribute paths.\n", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "fields" + ], + "type": "object" + }, + "type": "array" + }, "type": { - "description": "The View type (`\"search-alias\"`).\n", - "type": "string" + "description": "The type of the View (`\"arangosearch\"`).\n", + "example": "arangosearch", + "type": "string" + }, + "writebufferActive": { + "description": "Maximum number of concurrent active writers (segments) that perform a\ntransaction. 
Other writers (segments) wait till current active writers\n(segments) finish (`0` = disabled).\n", + "type": "integer" + }, + "writebufferIdle": { + "description": "Maximum number of writers (segments) cached in the pool (`0` = disabled).\n", + "type": "integer" + }, + "writebufferSizeMax": { + "description": "Maximum memory byte size per writer (segment) before a writer (segment) flush\nis triggered. `0` value turns off this limit for any writer (buffer) and data\nis flushed periodically based on the value defined for the flush thread\n(`0` = disabled).\n", + "type": "integer" } }, "required": [ + "links", + "primarySort", + "primarySortCompression", + "optimizeTopK", + "storedValues", + "cleanupIntervalStep", + "commitIntervalMsec", + "consolidationIntervalMsec", + "consolidationPolicy", + "writebufferIdle", + "writebufferActive", + "writebufferSizeMax", "id", "name", "type", - "indexes" + "globallyUniqueId" ], "type": "object" } } }, - "description": "On success, an object with the following attributes is returned:\n" + "description": "The View has been renamed successfully.\n" }, "400": { - "description": "If the `view-name` is missing, then a *HTTP 400* is returned.\n" + "content": { + "application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 400, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "The `view-name` path parameter is missing or invalid.\n" }, "404": { - "description": "If the `view-name` is unknown, then a *HTTP 404* is returned.\n" + "content": { + 
"application/json": { + "schema": { + "properties": { + "code": { + "description": "The HTTP response status code.\n", + "example": 404, + "type": "integer" + }, + "error": { + "description": "A flag indicating that an error occurred.\n", + "example": true, + "type": "boolean" + }, + "errorMessage": { + "description": "A descriptive error message.\n", + "type": "string" + }, + "errorNum": { + "description": "ArangoDB error number for the error that occurred.\n", + "type": "integer" + } + }, + "required": [ + "error", + "code", + "errorNum", + "errorMessage" + ], + "type": "object" + } + } + }, + "description": "A View called `view-name` could not be found.\n" } }, - "summary": "Replace the properties of a search-alias View", + "summary": "Rename a View", "tags": [ "Views" ] } }, - "/_api/view/{view-name}/rename": { - "put": { - "description": "Renames a View. Expects an object with the attribute(s)\n- `name`: The new name\n\nIt returns an object with the attributes\n- `id`: The identifier of the View.\n- `name`: The new name of the View.\n- `type`: The View type.\n\n\u003e **INFO:**\nRenaming Views is not supported in cluster deployments.\n", - "operationId": "renameView", + "/_db/{database-name}/_api/wal/lastTick": { + "get": { + "description": "Returns the last available tick value that can be served from the server's\nreplication log. This corresponds to the tick of the latest successful operation.\n\nThe result is a JSON object containing the attributes `tick`, `time` and `server`.\n- `tick`: contains the last available tick, `time`\n- `time`: the server time as string in format `YYYY-MM-DDTHH:MM:SSZ`\n- `server`: An object with fields `version` and `serverId`\n\n\u003e **INFO:**\nThis method is not supported on a Coordinator in a cluster deployment.\n", + "operationId": "getWalLastTick", "parameters": [ { - "description": "The name of the View to rename.\n", + "description": "The name of a database. 
The user account you authenticate with needs\nat least read access to this database and administrate access to the\n`_system` database.\n", + "example": "_system", "in": "path", - "name": "view-name", + "name": "database-name", "required": true, "schema": { "type": "string" } } ], - "responses": { - "400": { - "description": "If the `view-name` is missing, then a *HTTP 400* is returned.\n" - }, - "404": { - "description": "If the `view-name` is unknown, then a *HTTP 404* is returned.\n" - } - }, - "summary": "Rename a View", - "tags": [ - "Views" - ] - } - }, - "/_api/wal/lastTick": { - "get": { - "description": "Returns the last available tick value that can be served from the server's\nreplication log. This corresponds to the tick of the latest successful operation.\n\nThe result is a JSON object containing the attributes `tick`, `time` and `server`.\n- `tick`: contains the last available tick, `time`\n- `time`: the server time as string in format `YYYY-MM-DDTHH:MM:SSZ`\n- `server`: An object with fields `version` and `serverId`\n\n\u003e **INFO:**\nThis method is not supported on a Coordinator in a cluster deployment.\n", - "operationId": "getWalLastTick", "responses": { "200": { "description": "is returned if the request was executed successfully.\n" @@ -22068,10 +29322,22 @@ ] } }, - "/_api/wal/range": { + "/_db/{database-name}/_api/wal/range": { "get": { "description": "Returns the currently available ranges of tick values for all Write-Ahead Log\n(WAL) files. The tick values can be used to determine if certain\ndata (identified by tick value) are still available for replication.\n\nThe body of the response contains a JSON object.\n- `tickMin`: minimum tick available\n- `tickMax`: maximum tick available\n- `time`: the server time as string in format `YYYY-MM-DDTHH:MM:SSZ`\n- `server`: An object with fields `version` and `serverId`\n", "operationId": "getWalRange", + "parameters": [ + { + "description": "The name of a database. 
The user account you authenticate with needs\nat least read access to this database and administrate access to the\n`_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + } + ], "responses": { "200": { "description": "is returned if the tick ranges could be determined successfully.\n" @@ -22092,11 +29358,21 @@ ] } }, - "/_api/wal/tail": { + "/_db/{database-name}/_api/wal/tail": { "get": { "description": "Returns data from the server's write-ahead log (also named replication log). This method can be called\nby replication clients after an initial synchronization of data. The method\nreturns all \"recent\" logged operations from the server. Clients\ncan replay and apply these operations locally so they get to the same data\nstate as the server.\n\nClients can call this method repeatedly to incrementally fetch all changes\nfrom the server. In this case, they should provide the `from` value so\nthey only get returned the log events since their last fetch.\n\nWhen the `from` query parameter is not used, the server returns log\nentries starting at the beginning of its replication log. When the `from`\nparameter is used, the server only returns log entries which have\nhigher tick values than the specified `from` value (note: the log entry with a\ntick value equal to `from` is excluded). Use the `from` value when\nincrementally fetching log data.\n\nThe `to` query parameter can be used to optionally restrict the upper bound of\nthe result to a certain tick value. If used, the result contains only log events\nwith tick values up to (including) `to`. In incremental fetching, there is no\nneed to use the `to` parameter. It only makes sense in special situations,\nwhen only parts of the change log are required.\n\nThe `chunkSize` query parameter can be used to control the size of the result.\nIt must be specified in bytes. The `chunkSize` value is only honored\napproximately. 
Otherwise, a too low `chunkSize` value could cause the server\nto not be able to put just one log entry into the result and return it.\nTherefore, the `chunkSize` value is only consulted after a log entry has\nbeen written into the result. If the result size is then greater than\n`chunkSize`, the server responds with as many log entries as there are\nin the response already. If the result size is still less than `chunkSize`,\nthe server tries to return more data if there's more data left to return.\n\nIf `chunkSize` is not specified, some server-side default value is used.\n\nThe `Content-Type` of the result is `application/x-arango-dump`. This is an\neasy-to-process format, with all log events going onto separate lines in the\nresponse body. Each log event itself is a JSON object, with at least the\nfollowing attributes:\n\n- `tick`: the log event tick value\n\n- `type`: the log event type\n\nIndividual log events also have additional attributes, depending on the\nevent type. A few common attributes which are used for multiple events types\nare:\n\n- `cuid`: globally unique id of the View or collection the event was for\n\n- `db`: the database name the event was for\n\n- `tid`: id of the transaction the event was contained in\n\n- `data`: the original document data\n\nFor a more detailed description of the individual replication event types\nand their data structures, see the Operation Types.\n\nThe response also contains the following HTTP headers:\n\n- `x-arango-replication-active`: whether or not the logger is active. Clients\n can use this flag as an indication for their polling frequency. If the\n logger is not active and there are no more replication events available, it\n might be sensible for a client to abort, or to go to sleep for a long time\n and try again later to check whether the logger has been activated.\n\n- `x-arango-replication-lastincluded`: the tick value of the last included\n value in the result. 
In incremental log fetching, this value can be used\n as the `from` value for the following request. **Note** that if the result is\n empty, the value is `0`. This value should not be used as `from` value\n by clients in the next request (otherwise the server would return the log\n events from the start of the log again).\n\n- `x-arango-replication-lastscanned`: the last tick the server scanned while\n computing the operation log. This might include operations the server did not\n returned to you due to various reasons (i.e. the value was filtered or skipped).\n You may use this value in the `lastScanned` header to allow the RocksDB storage engine\n to break up requests over multiple responses.\n\n- `x-arango-replication-lasttick`: the last tick value the server has\n logged in its write ahead log (not necessarily included in the result). By comparing the last\n tick and last included tick values, clients have an approximate indication of\n how many events there are still left to fetch.\n\n- `x-arango-replication-frompresent`: is set to _true_ if server returned\n all tick values starting from the specified tick in the _from_ parameter.\n Should this be set to false the server did not have these operations anymore\n and the client might have missed operations.\n\n- `x-arango-replication-checkmore`: whether or not there already exists more\n log data which the client could fetch immediately. If there is more log data\n available, the client could call the tailing API again with an adjusted `from`\n value to fetch remaining log entries until there are no more.\n\n If there isn't any more log data to fetch, the client might decide to go\n to sleep for a while before calling the logger again.\n\n\u003e **INFO:**\nThis method is not supported on a Coordinator in a cluster deployment.\n", "operationId": "getWalTail", "parameters": [ + { + "description": "The name of a database. 
The user account you authenticate with needs\nat least read access to this database and administrate access to the\n`_system` database.\n", + "example": "_system", + "in": "path", + "name": "database-name", + "required": true, + "schema": { + "type": "string" + } + }, { "description": "Whether operations for all databases should be included. If set to `false`,\nonly the operations for the current database are included. The value `true` is\nonly valid on the `_system` database. The default is `false`.\n", "in": "query", From af6afa9c419e5e5310a79ef129c13b77ee6b7245 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Mon, 28 Apr 2025 12:27:56 +0200 Subject: [PATCH 02/19] first draft, added cancel of traversal in one sided enumerator, and to all providers including mock provider - also added unit/gtest for providers --- .../Aql/ExecutionNode/EnumeratePathsNode.cpp | 12 +++-- .../Aql/ExecutionNode/ShortestPathNode.cpp | 11 ++-- arangod/Aql/ExecutionNode/TraversalNode.cpp | 9 ++-- .../Graph/Enumerators/OneSidedEnumerator.cpp | 9 +++- .../Options/OneSidedEnumeratorOptions.cpp | 5 +- .../Graph/Options/OneSidedEnumeratorOptions.h | 7 ++- arangod/Graph/Options/QueryContextObserver.h | 52 ++++++++++++++++++ .../Graph/Providers/BaseProviderOptions.cpp | 18 ++++--- arangod/Graph/Providers/BaseProviderOptions.h | 27 +++++++--- arangod/Graph/Providers/ClusterProvider.cpp | 14 +++++ .../Graph/Providers/SingleServerProvider.cpp | 5 ++ tests/Graph/DFSFinderTest.cpp | 2 +- tests/Graph/GenericGraphProviderTest.cpp | 54 ++++++++++++++++++- tests/Graph/SingleServerProviderTest.cpp | 2 +- tests/Mocks/MockGraphProvider.cpp | 10 +++- tests/Mocks/MockGraphProvider.h | 11 ++-- 16 files changed, 211 insertions(+), 37 deletions(-) create mode 100644 arangod/Graph/Options/QueryContextObserver.h diff --git a/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp b/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp index 7b819b1a1a9b..196d8f7207cf 100644 --- 
a/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp +++ b/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp @@ -476,13 +476,15 @@ std::unique_ptr EnumeratePathsNode::createBlock( SingleServerBaseProviderOptions forwardProviderOptions( opts->tmpVar(), std::move(usedIndexes), opts->getExpressionCtx(), {}, opts->collectionToShard(), opts->getVertexProjections(), - opts->getEdgeProjections(), opts->produceVertices(), opts->useCache()); + opts->getEdgeProjections(), opts->produceVertices(), opts->useCache(), + opts->query()); SingleServerBaseProviderOptions backwardProviderOptions( opts->tmpVar(), std::move(reversedUsedIndexes), opts->getExpressionCtx(), {}, opts->collectionToShard(), opts->getVertexProjections(), opts->getEdgeProjections(), - opts->produceVertices(), opts->useCache()); + opts->produceVertices(), opts->useCache(), + opts->query()); using Provider = SingleServerProvider; if (opts->query().queryOptions().getTraversalProfileLevel() == @@ -679,10 +681,12 @@ std::unique_ptr EnumeratePathsNode::createBlock( auto cache = std::make_shared( opts->query().resourceMonitor()); ClusterBaseProviderOptions forwardProviderOptions(cache, engines(), false, - opts->produceVertices()); + opts->produceVertices(), + opts->query()); forwardProviderOptions.setClearEdgeCacheOnClear(false); ClusterBaseProviderOptions backwardProviderOptions(cache, engines(), true, - opts->produceVertices()); + opts->produceVertices(), + opts->query()); backwardProviderOptions.setClearEdgeCacheOnClear(false); // A comment is in order here: For all cases covered here // (k-shortest-paths, all shortest paths, k-paths) we do not need to diff --git a/arangod/Aql/ExecutionNode/ShortestPathNode.cpp b/arangod/Aql/ExecutionNode/ShortestPathNode.cpp index 3a7ba73faf4b..11425f15f738 100644 --- a/arangod/Aql/ExecutionNode/ShortestPathNode.cpp +++ b/arangod/Aql/ExecutionNode/ShortestPathNode.cpp @@ -437,13 +437,14 @@ std::unique_ptr ShortestPathNode::createBlock( SingleServerBaseProviderOptions 
forwardProviderOptions( opts->tmpVar(), std::move(usedIndexes), opts->getExpressionCtx(), {}, opts->collectionToShard(), opts->getVertexProjections(), - opts->getEdgeProjections(), opts->produceVertices(), opts->useCache()); + opts->getEdgeProjections(), opts->produceVertices(), opts->useCache(), + opts->query()); SingleServerBaseProviderOptions backwardProviderOptions( opts->tmpVar(), std::move(reversedUsedIndexes), opts->getExpressionCtx(), {}, opts->collectionToShard(), opts->getVertexProjections(), opts->getEdgeProjections(), - opts->produceVertices(), opts->useCache()); + opts->produceVertices(), opts->useCache(), opts->query()); auto usesWeight = checkWeight(forwardProviderOptions, backwardProviderOptions); @@ -511,9 +512,11 @@ std::unique_ptr ShortestPathNode::createBlock( auto cache = std::make_shared( opts->query().resourceMonitor()); ClusterBaseProviderOptions forwardProviderOptions(cache, engines(), false, - opts->produceVertices()); + opts->produceVertices(), + opts->query()); ClusterBaseProviderOptions backwardProviderOptions(cache, engines(), true, - opts->produceVertices()); + opts->produceVertices(), + opts->query()); auto usesWeight = checkWeight(forwardProviderOptions, backwardProviderOptions); diff --git a/arangod/Aql/ExecutionNode/TraversalNode.cpp b/arangod/Aql/ExecutionNode/TraversalNode.cpp index 123e65415774..3fdf08ac3903 100644 --- a/arangod/Aql/ExecutionNode/TraversalNode.cpp +++ b/arangod/Aql/ExecutionNode/TraversalNode.cpp @@ -818,7 +818,8 @@ std::unique_ptr TraversalNode::createBlock( TraverserOptions* opts = this->options(); arangodb::graph::OneSidedEnumeratorOptions options{opts->minDepth, - opts->maxDepth}; + opts->maxDepth, + opts->query()}; /* * PathValidator Disjoint Helper (TODO [GraphRefactor]: Copy from createBlock) * Clean this up as soon we clean up the whole TraversalNode as well. 
@@ -915,7 +916,8 @@ ClusterBaseProviderOptions TraversalNode::getClusterBaseProviderOptions( opts->produceVertices(), &opts->getExpressionCtx(), filterConditionVariables, - std::move(availableDepthsSpecificConditions)}; + std::move(availableDepthsSpecificConditions), + opts->query()}; } SingleServerBaseProviderOptions @@ -936,7 +938,8 @@ TraversalNode::getSingleServerBaseProviderOptions( opts->getVertexProjections(), opts->getEdgeProjections(), opts->produceVertices(), - opts->useCache()}; + opts->useCache(), + opts->query()}; } /// @brief creates corresponding ExecutionBlock diff --git a/arangod/Graph/Enumerators/OneSidedEnumerator.cpp b/arangod/Graph/Enumerators/OneSidedEnumerator.cpp index 22893dbd8d5b..c55db9aa159b 100644 --- a/arangod/Graph/Enumerators/OneSidedEnumerator.cpp +++ b/arangod/Graph/Enumerators/OneSidedEnumerator.cpp @@ -107,8 +107,13 @@ void OneSidedEnumerator::clearProvider() { } template -auto OneSidedEnumerator::computeNeighbourhoodOfNextVertex() - -> void { +void OneSidedEnumerator::computeNeighbourhoodOfNextVertex() { + if (_options.isKilled()) { + // Clear false may sounds misleading, but this means we do not want to keep the path store + clear(false); + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } + // Pull next element from Queue // Do 1 step search TRI_ASSERT(!_queue.isEmpty()); diff --git a/arangod/Graph/Options/OneSidedEnumeratorOptions.cpp b/arangod/Graph/Options/OneSidedEnumeratorOptions.cpp index ccfd8dcbff5b..2bf295218e5d 100644 --- a/arangod/Graph/Options/OneSidedEnumeratorOptions.cpp +++ b/arangod/Graph/Options/OneSidedEnumeratorOptions.cpp @@ -27,9 +27,8 @@ using namespace arangodb; using namespace arangodb::graph; -OneSidedEnumeratorOptions::OneSidedEnumeratorOptions(size_t minDepth, - size_t maxDepth) - : _minDepth(minDepth), _maxDepth(maxDepth) {} +OneSidedEnumeratorOptions::OneSidedEnumeratorOptions(size_t minDepth, size_t maxDepth, aql::QueryContext& query) + : _minDepth(minDepth), _maxDepth(maxDepth), 
_observer(query) {} OneSidedEnumeratorOptions::~OneSidedEnumeratorOptions() = default; diff --git a/arangod/Graph/Options/OneSidedEnumeratorOptions.h b/arangod/Graph/Options/OneSidedEnumeratorOptions.h index fcfd34d8f9a1..c1519906e55d 100644 --- a/arangod/Graph/Options/OneSidedEnumeratorOptions.h +++ b/arangod/Graph/Options/OneSidedEnumeratorOptions.h @@ -24,20 +24,25 @@ #pragma once +#include "Graph/Options/QueryContextObserver.h" +// TODO HEIKO: do not include h file here, use forward declaration instead +// TODO HEIKO: do not include h file here, use forward declaration instead #include namespace arangodb::graph { struct OneSidedEnumeratorOptions { public: - OneSidedEnumeratorOptions(size_t minDepth, size_t maxDepth); + OneSidedEnumeratorOptions(size_t minDepth, size_t maxDepth, aql::QueryContext& query); ~OneSidedEnumeratorOptions(); [[nodiscard]] size_t getMinDepth() const noexcept; [[nodiscard]] size_t getMaxDepth() const noexcept; + [[nodiscard]] bool isKilled() const noexcept { return _observer.isKilled(); } private: size_t const _minDepth; size_t const _maxDepth; + QueryContextObserver _observer; }; } // namespace arangodb::graph diff --git a/arangod/Graph/Options/QueryContextObserver.h b/arangod/Graph/Options/QueryContextObserver.h new file mode 100644 index 000000000000..d7afffc0cab9 --- /dev/null +++ b/arangod/Graph/Options/QueryContextObserver.h @@ -0,0 +1,52 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2024 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Business Source License 1.1 (the "License"); +/// you may not use this file except in compliance with the License. 
+/// You may obtain a copy of the License at +/// +/// https://github.com/arangodb/arangodb/blob/devel/LICENSE +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Michael Hackstein +/// @author Heiko Kernbach +//////////////////////////////////////////////////////////////////////////////// + +#pragma once + +#include "Aql/QueryContext.h" + +// This class serves as a wrapper around QueryContext to explicitly track where query killing +// is being used in the graph traversal code. It provides a single point of access to check +// if a query has been killed, making it easier to maintain and modify the query killing +// behavior if needed. +// +// While this adds a small layer of indirection, it helps with code clarity and maintainability. +// If profiling shows this wrapper causes significant overhead, we can remove it and use +// QueryContext directly. +// +// We can change this or discuss if this approach is not liked. 
+ +namespace arangodb::graph { + +class QueryContextObserver { + public: + explicit QueryContextObserver(aql::QueryContext& query) : _query(query) {} + + [[nodiscard]] bool isKilled() const { return _query.killed(); } + + private: + aql::QueryContext& _query; +}; + +} // namespace arangodb::graph \ No newline at end of file diff --git a/arangod/Graph/Providers/BaseProviderOptions.cpp b/arangod/Graph/Providers/BaseProviderOptions.cpp index ee005d54239f..a0cc669dcd4e 100644 --- a/arangod/Graph/Providers/BaseProviderOptions.cpp +++ b/arangod/Graph/Providers/BaseProviderOptions.cpp @@ -86,7 +86,8 @@ SingleServerBaseProviderOptions::SingleServerBaseProviderOptions( MonitoredCollectionToShardMap const& collectionToShardMap, aql::Projections const& vertexProjections, aql::Projections const& edgeProjections, bool produceVertices, - bool useCache) + bool useCache, + aql::QueryContext& query) : _temporaryVariable(tmpVar), _indexInformation(std::move(indexInfo)), _expressionContext(expressionContext), @@ -96,7 +97,8 @@ SingleServerBaseProviderOptions::SingleServerBaseProviderOptions( _vertexProjections{vertexProjections}, _edgeProjections{edgeProjections}, _produceVertices(produceVertices), - _useCache(useCache) {} + _useCache(useCache), + _queryObserver(query) {} aql::Variable const* SingleServerBaseProviderOptions::tmpVar() const { return _temporaryVariable; @@ -169,13 +171,15 @@ void SingleServerBaseProviderOptions::unPrepareContext() { ClusterBaseProviderOptions::ClusterBaseProviderOptions( std::shared_ptr cache, std::unordered_map const* engines, bool backward, - bool produceVertices) + bool produceVertices, + aql::QueryContext& query) : _cache(std::move(cache)), _engines(engines), _backward(backward), _produceVertices(produceVertices), _expressionContext(nullptr), - _weightCallback(std::nullopt) { + _weightCallback(std::nullopt), + _queryObserver(query) { TRI_ASSERT(_cache != nullptr); TRI_ASSERT(_engines != nullptr); } @@ -186,7 +190,8 @@ 
ClusterBaseProviderOptions::ClusterBaseProviderOptions( bool produceVertices, aql::FixedVarExpressionContext* expressionContext, std::vector> filterConditionVariables, - std::unordered_set availableDepthsSpecificConditions) + std::unordered_set availableDepthsSpecificConditions, + aql::QueryContext& query) : _cache(std::move(cache)), _engines(engines), _backward(backward), @@ -195,7 +200,8 @@ ClusterBaseProviderOptions::ClusterBaseProviderOptions( _filterConditionVariables(filterConditionVariables), _weightCallback(std::nullopt), _availableDepthsSpecificConditions( - std::move(availableDepthsSpecificConditions)) { + std::move(availableDepthsSpecificConditions)), + _queryObserver(query) { TRI_ASSERT(_cache != nullptr); TRI_ASSERT(_engines != nullptr); } diff --git a/arangod/Graph/Providers/BaseProviderOptions.h b/arangod/Graph/Providers/BaseProviderOptions.h index 654562dacabb..2d0f79fecc89 100644 --- a/arangod/Graph/Providers/BaseProviderOptions.h +++ b/arangod/Graph/Providers/BaseProviderOptions.h @@ -28,8 +28,10 @@ #include "Aql/InAndOutRowExpressionContext.h" #include "Aql/NonConstExpressionContainer.h" #include "Aql/Projections.h" +#include "Aql/QueryContext.h" #include "Basics/MemoryTypes/MemoryTypes.h" #include "Graph/Cache/RefactoredClusterTraverserCache.h" +#include "Graph/Options/QueryContextObserver.h" #include "Transaction/Methods.h" #ifdef USE_ENTERPRISE @@ -45,7 +47,6 @@ namespace arangodb { namespace aql { -class QueryContext; struct AstNode; class InputAqlItemRow; } // namespace aql @@ -99,10 +100,10 @@ struct SingleServerBaseProviderOptions { MonitoredCollectionToShardMap const& collectionToShardMap, aql::Projections const& vertexProjections, aql::Projections const& edgeProjections, bool produceVertices, - bool useCache); + bool useCache, + aql::QueryContext& query); - SingleServerBaseProviderOptions(SingleServerBaseProviderOptions const&) = - delete; + SingleServerBaseProviderOptions(SingleServerBaseProviderOptions const&) = delete; 
SingleServerBaseProviderOptions(SingleServerBaseProviderOptions&&) = default; aql::Variable const* tmpVar() const; @@ -132,6 +133,10 @@ struct SingleServerBaseProviderOptions { aql::Projections const& getEdgeProjections() const; + bool isKilled() const noexcept { + return _queryObserver.isKilled(); + } + private: // The temporary Variable used in the Indexes aql::Variable const* _temporaryVariable; @@ -171,6 +176,8 @@ struct SingleServerBaseProviderOptions { bool const _produceVertices; bool const _useCache; + + QueryContextObserver _queryObserver; }; struct ClusterBaseProviderOptions { @@ -181,7 +188,8 @@ struct ClusterBaseProviderOptions { ClusterBaseProviderOptions( std::shared_ptr cache, std::unordered_map const* engines, bool backward, - bool produceVertices); + bool produceVertices, + aql::QueryContext& query); ClusterBaseProviderOptions( std::shared_ptr cache, @@ -189,7 +197,8 @@ struct ClusterBaseProviderOptions { bool produceVertices, aql::FixedVarExpressionContext* expressionContext, std::vector> filterConditionVariables, - std::unordered_set availableDepthsSpecificConditions); + std::unordered_set availableDepthsSpecificConditions, + aql::QueryContext& query); RefactoredClusterTraverserCache* getCache(); @@ -227,6 +236,10 @@ struct ClusterBaseProviderOptions { _clearEdgeCacheOnClear = flag; } + bool isKilled() const noexcept { + return _queryObserver.isKilled(); + } + private: std::shared_ptr _cache; @@ -264,6 +277,8 @@ struct ClusterBaseProviderOptions { // not true and hurts performance. Therefore, for these cases it is possible // to set this flag to `false` to retain cached data across calls to `clear`. 
bool _clearEdgeCacheOnClear = true; + + QueryContextObserver _queryObserver; }; } // namespace graph diff --git a/arangod/Graph/Providers/ClusterProvider.cpp b/arangod/Graph/Providers/ClusterProvider.cpp index 9a1efd29c1ed..b8d69b1a03e0 100644 --- a/arangod/Graph/Providers/ClusterProvider.cpp +++ b/arangod/Graph/Providers/ClusterProvider.cpp @@ -445,6 +445,10 @@ Result ClusterProvider::fetchEdgesFromEngines(Step* step) { template auto ClusterProvider::fetchVertices( std::vector const& looseEnds) -> std::vector { + if (_opts.isKilled()) { + clear(); + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } std::vector result{}; if (!looseEnds.empty()) { @@ -471,6 +475,11 @@ auto ClusterProvider::fetchVertices( template auto ClusterProvider::fetchEdges( std::vector const& fetchedVertices) -> Result { + if (_opts.isKilled()) { + clear(); + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } + for (auto const& step : fetchedVertices) { if (!_vertexConnectedEdges.contains(step->getVertex().getID())) { auto res = fetchEdgesFromEngines(step); @@ -501,6 +510,11 @@ template auto ClusterProvider::expand( Step const& step, size_t previous, std::function const& callback) -> void { + if (_opts.isKilled()) { + clear(); + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } + TRI_ASSERT(!step.isLooseEnd()); auto const& vertex = step.getVertex(); diff --git a/arangod/Graph/Providers/SingleServerProvider.cpp b/arangod/Graph/Providers/SingleServerProvider.cpp index 19b826b9c356..84a20a18f548 100644 --- a/arangod/Graph/Providers/SingleServerProvider.cpp +++ b/arangod/Graph/Providers/SingleServerProvider.cpp @@ -164,6 +164,11 @@ auto SingleServerProvider::expand( TRI_ASSERT(!step.isLooseEnd()); auto const& vertex = step.getVertex(); + if (_opts.isKilled()) { + clear(); + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } + LOG_TOPIC("c9169", TRACE, Logger::GRAPHS) << " Expanding " << vertex.getID(); diff --git a/tests/Graph/DFSFinderTest.cpp b/tests/Graph/DFSFinderTest.cpp index 
95c954b03ee4..a6559c6b80bd 100644 --- a/tests/Graph/DFSFinderTest.cpp +++ b/tests/Graph/DFSFinderTest.cpp @@ -174,7 +174,7 @@ class DFSFinderTest } auto pathFinder(size_t minDepth, size_t maxDepth) -> DFSFinder { - arangodb::graph::OneSidedEnumeratorOptions options{minDepth, maxDepth}; + arangodb::graph::OneSidedEnumeratorOptions options{minDepth, maxDepth, *_query.get()}; PathValidatorOptions validatorOpts{&_tmpVar, _expressionContext}; return DFSFinder( {*_query.get(), diff --git a/tests/Graph/GenericGraphProviderTest.cpp b/tests/Graph/GenericGraphProviderTest.cpp index fa8c7fce614a..d4eeae194aba 100644 --- a/tests/Graph/GenericGraphProviderTest.cpp +++ b/tests/Graph/GenericGraphProviderTest.cpp @@ -37,7 +37,9 @@ #include "Graph/Steps/SingleServerProviderStep.h" #include "Graph/TraverserOptions.h" +#include #include +#include using namespace arangodb; using namespace arangodb::tests; @@ -138,7 +140,8 @@ class GraphProviderTest : public ::testing::Test { std::move(usedIndexes), std::unordered_map>{}), *_expressionContext.get(), {}, _emptyShardMap, _vertexProjections, - _edgeProjections, /*produceVertices*/ true, /*useCache*/ true); + _edgeProjections, /*produceVertices*/ true, /*useCache*/ true, + *query); return SingleServerProvider( *query.get(), std::move(opts), resourceMonitor); } @@ -241,7 +244,7 @@ class GraphProviderTest : public ::testing::Test { std::make_shared(resourceMonitor); ClusterBaseProviderOptions opts(clusterCache, clusterEngines.get(), false, - true); + true, *query); return ClusterProvider(*query.get(), std::move(opts), resourceMonitor); } @@ -451,6 +454,53 @@ TYPED_TEST(GraphProviderTest, destroy_engines) { } } +TYPED_TEST(GraphProviderTest, should_cancel_traversal_when_query_is_aborted) { + // Create a graph with 4 nodes in a circle (bidirectional edges) + MockGraph g{}; + // Add edges in a circle (0->1->2->3->0) and back + g.addEdge(0, 1); + g.addEdge(1, 2); + g.addEdge(2, 3); + g.addEdge(3, 0); + // Add reverse edges + g.addEdge(1, 0); + 
g.addEdge(2, 1); + g.addEdge(3, 2); + g.addEdge(0, 3); + + std::unordered_map>> const& + expectedVerticesEdgesBundleToFetch = {{0, {}}}; + + auto testee = this->makeProvider(g, expectedVerticesEdgesBundleToFetch); + std::string startString = g.vertexToId(0); + VPackHashedStringRef startH{startString.c_str(), + static_cast(startString.length())}; + auto start = testee.startVertex(startH); + + if (start.isLooseEnd()) { + std::vector looseEnds{}; + looseEnds.emplace_back(&start); + auto futures = testee.fetch(looseEnds); + auto steps = futures.waitAndGet(); + } + + std::thread abortThread([this]() { + this->query->kill(); + std::this_thread::sleep_for(std::chrono::milliseconds(300)); + }); + + EXPECT_THROW( + { + while (true) { + testee.expand(start, 0, + [](typename decltype(testee)::Step n) -> void {}); + } + }, + arangodb::basics::Exception); + + abortThread.join(); +} + } // namespace generic_graph_provider_test } // namespace tests } // namespace arangodb diff --git a/tests/Graph/SingleServerProviderTest.cpp b/tests/Graph/SingleServerProviderTest.cpp index d1ee8dceb19f..9c1077e26b61 100644 --- a/tests/Graph/SingleServerProviderTest.cpp +++ b/tests/Graph/SingleServerProviderTest.cpp @@ -119,7 +119,7 @@ class SingleServerProviderTest : public ::testing::Test { std::move(usedIndexes), std::unordered_map>{}), *_expressionContext.get(), {}, _emptyShardMap, _vertexProjections, - _edgeProjections, /*produceVertices*/ true, /*useCache*/ true); + _edgeProjections, /*produceVertices*/ true, /*useCache*/ true, *query); return {*query.get(), std::move(opts), _resourceMonitor}; } diff --git a/tests/Mocks/MockGraphProvider.cpp b/tests/Mocks/MockGraphProvider.cpp index 53cd2c0fb8a2..de9218724b50 100644 --- a/tests/Mocks/MockGraphProvider.cpp +++ b/tests/Mocks/MockGraphProvider.cpp @@ -101,7 +101,8 @@ MockGraphProvider::MockGraphProvider(arangodb::aql::QueryContext& queryContext, : _trx(queryContext.newTrxContext()), _reverse(opts.reverse()), _looseEnds(opts.looseEnds()), - 
_stats{} { + _stats{}, + _queryContext(queryContext) { for (auto const& it : opts.data().edges()) { _fromIndex[it._from].push_back(it); _toIndex[it._to].push_back(it); @@ -159,9 +160,16 @@ auto MockGraphProvider::fetch(std::vector const& looseEnds) auto MockGraphProvider::expand(Step const& step, size_t previous, std::function callback) -> void { + if (isKilled()) { + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } + std::vector results{}; results = expand(step, previous); for (auto const& s : results) { + if (isKilled()) { + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } callback(s); } } diff --git a/tests/Mocks/MockGraphProvider.h b/tests/Mocks/MockGraphProvider.h index ca3dc5ead1a7..bb3021f7dd6e 100644 --- a/tests/Mocks/MockGraphProvider.h +++ b/tests/Mocks/MockGraphProvider.h @@ -36,9 +36,11 @@ #include "Transaction/Hints.h" #include "Transaction/Methods.h" +#include "Aql/QueryContext.h" #include "Graph/Providers/BaseStep.h" - -#include +#include "Graph/EdgeDocumentToken.h" +#include "Aql/TraversalStats.h" +#include "VocBase/vocbase.h" namespace arangodb { @@ -288,7 +290,7 @@ class MockGraphProvider { ~MockGraphProvider(); MockGraphProvider& operator=(MockGraphProvider const&) = delete; - MockGraphProvider& operator=(MockGraphProvider&&) = default; + MockGraphProvider& operator=(MockGraphProvider&&) = delete; void destroyEngines(){}; auto startVertex(VertexType vertex, size_t depth = 0, double weight = 0.0) @@ -339,6 +341,8 @@ class MockGraphProvider { _weightCallback = std::move(callback); } + bool isKilled() const { return _queryContext.killed(); } + private: auto decideProcessable() const -> bool; @@ -351,6 +355,7 @@ class MockGraphProvider { arangodb::aql::TraversalStats _stats; // Optional callback to compute the weight of an edge. 
std::optional _weightCallback; + arangodb::aql::QueryContext& _queryContext; }; } // namespace graph } // namespace tests From 4cfc1714916c997906fa5bdf326812e85e438714 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Tue, 6 May 2025 12:48:30 +0200 Subject: [PATCH 03/19] added bidirectionalCircle as new graph in graph test suite --- .../aql-graph-traversal-generic-graphs.js | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js index acc2b4e8989c..59598100e36e 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js @@ -1161,6 +1161,60 @@ protoGraphs.moreAdvancedPath = new ProtoGraph("moreAdvancedPath", [ ); } +/* + * B + * ↕ ↕ + * A C + * ↕ ↕ + * D + */ +protoGraphs.bidirectionalCircle = new ProtoGraph("bidirectionalCircle", [ + ["A", "B", 1], + ["B", "A", 1], + ["B", "C", 1], + ["C", "B", 1], + ["C", "D", 1], + ["D", "C", 1], + ["D", "A", 1], + ["A", "D", 1] + ], + [1, 2, 4], + [ + { + numberOfShards: 1, + vertexSharding: + [ + ["A", 0], + ["B", 0], + ["C", 0], + ["D", 0] + ] + }, + { + numberOfShards: 2, + vertexSharding: + [ + ["A", 0], + ["B", 1], + ["C", 0], + ["D", 1] + ] + }, + { + numberOfShards: 4, + vertexSharding: + [ + ["A", 0], + ["B", 1], + ["C", 2], + ["D", 3] + ] + } + ], + [], + true +); + exports.ProtoGraph = ProtoGraph; exports.protoGraphs = protoGraphs; exports.TestVariants = TestVariants; From db7f093669fc51a7206804b3a7c404686b6d373a Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Tue, 6 May 2025 13:21:53 +0200 Subject: [PATCH 04/19] added hugeCompleteGraph --- .../aql-graph-traversal-generic-graphs.js | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js 
b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js index 59598100e36e..cc5abddb7f83 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js @@ -841,6 +841,73 @@ protoGraphs.completeGraph = new ProtoGraph("completeGraph", [ ] ); +// Generate node names +const generateNodeNames = (count) => { + const alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'; + const nodes = []; + + // First add single letter nodes + for (let i = 0; i < Math.min(count, alphabet.length); i++) { + nodes.push(alphabet[i]); + } + + // If we need more nodes, add two-letter combinations + if (count > alphabet.length) { + for (let i = 0; i < alphabet.length && nodes.length < count; i++) { + for (let j = 0; j < alphabet.length && nodes.length < count; j++) { + nodes.push(alphabet[i] + alphabet[j]); + } + } + } + + return nodes; +}; + +// Generate edges for complete graph +const generateCompleteGraphEdges = (nodes) => { + const edges = []; + for (let i = 0; i < nodes.length; i++) { + for (let j = 0; j < nodes.length; j++) { + if (i !== j) { // Don't create self-loops + // Generate random weight between 1 and 5 + const weight = Math.floor(Math.random() * 5) + 1; + edges.push([nodes[i], nodes[j], weight]); + } + } + } + return edges; +}; + +// Create the huge complete graph with 100 nodes +/* + * B + * ↙↗ ↑ ↖↘ + * A ← → C // Demonstration of the complete graph + * ↖↘ ↓ ↙↗ // Note: Consists out of 100 nodes + * D + */ +const hugeCompleteGraphNodes = generateNodeNames(100); +const hugeCompleteGraphEdges = generateCompleteGraphEdges(hugeCompleteGraphNodes); + +protoGraphs.hugeCompleteGraph = new ProtoGraph("hugeCompleteGraph", + hugeCompleteGraphEdges, + [1, 2, 5], + [ + { + numberOfShards: 1, + vertexSharding: hugeCompleteGraphNodes.map((node, index) => [node, 0]) + }, + { + numberOfShards: 2, + vertexSharding: hugeCompleteGraphNodes.map((node, index) => [node, index % 2]) + 
}, + { + numberOfShards: 5, + vertexSharding: hugeCompleteGraphNodes.map((node, index) => [node, index % 5]) + } + ] +); + /* * * From a03dc03cfc89e318fab0d6f0922d968e4d909a1c Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Tue, 13 May 2025 16:10:23 +0200 Subject: [PATCH 05/19] added query cancel observer to twosided enumerator as well, so kpaths shortest paths and kshortestpath, also added tests --- .../Aql/ExecutionNode/EnumeratePathsNode.cpp | 2 +- .../Aql/ExecutionNode/ShortestPathNode.cpp | 2 +- .../Graph/Enumerators/TwoSidedEnumerator.cpp | 15 +- .../Options/TwoSidedEnumeratorOptions.cpp | 5 +- .../Graph/Options/TwoSidedEnumeratorOptions.h | 10 +- ...h-traversal-generic-bidirectional-tests.js | 284 ++++++++++++++++++ .../aql-graph-traversal-generic-graphs.js | 81 +++++ .../aql-graph-traversal-generic-tests.js | 26 ++ tests/Graph/AllShortestPathsFinderTest.cpp | 2 +- tests/Graph/KPathFinderTest.cpp | 2 +- tests/Graph/KShortestPathsFinderTest.cpp | 4 +- tests/Graph/WeightedShortestPathTest.cpp | 2 +- 12 files changed, 420 insertions(+), 15 deletions(-) create mode 100644 js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js diff --git a/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp b/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp index 196d8f7207cf..e1951841c036 100644 --- a/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp +++ b/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp @@ -432,7 +432,7 @@ std::unique_ptr EnumeratePathsNode::createBlock( TRI_ASSERT(pathType() != arangodb::graph::PathType::Type::ShortestPath); arangodb::graph::TwoSidedEnumeratorOptions enumeratorOptions{ - opts->getMinDepth(), opts->getMaxDepth(), pathType()}; + opts->getMinDepth(), opts->getMaxDepth(), pathType(), opts->query()}; PathValidatorOptions validatorOptions(opts->tmpVar(), opts->getExpressionCtx()); diff --git a/arangod/Aql/ExecutionNode/ShortestPathNode.cpp b/arangod/Aql/ExecutionNode/ShortestPathNode.cpp index 
11425f15f738..2e7f89328768 100644 --- a/arangod/Aql/ExecutionNode/ShortestPathNode.cpp +++ b/arangod/Aql/ExecutionNode/ShortestPathNode.cpp @@ -419,7 +419,7 @@ std::unique_ptr ShortestPathNode::createBlock( arangodb::graph::TwoSidedEnumeratorOptions enumeratorOptions{ 0, std::numeric_limits::max(), - arangodb::graph::PathType::Type::ShortestPath}; + arangodb::graph::PathType::Type::ShortestPath, opts->query()}; PathValidatorOptions validatorOptions(opts->tmpVar(), opts->getExpressionCtx()); diff --git a/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp b/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp index 60b6ede7d435..e0f37bb1530e 100644 --- a/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp +++ b/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp @@ -197,6 +197,11 @@ template:: Ball::computeNeighbourhoodOfNextVertex(Ball& other, ResultList& results) -> void { + if (_graphOptions.isKilled()) { + clear(); + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } + // Pull next element from Queue // Do 1 step search TRI_ASSERT(!_queue.isEmpty()); @@ -248,8 +253,8 @@ auto TwoSidedEnumerator:: template -void TwoSidedEnumerator:: - Ball::testDepthZero(Ball& other, ResultList& results) { +void TwoSidedEnumerator::Ball::testDepthZero(Ball& other, ResultList& results) { for (auto const& step : _shell) { other.matchResultsInShell(step, results, _validator); } @@ -286,9 +291,9 @@ auto TwoSidedEnumerator:: template -auto TwoSidedEnumerator:: - Ball::buildPath(Step const& vertexInShell, - PathResult& path) -> void { +auto TwoSidedEnumerator::Ball::buildPath(Step const& vertexInShell, + PathResult& path) -> void { if (_direction == FORWARD) { _interior.buildPath(vertexInShell, path); } else { diff --git a/arangod/Graph/Options/TwoSidedEnumeratorOptions.cpp b/arangod/Graph/Options/TwoSidedEnumeratorOptions.cpp index 894b52305888..7c1e09416adb 100644 --- a/arangod/Graph/Options/TwoSidedEnumeratorOptions.cpp +++ b/arangod/Graph/Options/TwoSidedEnumeratorOptions.cpp @@ -29,8 +29,9 @@ 
using namespace arangodb::graph; TwoSidedEnumeratorOptions::TwoSidedEnumeratorOptions(size_t minDepth, size_t maxDepth, - PathType::Type pathType) - : _minDepth(minDepth), _maxDepth(maxDepth), _pathType(pathType) { + PathType::Type pathType, + aql::QueryContext& query) + : _minDepth(minDepth), _maxDepth(maxDepth), _pathType(pathType), _observer(query) { if (getPathType() == PathType::Type::AllShortestPaths) { setStopAtFirstDepth(true); } else if (getPathType() == PathType::Type::ShortestPath) { diff --git a/arangod/Graph/Options/TwoSidedEnumeratorOptions.h b/arangod/Graph/Options/TwoSidedEnumeratorOptions.h index 226a5222232d..a998e2186816 100644 --- a/arangod/Graph/Options/TwoSidedEnumeratorOptions.h +++ b/arangod/Graph/Options/TwoSidedEnumeratorOptions.h @@ -25,17 +25,23 @@ #pragma once #include "Graph/PathType.h" +#include "Graph/Options/QueryContextObserver.h" #include #include namespace arangodb { + +namespace aql { +class QueryContext; +} + namespace graph { struct TwoSidedEnumeratorOptions { public: TwoSidedEnumeratorOptions(size_t minDepth, size_t maxDepth, - PathType::Type pathType); + PathType::Type pathType, aql::QueryContext& query); ~TwoSidedEnumeratorOptions(); @@ -46,6 +52,7 @@ struct TwoSidedEnumeratorOptions { [[nodiscard]] PathType::Type getPathType() const; [[nodiscard]] bool getStopAtFirstDepth() const; [[nodiscard]] bool onlyProduceOnePath() const; + [[nodiscard]] bool isKilled() const noexcept { return _observer.isKilled(); } void setStopAtFirstDepth(bool stopAtFirstDepth); void setOnlyProduceOnePath(bool onlyProduceOnePath); @@ -57,6 +64,7 @@ struct TwoSidedEnumeratorOptions { bool _stopAtFirstDepth{false}; bool _onlyProduceOnePath{false}; PathType::Type _pathType; + QueryContextObserver _observer; }; } // namespace graph } // namespace arangodb diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js new 
file mode 100644 index 000000000000..c6648911e527 --- /dev/null +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js @@ -0,0 +1,284 @@ +const jsunity = require("jsunity"); +const {assertTrue} = jsunity.jsUnity.assertions; +const protoGraphs = require('@arangodb/testutils/aql-graph-traversal-generic-graphs').protoGraphs; +const arango = internal.arango; + +// seconds to add to execution time for verification +// This is to account for the time it takes for the query to be scheduled and executed +// and for the query to be killed +const VERIFICATION_TIME_BUFFER = 3; + +const localHelper = { + getRunningQueries: function() { + return arango.GET('/_api/query/current'); + }, + normalizeQueryString: function(query) { + // Remove extra whitespace, newlines and normalize spaces + return query.replace(/\s+/g, ' ').trim(); + }, + checkRunningQuery: function(queryString, maxAttempts = 10, debug = false) { + const normalizedQueryString = this.normalizeQueryString(queryString); + if (debug) { + print("Checking for running query:", normalizedQueryString); + } + for (let i = 0; i < maxAttempts; i++) { + const runningQueries = this.getRunningQueries(); + if (debug) { + print("Attempt", i + 1, "of", maxAttempts); + print("Number of running queries:", runningQueries.length); + } + const matchingQuery = runningQueries.find(query => + this.normalizeQueryString(query.query) === normalizedQueryString + ); + if (matchingQuery) { + if (debug) { + print("Found matching query with ID:", matchingQuery.id); + } + return matchingQuery; + } + if (debug) { + print("No matching query found, waiting..."); + } + internal.sleep(1); + } + if (debug) { + print("Failed to find running query after", maxAttempts, "attempts"); + } + assertTrue(false, "Query not found"); + }, + waitForQueryTermination: function(queryId, maxAttempts = 10) { + for (let i = 0; i < maxAttempts; i++) { + const runningQueries = this.getRunningQueries(); + const queryStillRunning = 
runningQueries.some(query => query.id === queryId); + if (!queryStillRunning) { + return true; + } + internal.sleep(1); + } + assertTrue(false, "Query did not terminate within expected time"); + }, + killQuery: function(queryId) { + const dbName = db._name(); + const response = arango.DELETE('/_db/' + dbName + '/_api/query/' + queryId); + assertEqual(response.code, 200); + return response; + }, + executeAsyncQuery: function(queryString, debug = false, disableAsyncHeader = false) { + // This helper will just accept the query. It will return the async id. + if (debug) { + print("Executing async query:", this.normalizeQueryString(queryString)); + } + const headers = disableAsyncHeader ? {} : {'x-arango-async': 'store'}; + const response = arango.POST_RAW('/_api/cursor', + JSON.stringify({ + query: queryString, + bindVars: {} + }), + headers + ); + if (debug) { + if (disableAsyncHeader) { + print(response) + } + print("Async query response code:", response.code); + print("Async query ID:", response.headers['x-arango-async-id']); + } + assertEqual(response.code, 202); + assertTrue(response.headers.hasOwnProperty("x-arango-async-id")); + + return response.headers['x-arango-async-id']; + }, + checkJobStatus: function(jobId, maxAttempts = 10) { + for (let i = 0; i < maxAttempts; i++) { + const response = arango.GET_RAW('/_api/job/' + jobId); + if (response.code === 200) { + return response; + } + internal.sleep(1); + } + assertTrue(false, "Job not found"); + }, + testKillLongRunningQuery: function(queryString, debug = false, disableAsyncHeader = false) { + // First run - measure execution time + const startTime = Date.now(); + if (debug) { + print("Starting first query execution..."); + } + const queryJobId = this.executeAsyncQuery(queryString, debug, disableAsyncHeader); + if (debug) { + print("First query job ID:", queryJobId); + } + const runningQuery = this.checkRunningQuery(queryString, 10, debug); + const queryId = runningQuery.id; + if (debug) { + print("First query 
ID:", queryId); + } + this.killQuery(queryId); + this.checkJobStatus(queryJobId); + this.waitForQueryTermination(queryId); + const firstExecutionTime = (Date.now() - startTime) / 1000; // Convert to seconds + if (debug) { + print("First query execution time:", firstExecutionTime, "seconds"); + } + + // Second run - verify query stays running + if (debug) { + print("Starting second query execution..."); + } + const startTime2 = Date.now(); + const queryJobId2 = this.executeAsyncQuery(queryString, debug, disableAsyncHeader); + if (debug) { + print("Second query job ID:", queryJobId2); + } + const runningQuery2 = this.checkRunningQuery(queryString, 10, debug); + const queryId2 = runningQuery2.id; + if (debug) { + print("Second query ID:", queryId2); + } + + // Wait for executionTime + VERIFICATION_TIME_BUFFER seconds while checking if query is still running + const verificationTime = firstExecutionTime + VERIFICATION_TIME_BUFFER; + if (debug) { + print("Verification time:", verificationTime, "seconds (" + VERIFICATION_TIME_BUFFER + " seconds more than the first query)"); + } + const startVerification = Date.now(); + while ((Date.now() - startVerification) / 1000 < verificationTime) { + const runningQueries = this.getRunningQueries(); + const queryStillRunning = runningQueries.some(query => query.id === queryId2); + if (debug) { + print("Query still running:", queryStillRunning); + } + assertTrue(queryStillRunning, "Query terminated before verification time"); + internal.sleep(1); + } + + // Now kill the query and verify termination + if (debug) { + print("Killing second query..."); + } + this.killQuery(queryId2); + this.checkJobStatus(queryJobId2); + this.waitForQueryTermination(queryId2); + + // Verify that second query ran longer than first query + const secondExecutionTime = (Date.now() - startTime2) / 1000; + if (debug) { + print("Second query execution time:", secondExecutionTime, "seconds"); + } + assertTrue(secondExecutionTime > firstExecutionTime, + `Second 
query (${secondExecutionTime}s) did not run longer than first query (${firstExecutionTime}s). Kill may not have worked.`); + + if (debug) { + print("Test completed successfully"); + } + } +} + +/* + Bidirectional Circle + - DFS + - BFS + - Weighted Path +*/ + +function testBidirectionalCircleDfsLongRunning(testGraph) { + assertTrue(testGraph.name().startsWith(protoGraphs.bidirectionalCircle.name())); + + const queryString = ` + FOR v, e IN 1..999 OUTBOUND "${testGraph.vertex('A')}" + GRAPH ${testGraph.name()} + OPTIONS {order: "dfs", uniqueVertices: "none", uniqueEdges: "none"} + RETURN v.key + `; + + localHelper.testKillLongRunningQuery(queryString); +} + +function testBidirectionalCircleBfsLongRunning(testGraph) { + assertTrue(testGraph.name().startsWith(protoGraphs.bidirectionalCircle.name())); + + const queryString = ` + FOR v, e IN 1..999 OUTBOUND "${testGraph.vertex('A')}" + GRAPH ${testGraph.name()} + OPTIONS {order: "bfs", uniqueVertices: "none", uniqueEdges: "none"} + RETURN v.key + `; + + localHelper.testKillLongRunningQuery(queryString); +} + +function testBidirectionalCircleWeightedPathLongRunning(testGraph) { + assertTrue(testGraph.name().startsWith(protoGraphs.bidirectionalCircle.name())); + + const queryString = ` + FOR v, e IN 1..999 OUTBOUND "${testGraph.vertex('A')}" + GRAPH ${testGraph.name()} + OPTIONS {order: "weighted", weightAttribute: "${testGraph.weightAttribute()}", uniqueVertices: "none", uniqueEdges: "none"} + RETURN v.key + `; + + localHelper.testKillLongRunningQuery(queryString); +} + +/* + HugeCompleteGraph + - K Paths +*/ + +function testHugeCompleteGraphKPathsLongRunning(testGraph) { + assertTrue(testGraph.name().startsWith(protoGraphs.hugeCompleteGraph.name())); + + const queryString = ` + FOR path IN 1..999 ANY K_PATHS "${testGraph.vertex('A')}" TO "${testGraph.vertex('B')}" + GRAPH ${testGraph.name()} + OPTIONS {useCache: false} + RETURN path + `; + + localHelper.testKillLongRunningQuery(queryString); +} + +/* + HugeGridGraph + - 
Shortest Path + - All Shortest Paths +*/ + +function testHugeGridGraphShortestPathLongRunning(testGraph) { + assertTrue(testGraph.name().startsWith(protoGraphs.hugeGridGraph.name())); + + const queryString = ` + FOR v, e IN OUTBOUND SHORTEST_PATH "${testGraph.vertex('1')}" TO "${testGraph.vertex('1000000')}" + GRAPH ${testGraph.name()} + OPTIONS {useCache: false} + RETURN v.key + `; + + const debug = false; + localHelper.testKillLongRunningQuery(queryString, debug, debug); +} + +function testHugeGridGraphAllShortestPathsLongRunning(testGraph) { + assertTrue(testGraph.name().startsWith(protoGraphs.hugeGridGraph.name())); + + const queryString = ` + FOR path IN ANY ALL_SHORTEST_PATHS "${testGraph.vertex('1')}" TO "${testGraph.vertex('1000000')}" + GRAPH ${testGraph.name()} + OPTIONS {useCache: false} + RETURN path + `; + + localHelper.testKillLongRunningQuery(queryString); +} + +// DFS, BFS, Weighted Path +exports.testBidirectionalCircleDfsLongRunning = testBidirectionalCircleDfsLongRunning; +exports.testBidirectionalCircleBfsLongRunning = testBidirectionalCircleBfsLongRunning; +exports.testBidirectionalCircleWeightedPathLongRunning = testBidirectionalCircleWeightedPathLongRunning; + +// K Paths +exports.testHugeCompleteGraphKPathsLongRunning = testHugeCompleteGraphKPathsLongRunning; + +// Shortest Path, All Shortest Paths +exports.testHugeGridGraphShortestPathLongRunning = testHugeGridGraphShortestPathLongRunning; +exports.testHugeGridGraphAllShortestPathsLongRunning = testHugeGridGraphAllShortestPathsLongRunning; \ No newline at end of file diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js index cc5abddb7f83..0cf18d74f9e6 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js @@ -239,6 +239,19 @@ class TestGraph { this.verticesByName = 
TestGraph._fillGraph(this.graphName, this.edges, db[this.vn], db[this.en], this.unconnectedVertices, vertexSharding, this.addProjectionPayload); db[this.en].ensureIndex({type: "persistent", fields: ["_from", graphIndexedAttribute]}); + + // Print first and last 10 vertices for debugging + print("First 10 vertices in graph " + this.graphName + ":"); + const first10Vertices = Object.entries(this.verticesByName).slice(0, 10); + first10Vertices.forEach(([key, id]) => { + print(` ${key}: ${id} (node name: ${key})`); + }); + + print("\nLast 10 vertices in graph " + this.graphName + ":"); + const last10Vertices = Object.entries(this.verticesByName).slice(-10); + last10Vertices.forEach(([key, id]) => { + print(` ${key}: ${id} (node name: ${key})`); + }); } name() { @@ -908,6 +921,74 @@ protoGraphs.hugeCompleteGraph = new ProtoGraph("hugeCompleteGraph", ] ); +/* + * Grid Graph Structure (1000x1000) + * Each node connects to its right and bottom neighbors + * + * 1 → 2 → 3 → ... → 1000 + * ↓ ↓ ↓ ↓ + * 1001 → 1002 → 1003 → ... → 2000 + * ↓ ↓ ↓ ↓ + * ... ... ... ... + * ↓ ↓ ↓ ↓ + * 999001 → 999002 → 999003 → ... 
→ 1000000 + */ + +// Generate grid graph with 1000x1000 nodes +const generateGridGraph = (width, height) => { + const nodes = []; + const edges = []; + + // Generate node names as simple numbers + const getNodeName = (row, col) => { + // Calculate node number: row * width + col + 1 + const nodeNum = (row * width + col + 1).toString(); + return nodeNum; + }; + + // Generate nodes and edges + for (let row = 0; row < height; row++) { + for (let col = 0; col < width; col++) { + const nodeName = getNodeName(row, col); + nodes.push(nodeName); + + // Connect to right neighbor + if (col < width - 1) { + edges.push([nodeName, getNodeName(row, col + 1), 1]); + } + + // Connect to bottom neighbor + if (row < height - 1) { + edges.push([nodeName, getNodeName(row + 1, col), 1]); + } + } + } + + return { nodes, edges }; +}; + +const gridSize = 1000; +const gridGraph = generateGridGraph(gridSize, gridSize); + +protoGraphs.hugeGridGraph = new ProtoGraph("hugeGridGraph", + gridGraph.edges, + [1, 2, 5], + [ + { + numberOfShards: 1, + vertexSharding: gridGraph.nodes.map((node, index) => [node, 0]) + }, + { + numberOfShards: 2, + vertexSharding: gridGraph.nodes.map((node, index) => [node, index % 2]) + }, + { + numberOfShards: 5, + vertexSharding: gridGraph.nodes.map((node, index) => [node, index % 5]) + } + ] +); + /* * * diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-tests.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-tests.js index f9c3720012bd..c8701cb31882 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-tests.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-tests.js @@ -39,6 +39,20 @@ const {getCompactStatsNodes, TraversalBlock} = require("@arangodb/testutils/aql- const {findExecutionNodes} = require("@arangodb/aql-helper"); const isCluster = require("internal").isCluster(); let IM = global.instanceManager; +const bidirectionalMethods = 
require('@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests'); +const bidirectionalCircle = { + testBidirectionalCircleDfsLongRunning: bidirectionalMethods.testBidirectionalCircleDfsLongRunning, + testBidirectionalCircleBfsLongRunning: bidirectionalMethods.testBidirectionalCircleBfsLongRunning, + testBidirectionalCircleWeightedPathLongRunning: bidirectionalMethods.testBidirectionalCircleWeightedPathLongRunning, +} +const hugeCompleteGraph = { + testHugeCompleteGraphKPathsLongRunning: bidirectionalMethods.testHugeCompleteGraphKPathsLongRunning, +} +const hugeGridGraph = { + testHugeGridGraphShortestPathLongRunning: bidirectionalMethods.testHugeGridGraphShortestPathLongRunning, + testHugeGridGraphAllShortestPathsLongRunning: bidirectionalMethods.testHugeGridGraphAllShortestPathsLongRunning, +} + /* TODO: We need more tests to cover the following things: @@ -6990,6 +7004,18 @@ const testsByGraph = { testEmptyGraphBfsPath, testEmptyGraphDfsPath, testEmptyGraphWeightedPath, + }, + // Used for DFS, BFS, Weighted Path - Query Cancellation + bidirectionalCircle: { + ...bidirectionalCircle + }, + // Used for K Paths - Query Cancellation + hugeCompleteGraph: { + ...hugeCompleteGraph + }, + // Used for Shortest Path, All Shortest Paths - Query Cancellation + hugeGridGraph: { + ...hugeGridGraph } }; diff --git a/tests/Graph/AllShortestPathsFinderTest.cpp b/tests/Graph/AllShortestPathsFinderTest.cpp index 366eec83eb77..ea7f190a9ec5 100644 --- a/tests/Graph/AllShortestPathsFinderTest.cpp +++ b/tests/Graph/AllShortestPathsFinderTest.cpp @@ -149,7 +149,7 @@ class AllShortestPathsFinderTest arangodb::graph::PathType::Type pathType = arangodb::graph::PathType::Type::AllShortestPaths; arangodb::graph::TwoSidedEnumeratorOptions options{minDepth, maxDepth, - pathType}; + pathType, *_query}; PathValidatorOptions validatorOpts{&_tmpVar, _expressionContext}; return AllShortestPathsFinder{ MockGraphProvider( diff --git a/tests/Graph/KPathFinderTest.cpp 
b/tests/Graph/KPathFinderTest.cpp index 6a2df9f99e96..b71d34744ee3 100644 --- a/tests/Graph/KPathFinderTest.cpp +++ b/tests/Graph/KPathFinderTest.cpp @@ -169,7 +169,7 @@ class KPathFinderTest arangodb::graph::PathType::Type pathType = arangodb::graph::PathType::Type::KPaths; arangodb::graph::TwoSidedEnumeratorOptions options{minDepth, maxDepth, - pathType}; + pathType, *_query}; options.setStopAtFirstDepth(false); PathValidatorOptions validatorOpts{&_tmpVar, _expressionContext}; return KPathFinder{ diff --git a/tests/Graph/KShortestPathsFinderTest.cpp b/tests/Graph/KShortestPathsFinderTest.cpp index 6d46e363aac2..9b1c26c7d8ca 100644 --- a/tests/Graph/KShortestPathsFinderTest.cpp +++ b/tests/Graph/KShortestPathsFinderTest.cpp @@ -198,7 +198,7 @@ class KShortestPathsFinderTest : public ::testing::Test { arangodb::graph::PathType::Type pathType = arangodb::graph::PathType::Type::KShortestPaths; arangodb::graph::TwoSidedEnumeratorOptions options{minDepth, maxDepth, - pathType}; + pathType, *_query}; options.setStopAtFirstDepth(false); PathValidatorOptions validatorOpts{&_tmpVar, _expressionContext}; auto forwardProviderOptions = @@ -450,7 +450,7 @@ class WeightedKShortestPathsFinderTest : public ::testing::Test { arangodb::graph::PathType::Type pathType = arangodb::graph::PathType::Type::KShortestPaths; arangodb::graph::TwoSidedEnumeratorOptions options{minDepth, maxDepth, - pathType}; + pathType, *_query}; options.setStopAtFirstDepth(false); PathValidatorOptions validatorOpts{&_tmpVar, _expressionContext}; auto forwardProviderOptions = diff --git a/tests/Graph/WeightedShortestPathTest.cpp b/tests/Graph/WeightedShortestPathTest.cpp index 15c68dfbe1c2..426609a792c2 100644 --- a/tests/Graph/WeightedShortestPathTest.cpp +++ b/tests/Graph/WeightedShortestPathTest.cpp @@ -150,7 +150,7 @@ class WeightedShortestPathTest arangodb::graph::PathType::Type pathType = arangodb::graph::PathType::Type::ShortestPath; arangodb::graph::TwoSidedEnumeratorOptions options{minDepth, 
maxDepth, - pathType}; + pathType, *_query}; options.setStopAtFirstDepth(false); PathValidatorOptions validatorOpts{&_tmpVar, _expressionContext}; auto forwardProviderOptions = From 7692de9bfb46d78a8dd9341985c9db11fcb922d8 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Wed, 14 May 2025 13:42:33 +0200 Subject: [PATCH 06/19] only print debug creation info if set manually in constructor --- .../aql-graph-traversal-generic-graphs.js | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js index 0cf18d74f9e6..3e7375185385 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js @@ -148,6 +148,7 @@ class TestGraph { isEnterprise: false, isSatellite: false }; + this.debug = false; } hasProjectionPayload() { @@ -240,18 +241,19 @@ class TestGraph { this.verticesByName = TestGraph._fillGraph(this.graphName, this.edges, db[this.vn], db[this.en], this.unconnectedVertices, vertexSharding, this.addProjectionPayload); db[this.en].ensureIndex({type: "persistent", fields: ["_from", graphIndexedAttribute]}); - // Print first and last 10 vertices for debugging - print("First 10 vertices in graph " + this.graphName + ":"); - const first10Vertices = Object.entries(this.verticesByName).slice(0, 10); - first10Vertices.forEach(([key, id]) => { - print(` ${key}: ${id} (node name: ${key})`); - }); - - print("\nLast 10 vertices in graph " + this.graphName + ":"); - const last10Vertices = Object.entries(this.verticesByName).slice(-10); - last10Vertices.forEach(([key, id]) => { - print(` ${key}: ${id} (node name: ${key})`); - }); + if (this.debug) { + print("First 10 vertices in graph " + this.graphName + ":"); + const first10Vertices = Object.entries(this.verticesByName).slice(0, 10); + 
first10Vertices.forEach(([key, id]) => { + print(` ${key}: ${id} (node name: ${key})`); + }); + + print("\nLast 10 vertices in graph " + this.graphName + ":"); + const last10Vertices = Object.entries(this.verticesByName).slice(-10); + last10Vertices.forEach(([key, id]) => { + print(` ${key}: ${id} (node name: ${key})`); + }); + } } name() { From 7ef978fb3e904238de43d03d74ec65ea9aaa599d Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Wed, 14 May 2025 13:44:15 +0200 Subject: [PATCH 07/19] revert changelog merge issue --- CHANGELOG | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG b/CHANGELOG index e113b6d5e833..2c320afc5419 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,5 +1,5 @@ -3.12.4 (2025-01-23) -------------------- +devel +----- * Updated ArangoDB Starter to v0.19.10. From 67f81764f2eb9921949f922d023a7fac5796b7ba Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Wed, 14 May 2025 13:54:17 +0200 Subject: [PATCH 08/19] added additional check for cancelation of query in the twosidedenumerator --- .../Graph/Enumerators/TwoSidedEnumerator.cpp | 42 ++++++++++++++----- .../Graph/Enumerators/TwoSidedEnumerator.h | 8 +++- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp b/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp index e0f37bb1530e..668cebe7c31d 100644 --- a/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp +++ b/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp @@ -56,7 +56,8 @@ TwoSidedEnumerator:: Ball::Ball(Direction dir, ProviderType&& provider, GraphOptions const& options, PathValidatorOptions validatorOptions, - arangodb::ResourceMonitor& resourceMonitor) + arangodb::ResourceMonitor& resourceMonitor, + TwoSidedEnumerator& parent) : _resourceMonitor(resourceMonitor), _interior(resourceMonitor), _queue(resourceMonitor), @@ -64,7 +65,8 @@ TwoSidedEnumerator:: _validator(_provider, _interior, std::move(validatorOptions)), _direction(dir), 
_minDepth(options.getMinDepth()), - _graphOptions(options) {} + _graphOptions(options), + _parent(parent) {} template @@ -198,7 +200,12 @@ auto TwoSidedEnumerator:: Ball::computeNeighbourhoodOfNextVertex(Ball& other, ResultList& results) -> void { if (_graphOptions.isKilled()) { + // First clear our own instance (Ball) clear(); + // Then clear the other instance (Ball) + other.clear(); + // Then clear the parent (TwoSidedEnumerator) + _parent.clear(); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } @@ -253,8 +260,8 @@ auto TwoSidedEnumerator:: template -void TwoSidedEnumerator::Ball::testDepthZero(Ball& other, ResultList& results) { +void TwoSidedEnumerator:: + Ball::testDepthZero(Ball& other, ResultList& results) { for (auto const& step : _shell) { other.matchResultsInShell(step, results, _validator); } @@ -291,9 +298,9 @@ auto TwoSidedEnumerator:: template -auto TwoSidedEnumerator::Ball::buildPath(Step const& vertexInShell, - PathResult& path) -> void { +auto TwoSidedEnumerator:: + Ball::buildPath(Step const& vertexInShell, + PathResult& path) -> void { if (_direction == FORWARD) { _interior.buildPath(vertexInShell, path); } else { @@ -317,10 +324,15 @@ TwoSidedEnumerator:: PathValidatorOptions validatorOptions, arangodb::ResourceMonitor& resourceMonitor) : _options(std::move(options)), - _left{Direction::FORWARD, std::move(forwardProvider), _options, - validatorOptions, resourceMonitor}, - _right{Direction::BACKWARD, std::move(backwardProvider), _options, - std::move(validatorOptions), resourceMonitor}, + _left{Direction::FORWARD, std::move(forwardProvider), + _options, validatorOptions, + resourceMonitor, *this}, + _right{Direction::BACKWARD, + std::move(backwardProvider), + _options, + std::move(validatorOptions), + resourceMonitor, + *this}, _baselineDepth(_options.getMaxDepth()), _resultPath{_left.provider(), _right.provider()} {} @@ -456,6 +468,14 @@ void TwoSidedEnumerator::searchMoreResults() { while (_results.empty() && !searchDone()) { _resultsFetched 
= false; + + // Check for kill signal before proceeding + // We will also do additional checks in computeNeighbourhoodOfNextVertex + if (_options.isKilled()) { + clear(); + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } + if (_searchLeft) { if (ADB_UNLIKELY(_left.doneWithDepth())) { startNextDepth(); diff --git a/arangod/Graph/Enumerators/TwoSidedEnumerator.h b/arangod/Graph/Enumerators/TwoSidedEnumerator.h index 6dd5d1a53182..a13e0bd18eb1 100644 --- a/arangod/Graph/Enumerators/TwoSidedEnumerator.h +++ b/arangod/Graph/Enumerators/TwoSidedEnumerator.h @@ -126,7 +126,8 @@ class TwoSidedEnumerator { public: Ball(Direction dir, ProviderType&& provider, GraphOptions const& options, PathValidatorOptions validatorOptions, - arangodb::ResourceMonitor& resourceMonitor); + arangodb::ResourceMonitor& resourceMonitor, + TwoSidedEnumerator& parent); ~Ball(); auto clear() -> void; auto reset(VertexRef center, size_t depth = 0) -> void; @@ -186,6 +187,11 @@ class TwoSidedEnumerator { Direction _direction; size_t _minDepth{0}; GraphOptions _graphOptions; + + // Reference to the parent TwoSidedEnumerator + // Intention: To be able to call clear() on the parent + // Case: When a kill signal is received + TwoSidedEnumerator& _parent; }; public: From 2de151262b79ea0af0afab6b0afa535b158a737c Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Wed, 14 May 2025 14:37:04 +0200 Subject: [PATCH 09/19] eslint --- .../testutils/aql-graph-traversal-generic-tests.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-tests.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-tests.js index c8701cb31882..07f7d0db2d3b 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-tests.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-tests.js @@ -44,14 +44,14 @@ const bidirectionalCircle = { testBidirectionalCircleDfsLongRunning: 
bidirectionalMethods.testBidirectionalCircleDfsLongRunning, testBidirectionalCircleBfsLongRunning: bidirectionalMethods.testBidirectionalCircleBfsLongRunning, testBidirectionalCircleWeightedPathLongRunning: bidirectionalMethods.testBidirectionalCircleWeightedPathLongRunning, -} +}; const hugeCompleteGraph = { testHugeCompleteGraphKPathsLongRunning: bidirectionalMethods.testHugeCompleteGraphKPathsLongRunning, -} +}; const hugeGridGraph = { testHugeGridGraphShortestPathLongRunning: bidirectionalMethods.testHugeGridGraphShortestPathLongRunning, testHugeGridGraphAllShortestPathsLongRunning: bidirectionalMethods.testHugeGridGraphAllShortestPathsLongRunning, -} +}; /* TODO: From aa29f4e169f229f4599a7d09611849ec2ec7f388 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Fri, 16 May 2025 11:06:57 +0200 Subject: [PATCH 10/19] eslint --- ...l-graph-traversal-generic-bidirectional-tests.js | 13 ++++++++++--- .../testutils/aql-graph-traversal-generic-graphs.js | 1 + 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js index c6648911e527..971705c268e8 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js @@ -1,6 +1,13 @@ +/*jshint globalstrict:true, strict:true, esnext: true */ +/*global print */ + +"use strict"; + const jsunity = require("jsunity"); -const {assertTrue} = jsunity.jsUnity.assertions; +const {assertTrue, assertEqual} = jsunity.jsUnity.assertions; const protoGraphs = require('@arangodb/testutils/aql-graph-traversal-generic-graphs').protoGraphs; +const internal = require("internal"); +const db = internal.db; const arango = internal.arango; // seconds to add to execution time for verification @@ -78,7 +85,7 @@ const localHelper = { ); if 
(debug) { if (disableAsyncHeader) { - print(response) + print(response); } print("Async query response code:", response.code); print("Async query ID:", response.headers['x-arango-async-id']); @@ -172,7 +179,7 @@ const localHelper = { print("Test completed successfully"); } } -} +}; /* Bidirectional Circle diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js index 3e7375185385..fab55cbb6b8d 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js @@ -1,4 +1,5 @@ /*jshint globalstrict:true, strict:true, esnext: true */ +/* global print */ "use strict"; From 6f21c2fdfa06ed01a503d7e4b9c86db74af205c7 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Fri, 16 May 2025 11:19:14 +0200 Subject: [PATCH 11/19] clang format --- .../Aql/ExecutionNode/EnumeratePathsNode.cpp | 13 +++++-------- arangod/Aql/ExecutionNode/ShortestPathNode.cpp | 10 ++++------ arangod/Aql/ExecutionNode/TraversalNode.cpp | 5 ++--- .../Graph/Enumerators/OneSidedEnumerator.cpp | 3 ++- .../Options/OneSidedEnumeratorOptions.cpp | 4 +++- .../Graph/Options/OneSidedEnumeratorOptions.h | 3 ++- arangod/Graph/Options/QueryContextObserver.h | 18 +++++++++--------- .../Options/TwoSidedEnumeratorOptions.cpp | 5 ++++- .../Graph/Providers/BaseProviderOptions.cpp | 6 ++---- arangod/Graph/Providers/BaseProviderOptions.h | 17 ++++++----------- 10 files changed, 39 insertions(+), 45 deletions(-) diff --git a/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp b/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp index e1951841c036..3d79e698f8c5 100644 --- a/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp +++ b/arangod/Aql/ExecutionNode/EnumeratePathsNode.cpp @@ -483,8 +483,7 @@ std::unique_ptr EnumeratePathsNode::createBlock( opts->tmpVar(), std::move(reversedUsedIndexes), opts->getExpressionCtx(), 
{}, opts->collectionToShard(), opts->getVertexProjections(), opts->getEdgeProjections(), - opts->produceVertices(), opts->useCache(), - opts->query()); + opts->produceVertices(), opts->useCache(), opts->query()); using Provider = SingleServerProvider; if (opts->query().queryOptions().getTraversalProfileLevel() == @@ -680,13 +679,11 @@ std::unique_ptr EnumeratePathsNode::createBlock( } else { // Cluster case (on coordinator) auto cache = std::make_shared( opts->query().resourceMonitor()); - ClusterBaseProviderOptions forwardProviderOptions(cache, engines(), false, - opts->produceVertices(), - opts->query()); + ClusterBaseProviderOptions forwardProviderOptions( + cache, engines(), false, opts->produceVertices(), opts->query()); forwardProviderOptions.setClearEdgeCacheOnClear(false); - ClusterBaseProviderOptions backwardProviderOptions(cache, engines(), true, - opts->produceVertices(), - opts->query()); + ClusterBaseProviderOptions backwardProviderOptions( + cache, engines(), true, opts->produceVertices(), opts->query()); backwardProviderOptions.setClearEdgeCacheOnClear(false); // A comment is in order here: For all cases covered here // (k-shortest-paths, all shortest paths, k-paths) we do not need to diff --git a/arangod/Aql/ExecutionNode/ShortestPathNode.cpp b/arangod/Aql/ExecutionNode/ShortestPathNode.cpp index 2e7f89328768..d31d72eb27d6 100644 --- a/arangod/Aql/ExecutionNode/ShortestPathNode.cpp +++ b/arangod/Aql/ExecutionNode/ShortestPathNode.cpp @@ -511,12 +511,10 @@ std::unique_ptr ShortestPathNode::createBlock( using ClusterProvider = ClusterProvider; auto cache = std::make_shared( opts->query().resourceMonitor()); - ClusterBaseProviderOptions forwardProviderOptions(cache, engines(), false, - opts->produceVertices(), - opts->query()); - ClusterBaseProviderOptions backwardProviderOptions(cache, engines(), true, - opts->produceVertices(), - opts->query()); + ClusterBaseProviderOptions forwardProviderOptions( + cache, engines(), false, opts->produceVertices(), 
opts->query()); + ClusterBaseProviderOptions backwardProviderOptions( + cache, engines(), true, opts->produceVertices(), opts->query()); auto usesWeight = checkWeight(forwardProviderOptions, backwardProviderOptions); diff --git a/arangod/Aql/ExecutionNode/TraversalNode.cpp b/arangod/Aql/ExecutionNode/TraversalNode.cpp index 3fdf08ac3903..1b0b634a9d0a 100644 --- a/arangod/Aql/ExecutionNode/TraversalNode.cpp +++ b/arangod/Aql/ExecutionNode/TraversalNode.cpp @@ -817,9 +817,8 @@ std::unique_ptr TraversalNode::createBlock( bool isSmart) const { TraverserOptions* opts = this->options(); - arangodb::graph::OneSidedEnumeratorOptions options{opts->minDepth, - opts->maxDepth, - opts->query()}; + arangodb::graph::OneSidedEnumeratorOptions options{ + opts->minDepth, opts->maxDepth, opts->query()}; /* * PathValidator Disjoint Helper (TODO [GraphRefactor]: Copy from createBlock) * Clean this up as soon we clean up the whole TraversalNode as well. diff --git a/arangod/Graph/Enumerators/OneSidedEnumerator.cpp b/arangod/Graph/Enumerators/OneSidedEnumerator.cpp index c55db9aa159b..75da2731558f 100644 --- a/arangod/Graph/Enumerators/OneSidedEnumerator.cpp +++ b/arangod/Graph/Enumerators/OneSidedEnumerator.cpp @@ -109,7 +109,8 @@ void OneSidedEnumerator::clearProvider() { template void OneSidedEnumerator::computeNeighbourhoodOfNextVertex() { if (_options.isKilled()) { - // Clear false may sounds misleading, but this means we do not want to keep the path store + // Clear false may sounds misleading, but this means we do not want to keep + // the path store clear(false); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } diff --git a/arangod/Graph/Options/OneSidedEnumeratorOptions.cpp b/arangod/Graph/Options/OneSidedEnumeratorOptions.cpp index 2bf295218e5d..446e65d25566 100644 --- a/arangod/Graph/Options/OneSidedEnumeratorOptions.cpp +++ b/arangod/Graph/Options/OneSidedEnumeratorOptions.cpp @@ -27,7 +27,9 @@ using namespace arangodb; using namespace arangodb::graph; 
-OneSidedEnumeratorOptions::OneSidedEnumeratorOptions(size_t minDepth, size_t maxDepth, aql::QueryContext& query) +OneSidedEnumeratorOptions::OneSidedEnumeratorOptions(size_t minDepth, + size_t maxDepth, + aql::QueryContext& query) : _minDepth(minDepth), _maxDepth(maxDepth), _observer(query) {} OneSidedEnumeratorOptions::~OneSidedEnumeratorOptions() = default; diff --git a/arangod/Graph/Options/OneSidedEnumeratorOptions.h b/arangod/Graph/Options/OneSidedEnumeratorOptions.h index c1519906e55d..4c5f7a192735 100644 --- a/arangod/Graph/Options/OneSidedEnumeratorOptions.h +++ b/arangod/Graph/Options/OneSidedEnumeratorOptions.h @@ -33,7 +33,8 @@ namespace arangodb::graph { struct OneSidedEnumeratorOptions { public: - OneSidedEnumeratorOptions(size_t minDepth, size_t maxDepth, aql::QueryContext& query); + OneSidedEnumeratorOptions(size_t minDepth, size_t maxDepth, + aql::QueryContext& query); ~OneSidedEnumeratorOptions(); [[nodiscard]] size_t getMinDepth() const noexcept; diff --git a/arangod/Graph/Options/QueryContextObserver.h b/arangod/Graph/Options/QueryContextObserver.h index d7afffc0cab9..c4f131b5d7ec 100644 --- a/arangod/Graph/Options/QueryContextObserver.h +++ b/arangod/Graph/Options/QueryContextObserver.h @@ -26,14 +26,14 @@ #include "Aql/QueryContext.h" -// This class serves as a wrapper around QueryContext to explicitly track where query killing -// is being used in the graph traversal code. It provides a single point of access to check -// if a query has been killed, making it easier to maintain and modify the query killing -// behavior if needed. +// This class serves as a wrapper around QueryContext to explicitly track where +// query killing is being used in the graph traversal code. It provides a single +// point of access to check if a query has been killed, making it easier to +// maintain and modify the query killing behavior if needed. // -// While this adds a small layer of indirection, it helps with code clarity and maintainability. 
-// If profiling shows this wrapper causes significant overhead, we can remove it and use -// QueryContext directly. +// While this adds a small layer of indirection, it helps with code clarity and +// maintainability. If profiling shows this wrapper causes significant overhead, +// we can remove it and use QueryContext directly. // // We can change this or discuss if this approach is not liked. @@ -42,11 +42,11 @@ namespace arangodb::graph { class QueryContextObserver { public: explicit QueryContextObserver(aql::QueryContext& query) : _query(query) {} - + [[nodiscard]] bool isKilled() const { return _query.killed(); } private: aql::QueryContext& _query; }; -} // namespace arangodb::graph \ No newline at end of file +} // namespace arangodb::graph \ No newline at end of file diff --git a/arangod/Graph/Options/TwoSidedEnumeratorOptions.cpp b/arangod/Graph/Options/TwoSidedEnumeratorOptions.cpp index 7c1e09416adb..6a7669f5e771 100644 --- a/arangod/Graph/Options/TwoSidedEnumeratorOptions.cpp +++ b/arangod/Graph/Options/TwoSidedEnumeratorOptions.cpp @@ -31,7 +31,10 @@ TwoSidedEnumeratorOptions::TwoSidedEnumeratorOptions(size_t minDepth, size_t maxDepth, PathType::Type pathType, aql::QueryContext& query) - : _minDepth(minDepth), _maxDepth(maxDepth), _pathType(pathType), _observer(query) { + : _minDepth(minDepth), + _maxDepth(maxDepth), + _pathType(pathType), + _observer(query) { if (getPathType() == PathType::Type::AllShortestPaths) { setStopAtFirstDepth(true); } else if (getPathType() == PathType::Type::ShortestPath) { diff --git a/arangod/Graph/Providers/BaseProviderOptions.cpp b/arangod/Graph/Providers/BaseProviderOptions.cpp index a0cc669dcd4e..4e7cef901842 100644 --- a/arangod/Graph/Providers/BaseProviderOptions.cpp +++ b/arangod/Graph/Providers/BaseProviderOptions.cpp @@ -86,8 +86,7 @@ SingleServerBaseProviderOptions::SingleServerBaseProviderOptions( MonitoredCollectionToShardMap const& collectionToShardMap, aql::Projections const& vertexProjections, 
aql::Projections const& edgeProjections, bool produceVertices, - bool useCache, - aql::QueryContext& query) + bool useCache, aql::QueryContext& query) : _temporaryVariable(tmpVar), _indexInformation(std::move(indexInfo)), _expressionContext(expressionContext), @@ -171,8 +170,7 @@ void SingleServerBaseProviderOptions::unPrepareContext() { ClusterBaseProviderOptions::ClusterBaseProviderOptions( std::shared_ptr cache, std::unordered_map const* engines, bool backward, - bool produceVertices, - aql::QueryContext& query) + bool produceVertices, aql::QueryContext& query) : _cache(std::move(cache)), _engines(engines), _backward(backward), diff --git a/arangod/Graph/Providers/BaseProviderOptions.h b/arangod/Graph/Providers/BaseProviderOptions.h index 2d0f79fecc89..3718e0edf431 100644 --- a/arangod/Graph/Providers/BaseProviderOptions.h +++ b/arangod/Graph/Providers/BaseProviderOptions.h @@ -100,10 +100,10 @@ struct SingleServerBaseProviderOptions { MonitoredCollectionToShardMap const& collectionToShardMap, aql::Projections const& vertexProjections, aql::Projections const& edgeProjections, bool produceVertices, - bool useCache, - aql::QueryContext& query); + bool useCache, aql::QueryContext& query); - SingleServerBaseProviderOptions(SingleServerBaseProviderOptions const&) = delete; + SingleServerBaseProviderOptions(SingleServerBaseProviderOptions const&) = + delete; SingleServerBaseProviderOptions(SingleServerBaseProviderOptions&&) = default; aql::Variable const* tmpVar() const; @@ -133,9 +133,7 @@ struct SingleServerBaseProviderOptions { aql::Projections const& getEdgeProjections() const; - bool isKilled() const noexcept { - return _queryObserver.isKilled(); - } + bool isKilled() const noexcept { return _queryObserver.isKilled(); } private: // The temporary Variable used in the Indexes @@ -188,8 +186,7 @@ struct ClusterBaseProviderOptions { ClusterBaseProviderOptions( std::shared_ptr cache, std::unordered_map const* engines, bool backward, - bool produceVertices, - 
aql::QueryContext& query); ClusterBaseProviderOptions( std::shared_ptr cache, @@ -236,9 +233,7 @@ struct ClusterBaseProviderOptions { _clearEdgeCacheOnClear = flag; } - bool isKilled() const noexcept { - return _queryObserver.isKilled(); - } + bool isKilled() const noexcept { return _queryObserver.isKilled(); } private: std::shared_ptr _cache; From af166b8f54972c2f87b69ec1869700d1e1ddd1f3 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Fri, 16 May 2025 11:27:54 +0200 Subject: [PATCH 12/19] clang format --- tests/Graph/DFSFinderTest.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/Graph/DFSFinderTest.cpp b/tests/Graph/DFSFinderTest.cpp index a6559c6b80bd..571853cc4c32 100644 --- a/tests/Graph/DFSFinderTest.cpp +++ b/tests/Graph/DFSFinderTest.cpp @@ -174,7 +174,8 @@ class DFSFinderTest } auto pathFinder(size_t minDepth, size_t maxDepth) -> DFSFinder { - arangodb::graph::OneSidedEnumeratorOptions options{minDepth, maxDepth, *_query.get()}; + arangodb::graph::OneSidedEnumeratorOptions options{minDepth, maxDepth, + *_query.get()}; PathValidatorOptions validatorOpts{&_tmpVar, _expressionContext}; return DFSFinder( {*_query.get(), From e22c7c3e870dcaa729b45a271164d0240ce245e8 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Mon, 19 May 2025 10:49:31 +0200 Subject: [PATCH 13/19] batchwise vertex and edge insertion during fillGraph (integration tests) --- .../aql-graph-traversal-generic-graphs.js | 36 ++++++++++++++----- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js index fab55cbb6b8d..ed5698927f4b 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js @@ -401,16 +401,25 @@ class TestGraph {
toSave.push(doc); keys.push(vertexKey); } - // Save all vertices in one request, and map the result back to the input. - // This should speed up the tests. - vc.save(toSave).forEach((d, i) => { - verticesByName[keys[i]] = d._id; - return null; - }); + + // Process vertices in batches of 100k + const VERTEX_BATCH_SIZE = 100000; + for (let i = 0; i < toSave.length; i += VERTEX_BATCH_SIZE) { + const batch = toSave.slice(i, i + VERTEX_BATCH_SIZE); + const batchKeys = keys.slice(i, i + VERTEX_BATCH_SIZE); + if (this.debug) { + print(`Saving vertex batch ${i} of ${toSave.length}`); + } + vc.save(batch).forEach((d, idx) => { + verticesByName[batchKeys[idx]] = d._id; + return null; + }); + } } - // Save all edges in one request - ec.save(edges.map(([v, w, weight]) => { + // Process edges in batches of 100k + const EDGE_BATCH_SIZE = 100000; + const edgeDocs = edges.map(([v, w, weight]) => { const edge = { _from: verticesByName[v], _to: verticesByName[w], @@ -431,7 +440,16 @@ class TestGraph { } return edge; - })); + }); + + // Save edges in batches + for (let i = 0; i < edgeDocs.length; i += EDGE_BATCH_SIZE) { + const batch = edgeDocs.slice(i, i + EDGE_BATCH_SIZE); + if (this.debug) { + print(`Saving edge batch ${i} of ${edgeDocs.length}`); + } + ec.save(batch); + } return verticesByName; } From 033326af75216733414fcd56cc68ef5144840c96 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Mon, 19 May 2025 11:40:49 +0200 Subject: [PATCH 14/19] try to help garbage collection v8 --- .../aql-graph-traversal-generic-graphs.js | 50 +++++++++++++------ 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js index ed5698927f4b..98263a4fbedd 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-graphs.js @@ -402,8 +402,8 
@@ class TestGraph { keys.push(vertexKey); } - // Process vertices in batches of 100k - const VERTEX_BATCH_SIZE = 100000; + // Process vertices in smaller batches to reduce memory pressure + const VERTEX_BATCH_SIZE = 10000; // Reduced from 100k to 10k for (let i = 0; i < toSave.length; i += VERTEX_BATCH_SIZE) { const batch = toSave.slice(i, i + VERTEX_BATCH_SIZE); const batchKeys = keys.slice(i, i + VERTEX_BATCH_SIZE); @@ -414,21 +414,30 @@ class TestGraph { verticesByName[batchKeys[idx]] = d._id; return null; }); + // Clear references to help garbage collection + batch.length = 0; + batchKeys.length = 0; } + // Clear the full arrays after processing + toSave.length = 0; + keys.length = 0; } - // Process edges in batches of 100k - const EDGE_BATCH_SIZE = 100000; - const edgeDocs = edges.map(([v, w, weight]) => { + // Process edges in smaller batches to reduce memory pressure + const EDGE_BATCH_SIZE = 10000; // Reduced from 100k to 10k + let edgeDocs = []; + let currentBatch = []; + let currentBatchSize = 0; + + // Process edges in a streaming fashion + for (const [v, w, weight] of edges) { const edge = { _from: verticesByName[v], _to: verticesByName[w], - // Will be used in filters of tests. 
secondFrom: verticesByName[v] }; - // check if our edge also has a weight defined and is a number + if (weight && typeof weight === 'number') { - // if found, add attribute "distance" as weightAttribute to the edge document edge[graphWeightAttribute] = weight; edge[graphIndexedAttribute] = weight; } @@ -439,17 +448,28 @@ class TestGraph { edge.payload3 = payloadGen.next().value; } - return edge; - }); + currentBatch.push(edge); + currentBatchSize++; - // Save edges in batches - for (let i = 0; i < edgeDocs.length; i += EDGE_BATCH_SIZE) { - const batch = edgeDocs.slice(i, i + EDGE_BATCH_SIZE); + // When batch is full, save it and clear memory + if (currentBatchSize >= EDGE_BATCH_SIZE) { + if (this.debug) { + print(`Saving edge batch of size ${currentBatchSize}`); + } + ec.save(currentBatch); + currentBatch = []; + currentBatchSize = 0; + } + } + + // Save any remaining edges + if (currentBatch.length > 0) { if (this.debug) { - print(`Saving edge batch ${i} of ${edgeDocs.length}`); + print(`Saving final edge batch of size ${currentBatch.length}`); } - ec.save(batch); + ec.save(currentBatch); } + return verticesByName; } From 2b790288749fb3f38507e0d808f56c07939de835 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Mon, 19 May 2025 12:55:56 +0200 Subject: [PATCH 15/19] slightly increased timeout to 10s in cluster, 5s in single server --- .../aql-graph-traversal-generic-bidirectional-tests.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js index 971705c268e8..49c61dcbcadd 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js @@ -10,10 +10,10 @@ const internal = require("internal"); const db = internal.db; const arango =
internal.arango; -// seconds to add to execution time for verification +// Seconds to add to execution time for verification. // This is to account for the time it takes for the query to be scheduled and executed -// and for the query to be killed -const VERIFICATION_TIME_BUFFER = 3; +// and killed. +const VERIFICATION_TIME_BUFFER = internal.isCluster() ? 10 : 5; const localHelper = { getRunningQueries: function() { From 7a04512db0966d0a37fd77c3947b0aaa77b296cd Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Mon, 19 May 2025 22:41:00 +0200 Subject: [PATCH 16/19] added early exit to weighted two sided enumerator, yens still missing --- .../WeightedTwoSidedEnumerator.cpp | 29 +++++++++++++++---- .../Enumerators/WeightedTwoSidedEnumerator.h | 12 ++++++-- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.cpp b/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.cpp index 11be5302f22d..427afa754e53 100644 --- a/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.cpp +++ b/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.cpp @@ -58,7 +58,8 @@ WeightedTwoSidedEnumerator< PathValidator>::Ball::Ball(Direction dir, ProviderType&& provider, GraphOptions const& options, PathValidatorOptions validatorOptions, - arangodb::ResourceMonitor& resourceMonitor) + arangodb::ResourceMonitor& resourceMonitor, + WeightedTwoSidedEnumerator& parent) : _resourceMonitor(resourceMonitor), _interior(resourceMonitor), _queue(resourceMonitor), @@ -67,7 +68,8 @@ WeightedTwoSidedEnumerator< _direction(dir), _graphOptions(options), _diameter(-std::numeric_limits::infinity()), - _haveSeenOtherSide(false) {} + _haveSeenOtherSide(false), + _parent(parent) {} template @@ -254,6 +256,16 @@ auto WeightedTwoSidedEnumerator::Ball:: computeNeighbourhoodOfNextVertex(Ball& other, CandidatesStore& candidates) -> void { + if (_graphOptions.isKilled()) { + // First clear our own instance (Ball) + clear(); + // Then clear the other 
instance (Ball) + other.clear(); + // Then clear the parent (WeightedTwoSidedEnumerator) + _parent.clear(); + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } + ensureQueueHasProcessableElement(); auto tmp = _queue.pop(); @@ -414,9 +426,9 @@ WeightedTwoSidedEnumerator bool WeightedTwoSidedEnumerator::isDone() const { + PathValidator>::isDone() { if (!_candidatesStore.isEmpty()) { return false; } @@ -861,7 +873,12 @@ WeightedTwoSidedEnumerator auto WeightedTwoSidedEnumerator::searchDone() const -> bool { + PathValidator>::searchDone() -> bool { + if (_options.isKilled()) { + // Here we're not inside a Ball, so we can clear via main clear method + clear(); + THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); + } if ((_left.noPathLeft() && _right.noPathLeft()) || isAlgorithmFinished()) { return true; } diff --git a/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.h b/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.h index 1d69bd73784e..57fc0dae0f37 100644 --- a/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.h +++ b/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.h @@ -221,7 +221,8 @@ class WeightedTwoSidedEnumerator { public: Ball(Direction dir, ProviderType&& provider, GraphOptions const& options, PathValidatorOptions validatorOptions, - arangodb::ResourceMonitor& resourceMonitor); + arangodb::ResourceMonitor& resourceMonitor, + WeightedTwoSidedEnumerator& parent); ~Ball(); auto clear() -> void; auto reset(VertexRef center, size_t depth = 0) -> void; @@ -296,6 +297,11 @@ class WeightedTwoSidedEnumerator { GraphOptions _graphOptions; double _diameter = -std::numeric_limits::infinity(); bool _haveSeenOtherSide; + + // Reference to the parent WeightedTwoSidedEnumerator + // Intention: To be able to call clear() on the parent + // Case: When a kill signal is received + WeightedTwoSidedEnumerator& _parent; }; enum BallSearchLocation { LEFT, RIGHT, FINISH }; @@ -345,7 +351,7 @@ class WeightedTwoSidedEnumerator { * @return true There will be no further 
path. * @return false There is a chance that there is more data available. */ - [[nodiscard]] bool isDone() const; + [[nodiscard]] bool isDone(); /** * @brief Reset to new source and target vertices. @@ -410,7 +416,7 @@ class WeightedTwoSidedEnumerator { _right.setForbiddenEdges(std::move(forbidden)); }; - private : [[nodiscard]] auto searchDone() const -> bool; + private : [[nodiscard]] auto searchDone() -> bool; // Ensure that we have fetched all vertices in the _results list. Otherwise, // we will not be able to generate the resulting path auto fetchResults() -> void; From cc7991d6d858705f47d338ca2369e3457bd70b18 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Mon, 19 May 2025 22:46:10 +0200 Subject: [PATCH 17/19] debug flag on --- ...l-graph-traversal-generic-bidirectional-tests.js | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js index 49c61dcbcadd..217355fb3dc9 100644 --- a/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js +++ b/js/client/modules/@arangodb/testutils/aql-graph-traversal-generic-bidirectional-tests.js @@ -198,7 +198,7 @@ function testBidirectionalCircleDfsLongRunning(testGraph) { RETURN v.key `; - localHelper.testKillLongRunningQuery(queryString); + localHelper.testKillLongRunningQuery(queryString, true); } function testBidirectionalCircleBfsLongRunning(testGraph) { @@ -211,7 +211,7 @@ function testBidirectionalCircleBfsLongRunning(testGraph) { RETURN v.key `; - localHelper.testKillLongRunningQuery(queryString); + localHelper.testKillLongRunningQuery(queryString, true); } function testBidirectionalCircleWeightedPathLongRunning(testGraph) { @@ -224,7 +224,7 @@ function testBidirectionalCircleWeightedPathLongRunning(testGraph) { RETURN v.key `; - localHelper.testKillLongRunningQuery(queryString); + 
localHelper.testKillLongRunningQuery(queryString, true); } /* @@ -242,7 +242,7 @@ function testHugeCompleteGraphKPathsLongRunning(testGraph) { RETURN path `; - localHelper.testKillLongRunningQuery(queryString); + localHelper.testKillLongRunningQuery(queryString, true); } /* @@ -261,8 +261,7 @@ function testHugeGridGraphShortestPathLongRunning(testGraph) { RETURN v.key `; - const debug = false; - localHelper.testKillLongRunningQuery(queryString, debug, debug); + localHelper.testKillLongRunningQuery(queryString, true); } function testHugeGridGraphAllShortestPathsLongRunning(testGraph) { @@ -275,7 +274,7 @@ function testHugeGridGraphAllShortestPathsLongRunning(testGraph) { RETURN path `; - localHelper.testKillLongRunningQuery(queryString); + localHelper.testKillLongRunningQuery(queryString, true); } // DFS, BFS, Weighted Path From 5c4cb55461be5dfcce49d25e4f6e1770e685bc8c Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Wed, 21 May 2025 00:13:24 +0200 Subject: [PATCH 18/19] no need to call clear manually. 
will be called in destructor automatically --- .../Graph/Enumerators/OneSidedEnumerator.cpp | 3 --- .../Graph/Enumerators/TwoSidedEnumerator.cpp | 18 ++++-------------- arangod/Graph/Enumerators/TwoSidedEnumerator.h | 8 +------- .../Enumerators/WeightedTwoSidedEnumerator.cpp | 18 ++++-------------- .../Enumerators/WeightedTwoSidedEnumerator.h | 8 +------- arangod/Graph/Providers/ClusterProvider.cpp | 3 --- .../Graph/Providers/SingleServerProvider.cpp | 1 - 7 files changed, 10 insertions(+), 49 deletions(-) diff --git a/arangod/Graph/Enumerators/OneSidedEnumerator.cpp b/arangod/Graph/Enumerators/OneSidedEnumerator.cpp index 75da2731558f..0a846670a2e4 100644 --- a/arangod/Graph/Enumerators/OneSidedEnumerator.cpp +++ b/arangod/Graph/Enumerators/OneSidedEnumerator.cpp @@ -109,9 +109,6 @@ void OneSidedEnumerator::clearProvider() { template void OneSidedEnumerator::computeNeighbourhoodOfNextVertex() { if (_options.isKilled()) { - // Clear false may sounds misleading, but this means we do not want to keep - // the path store - clear(false); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } diff --git a/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp b/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp index 668cebe7c31d..e5e7593a5c66 100644 --- a/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp +++ b/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp @@ -56,8 +56,7 @@ TwoSidedEnumerator:: Ball::Ball(Direction dir, ProviderType&& provider, GraphOptions const& options, PathValidatorOptions validatorOptions, - arangodb::ResourceMonitor& resourceMonitor, - TwoSidedEnumerator& parent) + arangodb::ResourceMonitor& resourceMonitor) : _resourceMonitor(resourceMonitor), _interior(resourceMonitor), _queue(resourceMonitor), @@ -65,8 +64,7 @@ TwoSidedEnumerator:: _validator(_provider, _interior, std::move(validatorOptions)), _direction(dir), _minDepth(options.getMinDepth()), - _graphOptions(options), - _parent(parent) {} + _graphOptions(options) {} template @@ -200,12 +198,6 @@ auto 
TwoSidedEnumerator:: Ball::computeNeighbourhoodOfNextVertex(Ball& other, ResultList& results) -> void { if (_graphOptions.isKilled()) { - // First clear our own instance (Ball) - clear(); - // Then clear the other instance (Ball) - other.clear(); - // Then clear the parent (TwoSidedEnumerator) - _parent.clear(); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } @@ -326,13 +318,12 @@ TwoSidedEnumerator:: : _options(std::move(options)), _left{Direction::FORWARD, std::move(forwardProvider), _options, validatorOptions, - resourceMonitor, *this}, + resourceMonitor}, _right{Direction::BACKWARD, std::move(backwardProvider), _options, std::move(validatorOptions), - resourceMonitor, - *this}, + resourceMonitor}, _baselineDepth(_options.getMaxDepth()), _resultPath{_left.provider(), _right.provider()} {} @@ -472,7 +463,6 @@ void TwoSidedEnumerator void; auto reset(VertexRef center, size_t depth = 0) -> void; @@ -187,11 +186,6 @@ class TwoSidedEnumerator { Direction _direction; size_t _minDepth{0}; GraphOptions _graphOptions; - - // Reference to the parent TwoSidedEnumerator - // Intention: To be able to call clear() on the parent - // Case: When a kill signal is received - TwoSidedEnumerator& _parent; }; public: diff --git a/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.cpp b/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.cpp index 427afa754e53..5ca32ce2479c 100644 --- a/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.cpp +++ b/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.cpp @@ -58,8 +58,7 @@ WeightedTwoSidedEnumerator< PathValidator>::Ball::Ball(Direction dir, ProviderType&& provider, GraphOptions const& options, PathValidatorOptions validatorOptions, - arangodb::ResourceMonitor& resourceMonitor, - WeightedTwoSidedEnumerator& parent) + arangodb::ResourceMonitor& resourceMonitor) : _resourceMonitor(resourceMonitor), _interior(resourceMonitor), _queue(resourceMonitor), @@ -68,8 +67,7 @@ WeightedTwoSidedEnumerator< _direction(dir), 
_graphOptions(options), _diameter(-std::numeric_limits::infinity()), - _haveSeenOtherSide(false), - _parent(parent) {} + _haveSeenOtherSide(false) {} template @@ -257,12 +255,6 @@ auto WeightedTwoSidedEnumerator void { if (_graphOptions.isKilled()) { - // First clear our own instance (Ball) - clear(); - // Then clear the other instance (Ball) - other.clear(); - // Then clear the parent (WeightedTwoSidedEnumerator) - _parent.clear(); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } @@ -426,9 +418,9 @@ WeightedTwoSidedEnumerator::searchDone() -> bool { if (_options.isKilled()) { - // Here we're not inside a Ball, so we can clear via main clear method - clear(); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } if ((_left.noPathLeft() && _right.noPathLeft()) || isAlgorithmFinished()) { diff --git a/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.h b/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.h index 57fc0dae0f37..f2e336604707 100644 --- a/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.h +++ b/arangod/Graph/Enumerators/WeightedTwoSidedEnumerator.h @@ -221,8 +221,7 @@ class WeightedTwoSidedEnumerator { public: Ball(Direction dir, ProviderType&& provider, GraphOptions const& options, PathValidatorOptions validatorOptions, - arangodb::ResourceMonitor& resourceMonitor, - WeightedTwoSidedEnumerator& parent); + arangodb::ResourceMonitor& resourceMonitor); ~Ball(); auto clear() -> void; auto reset(VertexRef center, size_t depth = 0) -> void; @@ -297,11 +296,6 @@ class WeightedTwoSidedEnumerator { GraphOptions _graphOptions; double _diameter = -std::numeric_limits::infinity(); bool _haveSeenOtherSide; - - // Reference to the parent WeightedTwoSidedEnumerator - // Intention: To be able to call clear() on the parent - // Case: When a kill signal is received - WeightedTwoSidedEnumerator& _parent; }; enum BallSearchLocation { LEFT, RIGHT, FINISH }; diff --git a/arangod/Graph/Providers/ClusterProvider.cpp b/arangod/Graph/Providers/ClusterProvider.cpp index 
b8d69b1a03e0..587354cbcb09 100644 --- a/arangod/Graph/Providers/ClusterProvider.cpp +++ b/arangod/Graph/Providers/ClusterProvider.cpp @@ -446,7 +446,6 @@ template auto ClusterProvider::fetchVertices( std::vector const& looseEnds) -> std::vector { if (_opts.isKilled()) { - clear(); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } std::vector result{}; @@ -476,7 +475,6 @@ template auto ClusterProvider::fetchEdges( std::vector const& fetchedVertices) -> Result { if (_opts.isKilled()) { - clear(); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } @@ -511,7 +509,6 @@ auto ClusterProvider::expand( Step const& step, size_t previous, std::function const& callback) -> void { if (_opts.isKilled()) { - clear(); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } diff --git a/arangod/Graph/Providers/SingleServerProvider.cpp b/arangod/Graph/Providers/SingleServerProvider.cpp index 84a20a18f548..cb94ce290dad 100644 --- a/arangod/Graph/Providers/SingleServerProvider.cpp +++ b/arangod/Graph/Providers/SingleServerProvider.cpp @@ -165,7 +165,6 @@ auto SingleServerProvider::expand( auto const& vertex = step.getVertex(); if (_opts.isKilled()) { - clear(); THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED); } From 7ff7f4f9570577d7e8f293b688db79b97aa92853 Mon Sep 17 00:00:00 2001 From: Heiko Kernbach Date: Wed, 21 May 2025 00:16:01 +0200 Subject: [PATCH 19/19] format --- arangod/Graph/Enumerators/TwoSidedEnumerator.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp b/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp index e5e7593a5c66..0f0bd630026c 100644 --- a/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp +++ b/arangod/Graph/Enumerators/TwoSidedEnumerator.cpp @@ -316,14 +316,10 @@ TwoSidedEnumerator:: PathValidatorOptions validatorOptions, arangodb::ResourceMonitor& resourceMonitor) : _options(std::move(options)), - _left{Direction::FORWARD, std::move(forwardProvider), - _options, validatorOptions, - 
resourceMonitor}, - _right{Direction::BACKWARD, - std::move(backwardProvider), - _options, - std::move(validatorOptions), - resourceMonitor}, + _left{Direction::FORWARD, std::move(forwardProvider), _options, + validatorOptions, resourceMonitor}, + _right{Direction::BACKWARD, std::move(backwardProvider), _options, + std::move(validatorOptions), resourceMonitor}, _baselineDepth(_options.getMaxDepth()), _resultPath{_left.provider(), _right.provider()} {}