diff --git a/.circleci/config.yml b/.circleci/config.yml index 1d90b871..6c7e8ae6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -11,11 +11,10 @@ workflows: name: Python (<< matrix.python_version >>) - ArangoDB (<< matrix.arangodb_license >>, << matrix.arangodb_version >> << matrix.arangodb_config >>) matrix: parameters: - # TODO: Revisit why pyenv doesn't recognize 3.12 - python_version: ["3.8", "3.9", "3.10", "3.11"] # "3.12" + python_version: ["3.9", "3.10", "3.11", "3.12"] arangodb_config: ["single", "cluster"] arangodb_license: ["community", "enterprise"] - arangodb_version: ["3.10.10", "3.11.4", "latest"] + arangodb_version: ["3.11", "latest"] jobs: lint: diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index bc06e12e..1c434f70 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -98,5 +98,6 @@ jobs: - name: Run Sphinx doctest run: python -m sphinx -b doctest docs docs/_build - - name: Generate Sphinx HTML - run: python -m sphinx -b html -W docs docs/_build + # No longer needed as this is handled by Read the Docs + #- name: Generate Sphinx HTML + # run: python -m sphinx -b html -W docs docs/_build diff --git a/.gitignore b/.gitignore index c6ef2445..4fa6f46d 100644 --- a/.gitignore +++ b/.gitignore @@ -124,3 +124,6 @@ arango/version.py # test results *_results.txt + +# devcontainers +.devcontainer diff --git a/.readthedocs.yaml b/.readthedocs.yaml index bb9d1590..4fda4951 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -15,6 +15,7 @@ sphinx: configuration: docs/conf.py # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs # builder: "dirhtml" + builder: html # Fail on all warnings to avoid broken references fail_on_warning: true diff --git a/README.md b/README.md index ac3bf680..d4b995fd 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,15 @@ ![Logo](https://user-images.githubusercontent.com/2701938/108583516-c3576680-72ee-11eb-883f-2d9b52e74e45.png) -[![CircleCI](https://dl.circleci.com/status-badge/img/gh/ArangoDB-Community/python-arango/tree/main.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/ArangoDB-Community/python-arango/tree/main) -[![CodeQL](https://github.com/ArangoDB-Community/python-arango/actions/workflows/codeql.yaml/badge.svg)](https://github.com/ArangoDB-Community/python-arango/actions/workflows/codeql.yaml) -[![Docs](https://github.com/ArangoDB-Community/python-arango/actions/workflows/docs.yaml/badge.svg)](https://github.com/ArangoDB-Community/python-arango/actions/workflows/docs.yaml) -[![Coverage Status](https://codecov.io/gh/ArangoDB-Community/python-arango/branch/main/graph/badge.svg?token=M8zrjrzsUY)](https://codecov.io/gh/ArangoDB-Community/python-arango) -[![Last commit](https://img.shields.io/github/last-commit/ArangoDB-Community/python-arango)](https://github.com/ArangoDB-Community/python-arango/commits/master) +[![CircleCI](https://dl.circleci.com/status-badge/img/gh/arangodb/python-arango/tree/main.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/arangodb/python-arango/tree/main) +[![CodeQL](https://github.com/arangodb/python-arango/actions/workflows/codeql.yaml/badge.svg)](https://github.com/arangodb/python-arango/actions/workflows/codeql.yaml) +[![Docs](https://github.com/arangodb/python-arango/actions/workflows/docs.yaml/badge.svg)](https://github.com/arangodb/python-arango/actions/workflows/docs.yaml) +[![Coverage 
Status](https://codecov.io/gh/arangodb/python-arango/branch/main/graph/badge.svg?token=M8zrjrzsUY)](https://codecov.io/gh/arangodb/python-arango) +[![Last commit](https://img.shields.io/github/last-commit/arangodb/python-arango)](https://github.com/arangodb/python-arango/commits/main) [![PyPI version badge](https://img.shields.io/pypi/v/python-arango?color=3775A9&style=for-the-badge&logo=pypi&logoColor=FFD43B)](https://pypi.org/project/python-arango/) -[![Python versions badge](https://img.shields.io/badge/3.8%2B-3776AB?style=for-the-badge&logo=python&logoColor=FFD43B&label=Python)](https://pypi.org/project/python-arango/) +[![Python versions badge](https://img.shields.io/badge/3.9%2B-3776AB?style=for-the-badge&logo=python&logoColor=FFD43B&label=Python)](https://pypi.org/project/python-arango/) -[![License](https://img.shields.io/github/license/ArangoDB-Community/python-arango?color=9E2165&style=for-the-badge)](https://github.com/ArangoDB-Community/python-arango/blob/master/LICENSE) +[![License](https://img.shields.io/github/license/arangodb/python-arango?color=9E2165&style=for-the-badge)](https://github.com/arangodb/python-arango/blob/main/LICENSE) [![Code style: black](https://img.shields.io/static/v1?style=for-the-badge&label=code%20style&message=black&color=black)](https://github.com/psf/black) [![Downloads](https://img.shields.io/pepy/dt/python-arango?style=for-the-badge&color=282661 )](https://pepy.tech/project/python-arango) @@ -19,10 +19,12 @@ Python driver for [ArangoDB](https://www.arangodb.com), a scalable multi-model database natively supporting documents, graphs and search. +If you're interested in using asyncio, please check [python-arango-async](https://github.com/arangodb/python-arango-async). + ## Requirements -- ArangoDB version 3.9+ -- Python version 3.8+ +- ArangoDB version 3.11+ +- Python version 3.9+ ## Installation @@ -52,8 +54,8 @@ db = client.db("test", username="root", password="passwd") # Create a new collection named "students". students = db.create_collection("students") -# Add a hash index to the collection. -students.add_hash_index(fields=["name"], unique=True) +# Add a persistent index to the collection. +students.add_index({'type': 'persistent', 'fields': ['name'], 'unique': True}) # Insert new documents into the collection. students.insert({"name": "jane", "age": 39}) @@ -113,12 +115,13 @@ edges.insert({"_from": "students/02", "_to": "lectures/MAT101"}) edges.insert({"_from": "students/02", "_to": "lectures/STA101"}) edges.insert({"_from": "students/03", "_to": "lectures/CSC101"}) -# Traverse the graph in outbound direction, breadth-first. -result = graph.traverse( - start_vertex="students/01", - direction="outbound", - strategy="breadthfirst" -) +# Traverse the graph in outbound direction, breadth-first. +query = """ + FOR v, e, p IN 1..3 OUTBOUND 'students/01' GRAPH 'school' + OPTIONS { bfs: true, uniqueVertices: 'global' } + RETURN {vertex: v, edge: e, path: p} + """ +cursor = db.aql.execute(query) ``` Please see the [documentation](https://docs.python-arango.com) for more details. diff --git a/arango/aql.py b/arango/aql.py index 941000e5..25786302 100644 --- a/arango/aql.py +++ b/arango/aql.py @@ -144,6 +144,36 @@ def response_handler(resp: Response) -> bool: return self._execute(request, response_handler) + def plan_entries(self) -> Result[Jsons]: + """Return a list of all AQL query plan cache entries. + + :return: List of AQL query plan cache entries. + :rtype: list + :raise arango.exceptions.AQLCacheEntriesError: If retrieval fails.
+ """ + request = Request(method="get", endpoint="/_api/query-plan-cache") + + def response_handler(resp: Response) -> Jsons: + if not resp.is_success: + raise AQLCacheEntriesError(resp, request) + result: Jsons = resp.body + return result + + return self._execute(request, response_handler) + + def clear_plan(self) -> Result[None]: + """Clear the AQL query plan cache. + + :raises arango.exceptions.AQLCacheClearError: If clearing the cache fails. + """ + request = Request(method="delete", endpoint="/_api/query-plan-cache") + + def response_handler(resp: Response) -> None: + if not resp.is_success: + raise AQLCacheClearError(resp, request) + + return self._execute(request, response_handler) + class AQL(ApiGroup): """AQL (ArangoDB Query Language) API wrapper. @@ -264,7 +294,7 @@ def execute( cache: Optional[bool] = None, memory_limit: int = 0, fail_on_warning: Optional[bool] = None, - profile: Optional[bool] = None, + profile: Optional[Union[bool, int]] = None, max_transaction_size: Optional[int] = None, max_warning_count: Optional[int] = None, intermediate_commit_count: Optional[int] = None, @@ -277,6 +307,7 @@ def execute( allow_dirty_read: bool = False, allow_retry: bool = False, force_one_shard_attribute_value: Optional[str] = None, + use_plan_cache: Optional[bool] = None, ) -> Result[Cursor]: """Execute the query and return the result cursor. @@ -317,8 +348,12 @@ def execute( this behaviour, so it does not need to be set per-query. :type fail_on_warning: bool :param profile: Return additional profiling details in the cursor, - unless the query cache is used. - :type profile: bool + unless the query cache is used. If set to True or 1, then query profiling + information can be fetched with `cursor.profile()`. If set to 2, additional + execution stats per query plan node are included via "nodes" in + `cursor.statistics()`, as well as a the query plan which can be fetched + with `cursor.plan()`. + :type profile: bool | int :param max_transaction_size: Transaction size limit in bytes. :type max_transaction_size: int :param max_warning_count: Max number of warnings returned. @@ -384,6 +419,8 @@ def execute( shipped to a wrong DB server and may not return results (i.e. empty result set). Use at your own risk. :param force_one_shard_attribute_value: str | None + :param use_plan_cache: If set to True, the query plan cache is used. + :param use_plan_cache: bool | None :return: Result cursor. :rtype: arango.cursor.Cursor :raise arango.exceptions.AQLQueryExecuteError: If execute fails. 
@@ -395,8 +432,6 @@ def execute( data["ttl"] = ttl if bind_vars is not None: data["bindVars"] = bind_vars - if cache is not None: - data["cache"] = cache if memory_limit is not None: data["memoryLimit"] = memory_limit @@ -433,6 +468,10 @@ def execute( options["allowRetry"] = allow_retry if force_one_shard_attribute_value is not None: options["forceOneShardAttributeValue"] = force_one_shard_attribute_value + if cache is not None: + options["cache"] = cache + if use_plan_cache is not None: + options["usePlanCache"] = use_plan_cache if options: data["options"] = options diff --git a/arango/client.py b/arango/client.py index 1666982e..b56755b0 100644 --- a/arango/client.py +++ b/arango/client.py @@ -12,8 +12,13 @@ JwtSuperuserConnection, ) from arango.database import StandardDatabase -from arango.exceptions import ServerConnectionError -from arango.http import DEFAULT_REQUEST_TIMEOUT, DefaultHTTPClient, HTTPClient +from arango.exceptions import ArangoClientError, ServerConnectionError +from arango.http import ( + DEFAULT_REQUEST_TIMEOUT, + DefaultHTTPClient, + HTTPClient, + RequestCompression, +) from arango.resolver import ( FallbackHostResolver, HostResolver, @@ -33,7 +38,7 @@ def default_serializer(x: Any) -> str: :return: The object serialized as a JSON string :rtype: str """ - return dumps(x) + return dumps(x, separators=(",", ":")) def default_deserializer(x: str) -> Any: @@ -74,10 +79,12 @@ class ArangoClient: :type deserializer: callable :param verify_override: Override TLS certificate verification. This will override the verify method of the underlying HTTP client. - None: Do not change the verification behavior of the underlying HTTP client. - True: Verify TLS certificate using the system CA certificates. - False: Do not verify TLS certificate. - str: Path to a custom CA bundle file or directory. + + - `None`: Do not change the verification behavior of the + underlying HTTP client. + - `True`: Verify TLS certificate using the system CA certificates. + - `False`: Do not verify TLS certificate. + - `str`: Path to a custom CA bundle file or directory. :type verify_override: Union[bool, str, None] :param request_timeout: This is the default request timeout (in seconds) for http requests issued by the client if the parameter http_client is @@ -85,6 +92,12 @@ class ArangoClient: None: No timeout. int: Timeout value in seconds. :type request_timeout: int | float + :param request_compression: Will compress requests to the server according to + the given algorithm. No compression happens by default. + :type request_compression: arango.http.RequestCompression | None + :param response_compression: Tells the server what compression algorithm is + acceptable for the response. No compression happens by default. 
+ :type response_compression: str | None """ def __init__( @@ -97,6 +110,8 @@ def __init__( deserializer: Callable[[str], Any] = default_deserializer, verify_override: Union[bool, str, None] = None, request_timeout: Union[int, float, None] = DEFAULT_REQUEST_TIMEOUT, + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: if isinstance(hosts, str): self._hosts = [host.strip("/") for host in hosts.split(",")] @@ -133,6 +148,9 @@ def __init__( for session in self._sessions: session.verify = verify_override + self._request_compression = request_compression + self._response_compression = response_compression + def __repr__(self) -> str: return f"" @@ -183,7 +201,6 @@ def db( auth_method: str = "basic", user_token: Optional[str] = None, superuser_token: Optional[str] = None, - verify_certificate: bool = True, ) -> StandardDatabase: """Connect to an ArangoDB database and return the database API wrapper. @@ -212,8 +229,6 @@ def db( are ignored. This token is not refreshed automatically. Token expiry will not be checked. :type superuser_token: str - :param verify_certificate: Verify TLS certificates. - :type verify_certificate: bool :return: Standard database API wrapper. :rtype: arango.database.StandardDatabase :raise arango.exceptions.ServerConnectionError: If **verify** was set @@ -231,6 +246,8 @@ def db( serializer=self._serializer, deserializer=self._deserializer, superuser_token=superuser_token, + request_compression=self._request_compression, + response_compression=self._response_compression, ) elif user_token is not None: connection = JwtConnection( @@ -242,6 +259,8 @@ def db( serializer=self._serializer, deserializer=self._deserializer, user_token=user_token, + request_compression=self._request_compression, + response_compression=self._response_compression, ) elif auth_method.lower() == "basic": connection = BasicConnection( @@ -254,6 +273,8 @@ def db( http_client=self._http, serializer=self._serializer, deserializer=self._deserializer, + request_compression=self._request_compression, + response_compression=self._response_compression, ) elif auth_method.lower() == "jwt": connection = JwtConnection( @@ -266,6 +287,8 @@ def db( http_client=self._http, serializer=self._serializer, deserializer=self._deserializer, + request_compression=self._request_compression, + response_compression=self._response_compression, ) else: raise ValueError(f"invalid auth_method: {auth_method}") @@ -276,6 +299,6 @@ def db( except ServerConnectionError as err: raise err except Exception as err: - raise ServerConnectionError(f"bad connection: {err}") + raise ArangoClientError(f"bad connection: {err}") return StandardDatabase(connection) diff --git a/arango/cluster.py b/arango/cluster.py index a272f50c..ea13279d 100644 --- a/arango/cluster.py +++ b/arango/cluster.py @@ -11,9 +11,11 @@ ClusterServerCountError, ClusterServerEngineError, ClusterServerIDError, + ClusterServerModeError, ClusterServerRoleError, ClusterServerStatisticsError, ClusterServerVersionError, + ClusterVpackSortMigrationError, ) from arango.formatter import format_body from arango.request import Request @@ -57,6 +59,27 @@ def response_handler(resp: Response) -> str: return self._execute(request, response_handler) + def server_mode(self) -> Result[str]: + """Return the server mode. + + In a read-only server, all write operations will fail + with an error code of 1004 (ERROR_READ_ONLY). Creating or dropping + databases and collections will also fail with error code 11 (ERROR_FORBIDDEN). 
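The client.py changes above thread optional HTTP compression from `ArangoClient` down to the connection layer. The diff only imports the abstract `RequestCompression` interface, so the deflate implementation below is an illustrative assumption; its method names mirror the calls made in `Connection.process_request` (`needs_compression`, `compress`, `encoding`):

```python
import zlib

from arango import ArangoClient
from arango.http import RequestCompression


class DeflateCompression(RequestCompression):
    """Hypothetical deflate compressor for request bodies."""

    def needs_compression(self, data: str) -> bool:
        return len(data) >= 1024  # only compress larger payloads

    def compress(self, data: str) -> bytes:
        return zlib.compress(data.encode("utf-8"))

    def encoding(self) -> str:
        return "deflate"  # sent as the content-encoding header


client = ArangoClient(
    hosts="http://localhost:8529",
    request_compression=DeflateCompression(),
    response_compression="gzip",  # sent as the accept-encoding header
)
```

Also note the behavioral change at the end of that hunk: a failed connection check in `db()` now raises `ArangoClientError` instead of `ServerConnectionError`.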
+ + :return: Server mode. Possible values are "default" or "readonly". + :rtype: str + :raise arango.exceptions.ClusterServerModeError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_admin/server/mode") + + def response_handler(resp: Response) -> str: + if resp.is_success: + return str(resp.body["mode"]) + + raise ClusterServerModeError(resp, request) + + return self._execute(request, response_handler) + def server_version(self, server_id: str) -> Result[Json]: """Return the version of the given server. @@ -140,6 +163,58 @@ def response_handler(resp: Response) -> Json: return self._execute(request, response_handler) + def server_maintenance_mode(self, server_id: str) -> Result[Json]: + """Return the maintenance status for the given server. + + :param server_id: Server ID. + :type server_id: str + :return: Maintenance status for the given server. + :rtype: dict + :raise arango.exceptions.ClusterMaintenanceModeError: If retrieval fails. + """ + request = Request( + method="get", + endpoint=f"/_admin/cluster/maintenance/{server_id}", + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + result: Json = resp.body.get("result", {}) + return result + + raise ClusterMaintenanceModeError(resp, request) + + return self._execute(request, response_handler) + + def toggle_server_maintenance_mode( + self, server_id: str, mode: str, timeout: Optional[int] = None + ) -> Result[Json]: + """Enable or disable the maintenance mode for the given server. + + :param server_id: Server ID. + :type server_id: str + :param mode: Maintenance mode. Allowed values are "normal" and "maintenance". + :type mode: str + :param timeout: Timeout in seconds. + :type timeout: Optional[int] + :return: Result of the operation. + :rtype: dict + :raise arango.exceptions.ClusterMaintenanceModeError: If toggle fails. + """ + request = Request( + method="put", + endpoint=f"/_admin/cluster/maintenance/{server_id}", + data={"mode": mode, "timeout": timeout}, + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + + raise ClusterMaintenanceModeError(resp, request) + + return self._execute(request, response_handler) + def health(self) -> Result[Json]: """Return the cluster health. @@ -370,3 +445,57 @@ def response_handler(resp: Response) -> bool: return result return self._execute(request, response_handler) + + def vpack_sort_migration_status(self) -> Result[Json]: + """Query the status of the vpack sorting migration. + + :return: Status of the VPack sort migration. + :rtype: dict + """ + request = Request( + method="get", endpoint="/_admin/cluster/vpackSortMigration/status" + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterVpackSortMigrationError(resp, request) + result: Json = resp.body["result"] + return result + + return self._execute(request, response_handler) + + def vpack_sort_migration_index_check(self) -> Result[Json]: + """Check for indexes impacted by the sorting behavior before 3.12.2. + + :return: Status of indexes. + :rtype: dict + """ + request = Request( + method="get", endpoint="/_admin/cluster/vpackSortMigration/check" + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterVpackSortMigrationError(resp, request) + result: Json = resp.body["result"] + return result + + return self._execute(request, response_handler) + + def migrate_vpack_sorting(self) -> Result[Json]: + """Migrate instances to the new VPack sorting behavior. 
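A sketch of the new cluster administration calls above, assuming a cluster deployment and sufficient privileges; the maintenance endpoints target individual DB-Servers:

```python
cluster = db.cluster

print(cluster.server_mode())  # "default" or "readonly"

# Put one DB-Server into maintenance for 60 seconds, then verify and revert.
server_id = "PRMR-0001"  # placeholder; real IDs can be read from cluster.health()
cluster.toggle_server_maintenance_mode(server_id, "maintenance", timeout=60)
print(cluster.server_maintenance_mode(server_id))
cluster.toggle_server_maintenance_mode(server_id, "normal")

# VPack sort migration helpers for the pre-3.12.2 sorting behavior.
print(cluster.vpack_sort_migration_status())
print(cluster.vpack_sort_migration_index_check())
cluster.migrate_vpack_sorting()
```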
+ + :return: Status of the VPack sort migration. + :rtype: dict + """ + request = Request( + method="put", endpoint="/_admin/cluster/vpackSortMigration/migrate" + ) + + def response_handler(resp: Response) -> Json: + if not resp.is_success: + raise ClusterVpackSortMigrationError(resp, request) + result: Json = resp.body["result"] + return result + + return self._execute(request, response_handler) diff --git a/arango/collection.py b/arango/collection.py index 930ef771..a996dc5c 100644 --- a/arango/collection.py +++ b/arango/collection.py @@ -2,6 +2,7 @@ from numbers import Number from typing import List, Optional, Sequence, Tuple, Union +from warnings import warn from arango.api import ApiGroup from arango.connection import Connection @@ -49,11 +50,13 @@ from arango.typings import Fields, Headers, Json, Jsons, Params from arango.utils import ( build_filter_conditions, + build_sort_expression, get_batches, get_doc_id, is_none_or_bool, is_none_or_int, is_none_or_str, + validate_sort_parameters, ) @@ -563,15 +566,31 @@ def response_handler(resp: Response) -> bool: return self._execute(request, response_handler) - def truncate(self) -> Result[bool]: + def truncate( + self, + sync: Optional[bool] = None, + compact: Optional[bool] = None, + ) -> Result[bool]: """Delete all documents in the collection. + :param sync: Block until deletion operation is synchronized to disk. + :type sync: bool | None + :param compact: Whether to compact the collection after truncation. + :type compact: bool | None :return: True if collection was truncated successfully. :rtype: bool :raise arango.exceptions.CollectionTruncateError: If operation fails. """ + params: Json = {} + if sync is not None: + params["waitForSync"] = sync + if compact is not None: + params["compact"] = compact + request = Request( - method="put", endpoint=f"/_api/collection/{self.name}/truncate" + method="put", + endpoint=f"/_api/collection/{self.name}/truncate", + params=params, ) def response_handler(resp: Response) -> bool: @@ -629,20 +648,20 @@ def has( headers["x-arango-allow-dirty-read"] = "true" request = Request( - method="get", + method="head", endpoint=f"/_api/document/{handle}", headers=headers, read=self.name, ) def response_handler(resp: Response) -> bool: - if resp.error_code == 1202: - return False if resp.status_code == 412: raise DocumentRevisionError(resp, request) + if resp.status_code == 404: + return False if not resp.is_success: raise DocumentInError(resp, request) - return bool(resp.body) + return True return self._execute(request, response_handler) @@ -752,6 +771,7 @@ def find( skip: Optional[int] = None, limit: Optional[int] = None, allow_dirty_read: bool = False, + sort: Optional[Jsons] = None, ) -> Result[Cursor]: """Return all documents that match the given filters. @@ -763,13 +783,18 @@ def find( :type limit: int | None :param allow_dirty_read: Allow reads from followers in a cluster. :type allow_dirty_read: bool + :param sort: Document sort parameters + :type sort: Jsons | None :return: Document cursor. :rtype: arango.cursor.Cursor :raise arango.exceptions.DocumentGetError: If retrieval fails. + :raise arango.exceptions.SortValidationError: If sort parameters are invalid. 
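The collection.py hunks above rework `truncate`, `has`, and `find`. A short sketch; the shape of the `sort` parameter is an assumption inferred from the `validate_sort_parameters`/`build_sort_expression` helpers, which this diff imports but does not show:

```python
students = db.collection("students")

# truncate() now forwards the optional waitForSync and compact flags.
students.truncate(sync=True, compact=True)

# has() now issues a HEAD request and maps 404 to False, instead of
# inspecting error code 1202 in a GET response body.
exists = students.has("jane")

# find() accepts client-validated sort parameters.
cursor = students.find(
    filters={"age": 39},
    sort=[{"sort_by": "name", "sort_order": "ASC"}],  # assumed shape
)
```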
""" assert isinstance(filters, dict), "filters must be a dict" assert is_none_or_int(skip), "skip must be a non-negative int" assert is_none_or_int(limit), "limit must be a non-negative int" + if sort: + validate_sort_parameters(sort) skip_val = skip if skip is not None else 0 limit_val = limit if limit is not None else "null" @@ -777,9 +802,9 @@ def find( FOR doc IN @@collection {build_filter_conditions(filters)} LIMIT {skip_val}, {limit_val} + {build_sort_expression(sort)} RETURN doc """ - bind_vars = {"@collection": self.name} request = Request( @@ -1074,7 +1099,7 @@ def build_coord_str_from_index(index: Json) -> str: FILTER GEO_CONTAINS(rect, {coord_str}) LIMIT {skip_val}, {limit_val} RETURN doc - """ + """ # noqa: E201 E202 bind_vars = {"@collection": self.name} @@ -1259,11 +1284,27 @@ def response_handler(resp: Response) -> Json: return self._execute(request, response_handler) - def _add_index(self, data: Json) -> Result[Json]: - """Helper method for creating a new index. + def add_index(self, data: Json, formatter: bool = False) -> Result[Json]: + """Create an index. - :param data: Index data. + .. note:: + + As the `add_index` method was made available starting with driver + version 8, we have decided to deprecate the other `add_*_index` + methods, making this the official way to create indexes. While + the other methods still work, we recommend using this one instead. + Note that the other methods would use a formatter by default, + processing the index attributes returned by the server (for the + most part, it does a snake case conversion). This method skips that, + returning the raw index, except for the `id` attribute. However, + if you want the formatter to be applied for backwards compatibility, + you can set the `formatter` parameter to `True`. + + :param data: Index data. Must contain a "type" and "fields" attribute. :type data: dict + :param formatter: If set to True, apply formatting to the returned result. + Should only be used for backwards compatibility. + :type formatter: bool :return: New index details. :rtype: dict :raise arango.exceptions.IndexCreateError: If create fails. @@ -1278,7 +1319,7 @@ def _add_index(self, data: Json) -> Result[Json]: def response_handler(resp: Response) -> Json: if not resp.is_success: raise IndexCreateError(resp, request) - return format_index(resp.body) + return format_index(resp.body, formatter) return self._execute(request, response_handler) @@ -1293,6 +1334,12 @@ def add_hash_index( ) -> Result[Json]: """Create a new hash index. + .. warning:: + + The index types `hash` and `skiplist` are aliases for the persistent + index type and should no longer be used to create new indexes. The + aliases will be removed in a future version. + :param fields: Document fields to index. :type fields: [str] :param unique: Whether the index is unique. @@ -1311,6 +1358,9 @@ def add_hash_index( :rtype: dict :raise arango.exceptions.IndexCreateError: If create fails. """ + m = "add_hash_index is deprecated. Using add_index with {'type': 'hash'} instead." # noqa: E501 + warn(m, DeprecationWarning, stacklevel=2) + data: Json = {"type": "hash", "fields": fields} if unique is not None: @@ -1324,7 +1374,7 @@ def add_hash_index( if in_background is not None: data["inBackground"] = in_background - return self._add_index(data) + return self.add_index(data, formatter=True) def add_skiplist_index( self, @@ -1337,6 +1387,12 @@ def add_skiplist_index( ) -> Result[Json]: """Create a new skiplist index. + .. 
warning:: + + The index types `hash` and `skiplist` are aliases for the persistent + index type and should no longer be used to create new indexes. The + aliases will be removed in a future version. + :param fields: Document fields to index. :type fields: [str] :param unique: Whether the index is unique. @@ -1355,6 +1411,9 @@ def add_skiplist_index( :rtype: dict :raise arango.exceptions.IndexCreateError: If create fails. """ + m = "add_skiplist_index is deprecated. Using add_index with {'type': 'skiplist'} instead." # noqa: E501 + warn(m, DeprecationWarning, stacklevel=2) + data: Json = {"type": "skiplist", "fields": fields} if unique is not None: @@ -1368,12 +1427,12 @@ def add_skiplist_index( if in_background is not None: data["inBackground"] = in_background - return self._add_index(data) + return self.add_index(data, formatter=True) def add_geo_index( self, fields: Fields, - ordered: Optional[bool] = None, + geo_json: Optional[bool] = None, name: Optional[str] = None, in_background: Optional[bool] = None, legacyPolygons: Optional[bool] = False, @@ -1385,8 +1444,10 @@ def add_geo_index( with at least two floats. Documents with missing fields or invalid values are excluded. :type fields: str | [str] - :param ordered: Whether the order is longitude, then latitude. - :type ordered: bool | None + :param geo_json: Whether to use GeoJSON data-format or not. This + parameter has been renamed from `ordered`. See Github Issue + #234 for more details. + :type geo_json: bool | None :param name: Optional name for the index. :type name: str | None :param in_background: Do not hold the collection lock. @@ -1398,10 +1459,13 @@ def add_geo_index( :rtype: dict :raise arango.exceptions.IndexCreateError: If create fails. """ + m = "add_geo_index is deprecated. Using add_index with {'type': 'geo'} instead." # noqa: E501 + warn(m, DeprecationWarning, stacklevel=2) + data: Json = {"type": "geo", "fields": fields} - if ordered is not None: - data["geoJson"] = ordered + if geo_json is not None: + data["geoJson"] = geo_json if name is not None: data["name"] = name if in_background is not None: @@ -1409,7 +1473,7 @@ def add_geo_index( if legacyPolygons is not None: data["legacyPolygons"] = legacyPolygons - return self._add_index(data) + return self.add_index(data, formatter=True) def add_fulltext_index( self, @@ -1418,9 +1482,11 @@ def add_fulltext_index( name: Optional[str] = None, in_background: Optional[bool] = None, ) -> Result[Json]: - """Create a new fulltext index. This method is deprecated - in ArangoDB 3.10 and will be removed in a future version - of the driver. + """Create a new fulltext index. + + .. warning:: + This method is deprecated since ArangoDB 3.10 and will be removed + in a future version of the driver. :param fields: Document fields to index. :type fields: [str] @@ -1434,6 +1500,9 @@ def add_fulltext_index( :rtype: dict :raise arango.exceptions.IndexCreateError: If create fails. """ + m = "add_fulltext_index is deprecated. Using add_index with {'type': 'fulltext'} instead." # noqa: E501 + warn(m, DeprecationWarning, stacklevel=2) + data: Json = {"type": "fulltext", "fields": fields} if min_length is not None: @@ -1443,7 +1512,7 @@ def add_fulltext_index( if in_background is not None: data["inBackground"] = in_background - return self._add_index(data) + return self.add_index(data, formatter=True) def add_persistent_index( self, @@ -1486,6 +1555,9 @@ def add_persistent_index( :rtype: dict :raise arango.exceptions.IndexCreateError: If create fails. 
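With `_add_index` promoted to the public `add_index`, raw index definitions become the recommended path, while the legacy helpers emit a `DeprecationWarning` and delegate with `formatter=True` for backwards-compatible output. A sketch:

```python
import warnings

students = db.collection("students")

# Preferred: pass the raw definition; the response keeps the server's
# attribute names, with only "id" post-processed.
students.add_index({"type": "persistent", "fields": ["name"], "unique": True})

# The geoJson flag replaces the misleading `ordered` parameter (issue #234).
students.add_index({"type": "geo", "fields": ["location"], "geoJson": True})

# Legacy helpers still work but now warn:
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    students.add_persistent_index(fields=["age"])
assert any(w.category is DeprecationWarning for w in caught)
```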
""" + m = "add_persistent_index is deprecated. Using add_index with {'type': 'persistent'} instead." # noqa: E501 + warn(m, DeprecationWarning, stacklevel=2) + data: Json = {"type": "persistent", "fields": fields} if unique is not None: @@ -1501,7 +1573,7 @@ def add_persistent_index( if cacheEnabled is not None: data["cacheEnabled"] = cacheEnabled - return self._add_index(data) + return self.add_index(data, formatter=True) def add_ttl_index( self, @@ -1524,6 +1596,9 @@ def add_ttl_index( :rtype: dict :raise arango.exceptions.IndexCreateError: If create fails. """ + m = "add_ttl_index is deprecated. Using add_index with {'type': 'ttl'} instead." # noqa: E501 + warn(m, DeprecationWarning, stacklevel=2) + data: Json = {"type": "ttl", "fields": fields, "expireAfter": expiry_time} if name is not None: @@ -1531,7 +1606,7 @@ def add_ttl_index( if in_background is not None: data["inBackground"] = in_background - return self._add_index(data) + return self.add_index(data, formatter=True) def add_inverted_index( self, @@ -1586,6 +1661,9 @@ def add_inverted_index( :rtype: dict :raise arango.exceptions.IndexCreateError: If create fails. """ + m = "add_inverted_index is deprecated. Using add_index with {'type': 'inverted'} instead." # noqa: E501 + warn(m, DeprecationWarning, stacklevel=2) + data: Json = {"type": "inverted", "fields": fields} if name is not None: @@ -1615,7 +1693,7 @@ def add_inverted_index( if cache is not None: data["cache"] = cache - return self._add_index(data) + return self.add_index(data, formatter=True) def delete_index(self, index_id: str, ignore_missing: bool = False) -> Result[bool]: """Delete an index. @@ -1673,6 +1751,8 @@ def insert_many( keep_none: Optional[bool] = None, merge: Optional[bool] = None, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, + raise_on_document_error: bool = False, ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: """Insert multiple documents. @@ -1682,15 +1762,8 @@ def insert_many( returned as an object in the result list. It is up to you to inspect the list to determine which documents were inserted successfully (returns document metadata) and which were not - (returns exception object). - - .. note:: - - In edge/vertex collections, this method does NOT provide the - transactional guarantees and validations that single insert - operation does for graphs. If these properties are required, see - :func:`arango.database.StandardDatabase.begin_batch_execution` - for an alternative approach. + (returns exception object). Alternatively, you can rely on + setting **raise_on_document_error** to True (defaults to False). :param documents: List of new documents to insert. If they contain the "_key" or "_id" fields, the values are used as the keys of the new @@ -1727,6 +1800,14 @@ def insert_many( index caches if document insertions affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str + :param raise_on_document_error: Whether to raise if a DocumentRevisionError + or a DocumentInsertError is encountered on an individual document, + as opposed to returning the error as an object in the result list. + Defaults to False. + :type raise_on_document_error: bool :return: List of document metadata (e.g. document keys, revisions) and any exception, or True if parameter **silent** was set to True. 
:rtype: [dict | ArangoServerError] | bool @@ -1749,6 +1830,8 @@ def insert_many( params["keepNull"] = keep_none if merge is not None: params["mergeObjects"] = merge + if version_attribute is not None: + params["versionAttribute"] = version_attribute # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: @@ -1777,7 +1860,12 @@ def response_handler( results.append(body) else: sub_resp = self._conn.prep_bulk_err_response(resp, body) - results.append(DocumentInsertError(sub_resp, request)) + error = DocumentInsertError(sub_resp, request) + + if raise_on_document_error: + raise error + + results.append(error) return results @@ -1795,6 +1883,7 @@ def update_many( silent: bool = False, refill_index_caches: Optional[bool] = None, raise_on_document_error: bool = False, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: """Update multiple documents. @@ -1807,14 +1896,6 @@ def update_many( (returns exception object). Alternatively, you can rely on setting **raise_on_document_error** to True (defaults to False). - .. note:: - - In edge/vertex collections, this method does NOT provide the - transactional guarantees and validations that single update - operation does for graphs. If these properties are required, see - :func:`arango.database.StandardDatabase.begin_batch_execution` - for an alternative approach. - :param documents: Partial or full documents with the updated values. They must contain the "_id" or "_key" fields. :type documents: [dict] @@ -1847,6 +1928,9 @@ def update_many( as opposed to returning the error as an object in the result list. Defaults to False. :type raise_on_document_error: bool + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str :return: List of document metadata (e.g. document keys, revisions) and any exceptions, or True if parameter **silent** was set to True. :rtype: [dict | ArangoError] | bool @@ -1863,6 +1947,8 @@ def update_many( } if sync is not None: params["waitForSync"] = sync + if version_attribute is not None: + params["versionAttribute"] = version_attribute # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: @@ -1921,14 +2007,6 @@ def update_match( ) -> Result[int]: """Update matching documents. - .. note:: - - In edge/vertex collections, this method does NOT provide the - transactional guarantees and validations that single update - operation does for graphs. If these properties are required, see - :func:`arango.database.StandardDatabase.begin_batch_execution` - for an alternative approach. - :param filters: Document filters. :type filters: dict :param body: Full or partial document body with the updates. @@ -1965,7 +2043,7 @@ def update_match( {f"LIMIT {limit}" if limit is not None else ""} UPDATE doc WITH @body IN @@collection OPTIONS {{ keepNull: @keep_none, mergeObjects: @merge {sync_val} }} - """ + """ # noqa: E201 E202 bind_vars = { "@collection": self.name, @@ -1999,6 +2077,7 @@ def replace_many( sync: Optional[bool] = None, silent: bool = False, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: """Replace multiple documents. @@ -2010,14 +2089,6 @@ def replace_many( successfully (returns document metadata) and which were not (returns exception object). - .. 
note:: - - In edge/vertex collections, this method does NOT provide the - transactional guarantees and validations that single replace - operation does for graphs. If these properties are required, see - :func:`arango.database.StandardDatabase.begin_batch_execution` - for an alternative approach. - :param documents: New documents to replace the old ones with. They must contain the "_id" or "_key" fields. Edge documents must also have "_from" and "_to" fields. @@ -2040,6 +2111,9 @@ def replace_many( index caches if document operations affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str :return: List of document metadata (e.g. document keys, revisions) and any exceptions, or True if parameter **silent** was set to True. :rtype: [dict | ArangoServerError] | bool @@ -2054,6 +2128,8 @@ def replace_many( } if sync is not None: params["waitForSync"] = sync + if version_attribute is not None: + params["versionAttribute"] = version_attribute # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: @@ -2107,14 +2183,6 @@ def replace_match( ) -> Result[int]: """Replace matching documents. - .. note:: - - In edge/vertex collections, this method does NOT provide the - transactional guarantees and validations that single replace - operation does for graphs. If these properties are required, see - :func:`arango.database.StandardDatabase.begin_batch_execution` - for an alternative approach. - :param filters: Document filters. :type filters: dict :param body: New document body. @@ -2144,7 +2212,7 @@ def replace_match( {f"LIMIT {limit}" if limit is not None else ""} REPLACE doc WITH @body IN @@collection {f"OPTIONS {{ {sync_val} }}" if sync_val else ""} - """ + """ # noqa: E201 E202 bind_vars = {"@collection": self.name, "body": body} @@ -2172,6 +2240,7 @@ def delete_many( sync: Optional[bool] = None, silent: bool = False, refill_index_caches: Optional[bool] = None, + raise_on_document_error: bool = False, ) -> Result[Union[bool, List[Union[Json, ArangoServerError]]]]: """Delete multiple documents. @@ -2183,14 +2252,6 @@ def delete_many( successfully (returns document metadata) and which were not (returns exception object). - .. note:: - - In edge/vertex collections, this method does NOT provide the - transactional guarantees and validations that single delete - operation does for graphs. If these properties are required, see - :func:`arango.database.StandardDatabase.begin_batch_execution` - for an alternative approach. - :param documents: Document IDs, keys or bodies. Document bodies must contain the "_id" or "_key" fields. :type documents: [str | dict] @@ -2208,6 +2269,11 @@ def delete_many( index caches if document operations affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param raise_on_document_error: Whether to raise if a DocumentRevisionError + or a DocumentDeleteError is encountered on an individual document, + as opposed to returning the error as an object in the result list. + Defaults to False. + :type raise_on_document_error: bool :return: List of document metadata (e.g. document keys, revisions) and any exceptions, or True if parameter **silent** was set to True. 
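The same two knobs extend the other bulk operations: `version_attribute` on `update_many`/`replace_many`, and `raise_on_document_error` on `delete_many`. For example:

```python
from arango.exceptions import DocumentDeleteError, DocumentRevisionError

# Only apply updates whose "ver" value beats the stored one.
students.update_many(
    [{"_key": "01", "age": 40, "ver": 3}],
    version_attribute="ver",
)

# Fail fast on the first document that cannot be deleted.
try:
    students.delete_many(
        [{"_key": "01"}, {"_key": "does-not-exist"}],
        raise_on_document_error=True,
    )
except (DocumentRevisionError, DocumentDeleteError) as err:
    print("stopped at first failing document:", err)
```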
:rtype: [dict | ArangoServerError] | bool @@ -2259,6 +2325,10 @@ def response_handler( error = DocumentRevisionError(sub_resp, request) else: error = DocumentDeleteError(sub_resp, request) + + if raise_on_document_error: + raise error + results.append(error) return results @@ -2274,14 +2344,6 @@ def delete_match( ) -> Result[int]: """Delete matching documents. - .. note:: - - In edge/vertex collections, this method does NOT provide the - transactional guarantees and validations that single delete - operation does for graphs. If these properties are required, see - :func:`arango.database.StandardDatabase.begin_batch_execution` - for an alternative approach. - :param filters: Document filters. :type filters: dict :param limit: Max number of documents to delete. If the limit is lower @@ -2309,7 +2371,7 @@ def delete_match( {f"LIMIT {limit}" if limit is not None else ""} REMOVE doc IN @@collection {f"OPTIONS {{ {sync_val} }}" if sync_val else ""} - """ + """ # noqa: E201 E202 bind_vars = {"@collection": self.name} @@ -2348,14 +2410,6 @@ def import_bulk( This method is faster than :func:`arango.collection.Collection.insert_many` but does not return as many details. - .. note:: - - In edge/vertex collections, this method does NOT provide the - transactional guarantees and validations that single insert - operation does for graphs. If these properties are required, see - :func:`arango.database.StandardDatabase.begin_batch_execution` - for an alternative approach. - :param documents: List of new documents to insert. If they contain the "_key" or "_id" fields, the values are used as the keys of the new documents (auto-generated otherwise). Any "_rev" field is ignored. @@ -2528,6 +2582,7 @@ def insert( keep_none: Optional[bool] = None, merge: Optional[bool] = None, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, Json]]: """Insert a new document. @@ -2566,6 +2621,9 @@ def insert( index caches if document insertions affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. :rtype: bool | dict @@ -2587,6 +2645,8 @@ def insert( params["keepNull"] = keep_none if merge is not None: params["mergeObjects"] = merge + if version_attribute is not None: + params["versionAttribute"] = version_attribute # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: @@ -2625,6 +2685,7 @@ def update( sync: Optional[bool] = None, silent: bool = False, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, Json]]: """Update a document. @@ -2655,6 +2716,9 @@ def update( index caches if document insertions affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning + to document operations. + :type version_attribute: str :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. 
:rtype: bool | dict @@ -2667,12 +2731,14 @@ def update( "returnNew": return_new, "returnOld": return_old, "ignoreRevs": not check_rev, - "overwrite": not check_rev, "silent": silent, } if sync is not None: params["waitForSync"] = sync + if version_attribute is not None: + params["versionAttribute"] = version_attribute + # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: params["refillIndexCaches"] = refill_index_caches @@ -2708,6 +2774,7 @@ def replace( sync: Optional[bool] = None, silent: bool = False, refill_index_caches: Optional[bool] = None, + version_attribute: Optional[str] = None, ) -> Result[Union[bool, Json]]: """Replace a document. @@ -2733,6 +2800,9 @@ def replace( index caches if document insertions affect the edge index or cache-enabled persistent indexes. :type refill_index_caches: bool | None + :param version_attribute: support for simple external versioning to + document operations. + :type version_attribute: str :return: Document metadata (e.g. document key, revision) or True if parameter **silent** was set to True. :rtype: bool | dict @@ -2749,6 +2819,9 @@ def replace( if sync is not None: params["waitForSync"] = sync + if version_attribute is not None: + params["versionAttribute"] = version_attribute + # New in ArangoDB 3.9.6 and 3.10.2 if refill_index_caches is not None: params["refillIndexCaches"] = refill_index_caches diff --git a/arango/connection.py b/arango/connection.py index 3daa4585..8de2643a 100644 --- a/arango/connection.py +++ b/arango/connection.py @@ -23,7 +23,7 @@ JWTRefreshError, ServerConnectionError, ) -from arango.http import HTTPClient +from arango.http import HTTPClient, RequestCompression from arango.request import Request from arango.resolver import HostResolver from arango.response import Response @@ -44,7 +44,10 @@ def __init__( http_client: HTTPClient, serializer: Callable[..., str], deserializer: Callable[[str], Any], + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: + self._hosts = hosts self._url_prefixes = [f"{host}/_db/{db_name}" for host in hosts] self._host_resolver = host_resolver self._sessions = sessions @@ -53,6 +56,8 @@ def __init__( self._serializer = serializer self._deserializer = deserializer self._username: Optional[str] = None + self._request_compression = request_compression + self._response_compression = response_compression @property def db_name(self) -> str: @@ -133,6 +138,19 @@ def process_request( """ tries = 0 indexes_to_filter: Set[int] = set() + + data = self.normalize_data(request.data) + if ( + self._request_compression is not None + and isinstance(data, str) + and self._request_compression.needs_compression(data) + ): + request.headers["content-encoding"] = self._request_compression.encoding() + data = self._request_compression.compress(data) + + if self._response_compression is not None: + request.headers["accept-encoding"] = self._response_compression + while tries < self._host_resolver.max_tries: try: resp = self._http.send_request( @@ -140,7 +158,7 @@ def process_request( method=request.method, url=self._url_prefixes[host_index] + request.endpoint, params=request.params, - data=self.normalize_data(request.data), + data=data, headers=request.headers, auth=auth, ) @@ -209,9 +227,13 @@ def ping(self) -> int: request = Request(method="get", endpoint="/_api/collection") resp = self.send_request(request) if resp.status_code in {401, 403}: - raise ServerConnectionError("bad username/password or token is expired") + raise 
ServerConnectionError( + resp, request, "bad username/password or token is expired" + ) if not resp.is_success: # pragma: no cover - raise ServerConnectionError(resp.error_message or "bad server response") + raise ServerConnectionError( + resp, request, resp.error_message or "bad server response" + ) return resp.status_code @abstractmethod @@ -243,6 +265,10 @@ class BasicConnection(BaseConnection): :type password: str :param http_client: User-defined HTTP client. :type http_client: arango.http.HTTPClient + :param request_compression: The request compression algorithm. + :type request_compression: arango.http.RequestCompression | None + :param response_compression: The response compression algorithm. + :type response_compression: str | None """ def __init__( @@ -256,6 +282,8 @@ def __init__( http_client: HTTPClient, serializer: Callable[..., str], deserializer: Callable[[str], Any], + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: super().__init__( hosts, @@ -265,6 +293,8 @@ def __init__( http_client, serializer, deserializer, + request_compression, + response_compression, ) self._username = username self._auth = (username, password) @@ -298,6 +328,10 @@ class JwtConnection(BaseConnection): :type password: str :param http_client: User-defined HTTP client. :type http_client: arango.http.HTTPClient + :param request_compression: The request compression algorithm. + :type request_compression: arango.http.RequestCompression | None + :param response_compression: The response compression algorithm. + :type response_compression: str | None """ def __init__( @@ -312,6 +346,8 @@ def __init__( username: Optional[str] = None, password: Optional[str] = None, user_token: Optional[str] = None, + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: super().__init__( hosts, @@ -321,6 +357,8 @@ def __init__( http_client, serializer, deserializer, + request_compression, + response_compression, ) self._username = username self._password = password @@ -439,6 +477,10 @@ class JwtSuperuserConnection(BaseConnection): :type http_client: arango.http.HTTPClient :param superuser_token: User generated token for superuser access. :type superuser_token: str + :param request_compression: The request compression algorithm. + :type request_compression: arango.http.RequestCompression | None + :param response_compression: The response compression algorithm.
+ :type response_compression: str | None """ def __init__( @@ -451,6 +493,8 @@ def __init__( serializer: Callable[..., str], deserializer: Callable[[str], Any], superuser_token: str, + request_compression: Optional[RequestCompression] = None, + response_compression: Optional[str] = None, ) -> None: super().__init__( hosts, @@ -460,6 +504,8 @@ def __init__( http_client, serializer, deserializer, + request_compression, + response_compression, ) self._auth_header = f"bearer {superuser_token}" diff --git a/arango/cursor.py b/arango/cursor.py index 83da8881..7c9fabe2 100644 --- a/arango/cursor.py +++ b/arango/cursor.py @@ -40,6 +40,7 @@ class Cursor: "_count", "_cached", "_stats", + "_plan", "_profile", "_warnings", "_has_more", @@ -63,6 +64,7 @@ def __init__( self._count: Optional[int] = None self._cached = None self._stats = None + self._plan = None self._profile = None self._warnings = None self._next_batch_id: Optional[str] = None @@ -132,12 +134,18 @@ def _update(self, data: Json) -> Json: self._warnings = extra["warnings"] result["warnings"] = extra["warnings"] + if "plan" in extra: + self._plan = extra["plan"] + result["plan"] = extra["plan"] + if "stats" in extra: stats = extra["stats"] if "writesExecuted" in stats: stats["modified"] = stats.pop("writesExecuted") if "writesIgnored" in stats: stats["ignored"] = stats.pop("writesIgnored") + if "documentLookups" in stats: + stats["lookups"] = stats.pop("documentLookups") if "scannedFull" in stats: stats["scanned_full"] = stats.pop("scannedFull") if "scannedIndex" in stats: @@ -159,6 +167,9 @@ def _update(self, data: Json) -> Json: if "peakMemoryUsage" in stats: stats["peak_memory_usage"] = stats.pop("peakMemoryUsage") + if "intermediateCommits" in stats: + stats["intermediate_commits"] = stats.pop("intermediateCommits") + self._stats = stats result["statistics"] = stats @@ -239,6 +250,14 @@ def warnings(self) -> Optional[Sequence[Json]]: """ return self._warnings + def plan(self) -> Optional[Json]: + """Return query execution plan. + + :return: Query execution plan. + :rtype: dict + """ + return self._plan + def empty(self) -> bool: """Check if the current batch is empty. @@ -313,7 +332,6 @@ def close(self, ignore_missing: bool = False) -> Optional[bool]: smaller than the batch size). :rtype: bool | None :raise arango.exceptions.CursorCloseError: If operation fails. - :raise arango.exceptions.CursorStateError: If cursor ID is not set. 
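On the cursor side, the execution plan from `extra["plan"]` is now retained, and two more statistics keys are normalized to snake_case. A sketch:

```python
cursor = db.aql.execute("FOR s IN students RETURN s", profile=2)

plan = cursor.plan()  # None unless the server returned extra["plan"]

stats = cursor.statistics()
# New normalized keys alongside the existing ones:
print(stats.get("lookups"))               # was documentLookups
print(stats.get("intermediate_commits"))  # was intermediateCommits
```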
""" if self._id is None: return None diff --git a/arango/database.py b/arango/database.py index e33bd5ae..8a145910 100644 --- a/arango/database.py +++ b/arango/database.py @@ -8,7 +8,7 @@ from datetime import datetime from numbers import Number -from typing import Any, List, Optional, Sequence, Union +from typing import Any, Dict, List, Optional, Sequence, Union from warnings import warn from arango.api import ApiGroup @@ -17,6 +17,7 @@ from arango.cluster import Cluster from arango.collection import StandardCollection from arango.connection import Connection +from arango.errno import HTTP_NOT_FOUND from arango.exceptions import ( AnalyzerCreateError, AnalyzerDeleteError, @@ -27,10 +28,12 @@ CollectionCreateError, CollectionDeleteError, CollectionListError, + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, GraphCreateError, GraphDeleteError, GraphListError, @@ -40,15 +43,23 @@ PermissionListError, PermissionResetError, PermissionUpdateError, + ServerAvailableOptionsGetError, + ServerCurrentOptionsGetError, ServerDetailsError, ServerEchoError, ServerEncryptionError, ServerEngineError, + ServerExecuteError, ServerLicenseGetError, ServerLicenseSetError, ServerLogLevelError, + ServerLogLevelResetError, ServerLogLevelSetError, + ServerLogSettingError, + ServerLogSettingSetError, ServerMetricsError, + ServerModeError, + ServerModeSetError, ServerReadLogError, ServerReloadRoutingError, ServerRequiredDBVersionError, @@ -224,6 +235,36 @@ def response_handler(resp: Response) -> Json: return self._execute(request, response_handler) + def execute(self, command: str) -> Result[Any]: + """Execute raw Javascript command on the server. + + Executes the JavaScript code in the body on the server as + the body of a function with no arguments. If you have a + return statement then the return value you produce will be returned + as 'application/json'. + + NOTE: this method endpoint will only be usable if the server + was started with the option `--javascript.allow-admin-execute true`. + The default value of this option is false, which disables the execution + of user-defined code and disables this API endpoint entirely. + This is also the recommended setting for production. + + :param command: Javascript command to execute. + :type command: str + :return: Return value of **command**, if any. + :rtype: Any + :raise arango.exceptions.ServerExecuteError: If execution fails. + """ + request = Request(method="post", endpoint="/_admin/execute", data=command) + + def response_handler(resp: Response) -> Any: + if not resp.is_success: + raise ServerExecuteError(resp, request) + + return resp.body + + return self._execute(request, response_handler) + def execute_transaction( self, command: str, @@ -396,7 +437,7 @@ def set_license(self, license: str, force: bool = False) -> Result[Json]: instance. Can be called on single servers, Coordinators, and DB-Servers. - :param license: The Base64-encoded license string. + :param license: The Base64-encoded license string, wrapped in double-quotes. :type license: str :param force: If set to True, the new license will be set even if it expires sooner than the current license. @@ -439,6 +480,47 @@ def response_handler(resp: Response) -> Json: return self._execute(request, response_handler) + def compact( + self, + change_level: Optional[bool] = None, + compact_bottom_most_level: Optional[bool] = None, + ) -> Result[Json]: + """Compact all databases. 
+ + NOTE: This command can cause a full rewrite of all data in all databases, + which may take very long for large databases. It should thus only be used with + care and only when additional I/O load can be tolerated for a prolonged time. + + This method can be used to reclaim disk space after substantial data deletions + have taken place, by compacting the entire database system data. + + This method requires superuser access. + + :param change_level: Whether or not compacted data should be moved to + the minimum possible level. Default value is False. + :type change_level: bool | None + :param compact_bottom_most_level: Whether or not to compact the + bottom-most level of data. Default value is False. + :type compact_bottom_most_level: bool | None + :return: Collection compact. + :rtype: dict + :raise arango.exceptions.CollectionCompactError: If retrieval fails. + """ + data = {} + if change_level is not None: + data["changeLevel"] = change_level + if compact_bottom_most_level is not None: + data["compactBottomMostLevel"] = compact_bottom_most_level + + request = Request(method="put", endpoint="/_admin/compact", data=data) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + raise DatabaseCompactError(resp, request) + + return self._execute(request, response_handler) + def required_db_version(self) -> Result[str]: """Return required version of target database. @@ -510,6 +592,56 @@ def response_handler(resp: Response) -> str: return self._execute(request, response_handler) + def mode(self) -> Result[str]: + """Return the server mode (default or read-only) + + In a read-only server, all write operations will fail + with an error code of 1004 (ERROR_READ_ONLY). Creating or dropping + databases and collections will also fail with error code 11 (ERROR_FORBIDDEN). + + :return: Server mode. Possible values are "default" or "readonly". + :rtype: str + :raise arango.exceptions.ServerModeError: If retrieval fails. + """ + request = Request(method="get", endpoint="/_admin/server/mode") + + def response_handler(resp: Response) -> str: + if resp.is_success: + return str(resp.body["mode"]) + + raise ServerModeError(resp, request) + + return self._execute(request, response_handler) + + def set_mode(self, mode: str) -> Result[Json]: + """Set the server mode to read-only or default. + + Update mode information about a server. The JSON response will + contain a field mode with the value readonly or default. + In a read-only server all write operations will fail with an error + code of 1004 (ERROR_READ_ONLY). Creating or dropping of databases + and collections will also fail with error code 11 (ERROR_FORBIDDEN). + + This is a protected API. It requires authentication and administrative + server rights. + + :param mode: Server mode. Possible values are "default" or "readonly". + :type mode: str + :return: Server mode. + :rtype: str + :raise arango.exceptions.ServerModeSetError: If set fails. + """ + request = Request( + method="put", endpoint="/_admin/server/mode", data={"mode": mode} + ) + + def response_handler(resp: Response) -> Json: + if resp.is_success: + return format_body(resp.body) + raise ServerModeSetError(resp, request) + + return self._execute(request, response_handler) + def time(self) -> Result[datetime]: """Return server system time. @@ -526,14 +658,23 @@ def response_handler(resp: Response) -> datetime: return self._execute(request, response_handler) - def echo(self) -> Result[Json]: - """Return details of the last request (e.g. 
     def required_db_version(self) -> Result[str]:
         """Return required version of target database.
@@ -510,6 +592,56 @@ def response_handler(resp: Response) -> str:
 
         return self._execute(request, response_handler)
 
+    def mode(self) -> Result[str]:
+        """Return the server mode (default or read-only).
+
+        In a read-only server, all write operations will fail
+        with an error code of 1004 (ERROR_READ_ONLY). Creating or dropping
+        databases and collections will also fail with error code 11 (ERROR_FORBIDDEN).
+
+        :return: Server mode. Possible values are "default" or "readonly".
+        :rtype: str
+        :raise arango.exceptions.ServerModeError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_admin/server/mode")
+
+        def response_handler(resp: Response) -> str:
+            if resp.is_success:
+                return str(resp.body["mode"])
+
+            raise ServerModeError(resp, request)
+
+        return self._execute(request, response_handler)
+
+    def set_mode(self, mode: str) -> Result[Json]:
+        """Set the server mode to read-only or default.
+
+        Update mode information about a server. The JSON response will
+        contain a field mode with the value readonly or default.
+        In a read-only server all write operations will fail with an error
+        code of 1004 (ERROR_READ_ONLY). Creating or dropping databases
+        and collections will also fail with error code 11 (ERROR_FORBIDDEN).
+
+        This is a protected API. It requires authentication and administrative
+        server rights.
+
+        :param mode: Server mode. Possible values are "default" or "readonly".
+        :type mode: str
+        :return: New server mode.
+        :rtype: dict
+        :raise arango.exceptions.ServerModeSetError: If set fails.
+        """
+        request = Request(
+            method="put", endpoint="/_admin/server/mode", data={"mode": mode}
+        )
+
+        def response_handler(resp: Response) -> Json:
+            if resp.is_success:
+                return format_body(resp.body)
+            raise ServerModeSetError(resp, request)
+
+        return self._execute(request, response_handler)
+
     def time(self) -> Result[datetime]:
         """Return server system time.
 
@@ -526,14 +658,23 @@ def response_handler(resp: Response) -> datetime:
 
         return self._execute(request, response_handler)
 
-    def echo(self) -> Result[Json]:
-        """Return details of the last request (e.g. headers, payload).
+    def echo(self, body: Optional[Any] = None) -> Result[Json]:
+        """Return details of the last request (e.g. headers, payload),
+        or echo the given request body.
 
+        :param body: The body of the request. Can be of any type
+            and is simply forwarded. If not set, the details of the last
+            request are returned.
+        :type body: dict | list | str | int | float | None
         :return: Details of the last request.
         :rtype: dict
         :raise arango.exceptions.ServerEchoError: If retrieval fails.
         """
-        request = Request(method="get", endpoint="/_admin/echo")
+        request = (
+            Request(method="get", endpoint="/_admin/echo")
+            if body is None
+            else Request(method="post", endpoint="/_admin/echo", data=body)
+        )
 
         def response_handler(resp: Response) -> Json:
             if not resp.is_success:
@@ -749,7 +890,55 @@ def response_handler(resp: Response) -> Json:
 
         return self._execute(request, response_handler)
 
-    def log_levels(self, server_id: Optional[str] = None) -> Result[Json]:
+    def log_settings(self) -> Result[Json]:
+        """Return the structured log settings.
+
+        :return: Current log settings. False values are not returned.
+        :rtype: dict
+        """
+        request = Request(method="get", endpoint="/_admin/log/structured")
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogSettingError(resp, request)
+            result: Json = resp.body
+            return result
+
+        return self._execute(request, response_handler)
+
+    def set_log_settings(self, **kwargs: Dict[str, Any]) -> Result[Json]:
+        """Set the structured log settings.
+
+        This method takes arbitrary keyword arguments where the keys are the
+        structured log parameters and the values are true or false, for either
+        enabling or disabling the parameters.
+
+        .. code-block:: python
+
+            db.set_log_settings(
+                database=True,
+                url=True,
+                username=False,
+            )
+
+        :param kwargs: Structured log parameters.
+        :type kwargs: Dict[str, Any]
+        :return: New log settings. False values are not returned.
+        :rtype: dict
+        """
+        request = Request(method="put", endpoint="/_admin/log/structured", data=kwargs)
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogSettingSetError(resp, request)
+            result: Json = resp.body
+            return result
+
+        return self._execute(request, response_handler)
+
+    def log_levels(
+        self, server_id: Optional[str] = None, with_appenders: Optional[bool] = None
+    ) -> Result[Json]:
         """Return current logging levels.
 
         :param server_id: Forward log level to a specific server. This makes it
@@ -757,12 +946,16 @@ def log_levels(self, server_id: Optional[str] = None) -> Result[Json]:
             JWT authentication whereas Coordinators also support authentication
             using usernames and passwords.
         :type server_id: str
+        :param with_appenders: Include appenders in the response.
+        :type with_appenders: bool
        :return: Current logging levels.
         :rtype: dict
         """
         params: Params = {}
         if server_id is not None:
             params["serverId"] = server_id
+        if with_appenders is not None:
+            params["withAppenders"] = with_appenders
 
         request = Request(method="get", endpoint="/_admin/log/level", params=params)
 
@@ -775,7 +968,10 @@ def response_handler(resp: Response) -> Json:
 
         return self._execute(request, response_handler)
 
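A brief sketch of the new server-mode API against a local single server (connection details assumed, mirroring the README examples):

```python
from arango import ArangoClient

client = ArangoClient(hosts="http://localhost:8529")
db = client.db("_system", username="root", password="passwd")

assert db.mode() == "default"

# Switch to read-only: writes now fail with ERROR_READ_ONLY (1004) and
# DDL operations with ERROR_FORBIDDEN (11), until the mode is reset.
db.set_mode("readonly")
db.set_mode("default")
```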
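Likewise for the structured-logging additions, reusing the `db` handle from the previous snippet; the chosen parameters (`database`, `url`, `username`) are the ones named in the docstring above:

```python
# Enable per-request context fields in the server log; the response
# echoes only the parameters that end up enabled.
db.set_log_settings(database=True, url=True, username=False)
print(db.log_settings())  # e.g. {'database': True, 'url': True}

# Logging levels can now be fetched together with appender information.
levels = db.log_levels(with_appenders=True)
```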
     def set_log_levels(
-        self, server_id: Optional[str] = None, **kwargs: str
+        self,
+        server_id: Optional[str] = None,
+        with_appenders: Optional[bool] = None,
+        **kwargs: Dict[str, Any],
     ) -> Result[Json]:
         """Set the logging levels.
@@ -797,12 +993,18 @@ def set_log_levels(
             JWT authentication whereas Coordinators also support authentication
             using usernames and passwords.
         :type server_id: str | None
+        :param with_appenders: Include appenders in the request.
+        :type with_appenders: bool | None
+        :param kwargs: Logging levels.
+        :type kwargs: Dict[str, Any]
         :return: New logging levels.
         :rtype: dict
         """
         params: Params = {}
         if server_id is not None:
             params["serverId"] = server_id
+        if with_appenders is not None:
+            params["withAppenders"] = with_appenders
 
         request = Request(
             method="put", endpoint="/_admin/log/level", params=params, data=kwargs
@@ -816,6 +1018,35 @@ def response_handler(resp: Response) -> Json:
 
         return self._execute(request, response_handler)
 
+    def reset_log_levels(self, server_id: Optional[str] = None) -> Result[Json]:
+        """Reset the logging levels.
+
+        Revert the server's log level settings to the values they had at startup,
+        as determined by the startup options specified on the command-line,
+        a configuration file, and the factory defaults.
+
+        :param server_id: Forward log level to a specific server. This makes it
+            easier to adjust the log levels in clusters because DB-Servers require
+            JWT authentication whereas Coordinators also support authentication
+            using usernames and passwords.
+        :type server_id: str | None
+        :return: New logging levels.
+        :rtype: dict
+        """
+        params: Params = {}
+        if server_id is not None:
+            params["serverId"] = server_id
+
+        request = Request(method="delete", endpoint="/_admin/log/level", params=params)
+
+        def response_handler(resp: Response) -> Json:
+            if not resp.is_success:
+                raise ServerLogLevelResetError(resp, request)
+            result: Json = resp.body
+            return result
+
+        return self._execute(request, response_handler)
+
     def reload_routing(self) -> Result[bool]:
         """Reload the routing information.
 
@@ -838,7 +1069,7 @@ def metrics(self) -> Result[str]:
         :return: Server metrics in Prometheus format.
         :rtype: str
         """
-        request = Request(method="get", endpoint="/_admin/metrics")
+        request = Request(method="get", endpoint="/_admin/metrics/v2")
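A sketch of the log-level round trip, including the new reset call and the `/_admin/metrics/v2` switch; `aql` is a standard ArangoDB log topic, and the connection details are again assumed:

```python
from arango import ArangoClient

db = ArangoClient(hosts="http://localhost:8529").db(
    "_system", username="root", password="passwd"
)

# Raise one topic; with_appenders goes into the query string while the
# keyword arguments form the request body.
db.set_log_levels(aql="DEBUG", with_appenders=True)

# New in this release: revert all levels to their startup values.
db.reset_log_levels()

# metrics() now reads from /_admin/metrics/v2 (still Prometheus text).
print(db.metrics().splitlines()[0])
```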
         def response_handler(resp: Response) -> str:
             if resp.is_success:
@@ -933,6 +1164,58 @@ def response_handler(resp: Response) -> Json:
 
         return self._execute(request, response_handler)
 
+    def options(self) -> Result[Json]:
+        """Return the currently-set server options (ArangoDB 3.12+).
+
+        As this API may reveal sensitive data about the deployment, it can only
+        be accessed from inside the _system database. In addition, there is a
+        policy control startup option --server.options-api that determines if and
+        to whom the API is made available. This option can have the following
+        values:
+        - disabled: API is disabled.
+        - jwt: API can only be accessed via superuser JWT.
+        - admin: API can be accessed by admin users in the _system database only.
+        - public: everyone with access to _system database can access the API.
+
+        :return: Server options.
+        :rtype: dict
+        """
+        request = Request(method="get", endpoint="/_admin/options")
+
+        def response_handler(resp: Response) -> Json:
+            if resp.is_success:
+                result: Json = resp.body
+                return result
+            raise ServerCurrentOptionsGetError(resp, request)
+
+        return self._execute(request, response_handler)
+
+    def options_available(self) -> Result[Json]:
+        """Return a description of all available server options (ArangoDB 3.12+).
+
+        As this API may reveal sensitive data about the deployment, it can only
+        be accessed from inside the _system database. In addition, there is a
+        policy control startup option --server.options-api that determines if and
+        to whom the API is made available. This option can have the following
+        values:
+        - disabled: API is disabled.
+        - jwt: API can only be accessed via superuser JWT.
+        - admin: API can be accessed by admin users in the _system database only.
+        - public: everyone with access to _system database can access the options API.
+
+        :return: Server options.
+        :rtype: dict
+        """
+        request = Request(method="get", endpoint="/_admin/options-description")
+
+        def response_handler(resp: Response) -> Json:
+            if resp.is_success:
+                result: Json = resp.body
+                return result
+            raise ServerAvailableOptionsGetError(resp, request)
+
+        return self._execute(request, response_handler)
+
     #######################
     # Database Management #
     #######################
@@ -1002,8 +1285,9 @@ def create_database(
         :type name: str
         :param users: List of users with access to the new database, where each
             user is a dictionary with fields "username", "password", "active"
-            and "extra" (see below for example). If not set, only the admin and
-            current user are granted access.
+            and "extra" (see below for example). If not set, the default user root
+            will be used to ensure that the new database will be accessible after
+            it is created.
         :type users: [dict]
         :param replication_factor: Default replication factor for collections
             created in this database. Special values include "satellite" which
@@ -1257,10 +1541,11 @@ def create_collection(
         :raise arango.exceptions.CollectionCreateError: If create fails.
         """
         key_options: Json = {"type": key_generator, "allowUserKeys": user_keys}
-        if key_increment is not None:
-            key_options["increment"] = key_increment
-        if key_offset is not None:
-            key_options["offset"] = key_offset
+        if key_generator == "autoincrement":
+            if key_increment is not None:
+                key_options["increment"] = key_increment
+            if key_offset is not None:
+                key_options["offset"] = key_offset
 
         data: Json = {
             "name": name,
@@ -1360,12 +1645,14 @@ def has_graph(self, name: str) -> Result[bool]:
         :return: True if graph exists, False otherwise.
         :rtype: bool
         """
-        request = Request(method="get", endpoint="/_api/gharial")
+        request = Request(method="get", endpoint=f"/_api/gharial/{name}")
 
         def response_handler(resp: Response) -> bool:
-            if not resp.is_success:
-                raise GraphListError(resp, request)
-            return any(name == graph["_key"] for graph in resp.body["graphs"])
+            if resp.is_success:
+                return True
+            if resp.status_code == HTTP_NOT_FOUND:
+                return False
+            raise GraphListError(resp, request)
 
         return self._execute(request, response_handler)
 
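The key-generator gating above can be seen with an autoincrement collection (autoincrement keys are a single-server feature; the names here are illustrative):

```python
from arango import ArangoClient

db = ArangoClient(hosts="http://localhost:8529").db(
    "test", username="root", password="passwd"
)

# increment/offset are now sent only when key_generator is
# "autoincrement", so other generators no longer receive stray options.
events = db.create_collection(
    "events",
    key_generator="autoincrement",
    key_increment=10,
    key_offset=100,
)
print(events.insert({})["_key"])  # e.g. "110"
```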
@@ -1414,6 +1701,8 @@ def create_graph(
         shard_count: Optional[int] = None,
         replication_factor: Optional[int] = None,
         write_concern: Optional[int] = None,
+        satellite_collections: Optional[Sequence[str]] = None,
+        sync: Optional[bool] = None,
     ) -> Result[Graph]:
         """Create a new graph.
 
@@ -1438,7 +1727,8 @@ def create_graph(
         :param smart_field: Document field used to shard the vertices of
             the graph. To use this, parameter **smart** must be set to True
             and every vertex in the graph must have the smart field. Applies only
-            to enterprise version of ArangoDB.
+            to enterprise version of ArangoDB. NOTE: If this field is
+            None and **smart** is True, an Enterprise Graph will be created.
         :type smart_field: str | None
         :param shard_count: Number of shards used for every collection in the
             graph. To use this, parameter **smart** must be set to True and
@@ -1461,6 +1751,14 @@ def create_graph(
             parameter cannot be larger than that of **replication_factor**.
             Default value is 1. Used for clusters only.
         :type write_concern: int
+        :param satellite_collections: An array of collection names that is
+            used to create SatelliteCollections for a (Disjoint) SmartGraph
+            using SatelliteCollections (Enterprise Edition only). Each array
+            element must be a string and a valid collection name. The
+            collection type cannot be modified later.
+        :type satellite_collections: [str] | None
+        :param sync: Wait until everything is synced to disk.
+        :type sync: bool | None
         :return: Graph API wrapper.
         :rtype: arango.graph.Graph
         :raise arango.exceptions.GraphCreateError: If create fails.
@@ -1501,8 +1799,19 @@ def create_graph(
             data["options"]["replicationFactor"] = replication_factor
         if write_concern is not None:  # pragma: no cover
             data["options"]["writeConcern"] = write_concern
+        if satellite_collections is not None:  # pragma: no cover
+            data["options"]["satellites"] = satellite_collections
 
-        request = Request(method="post", endpoint="/_api/gharial", data=data)
+        params: Params = {}
+        if sync is not None:
+            params["waitForSync"] = sync
+
+        request = Request(
+            method="post",
+            endpoint="/_api/gharial",
+            data=data,
+            params=params,
+        )
 
         def response_handler(resp: Response) -> Graph:
             if resp.is_success:
@@ -2675,6 +2984,38 @@ def response_handler(resp: Response) -> bool:
 
         return self._execute(request, response_handler)
 
+    ###########
+    # Support #
+    ###########
+
+    def support_info(self) -> Result[Json]:
+        """Return information about the deployment.
+
+        Retrieves deployment information for support purposes.
+        The endpoint returns data about the ArangoDB version used,
+        the host (operating system, server ID, CPU and storage capacity,
+        current utilization, a few metrics) and the other servers in the
+        deployment (in case of Active Failover or cluster deployments).
+
+        NOTE: This method can only be accessed from inside the **_system** database.
+        There is a policy control startup option `--server.support-info-api` that
+        controls if and to whom the API is made available.
+
+        :return: Deployment information.
+        :rtype: dict
+        :raise arango.exceptions.DatabaseSupportInfoError: If retrieval fails.
+        """
+        request = Request(method="get", endpoint="/_admin/support-info")
+
+        def response_handler(resp: Response) -> Json:
+            if resp.is_success:
+                result: Json = resp.body
+                return result
+
+            raise DatabaseSupportInfoError(resp, request)
+
+        return self._execute(request, response_handler)
+
 
 class StandardDatabase(Database):
     """Standard database API wrapper."""
@@ -2731,6 +3072,14 @@ def begin_batch_execution(
         """
         return BatchDatabase(self._conn, return_result, max_workers)
 
+    def fetch_transaction(self, transaction_id: str) -> "TransactionDatabase":
+        """Fetch an existing transaction.
+
+        :param transaction_id: The ID of the existing transaction.
+        :type transaction_id: str
+        :return: Database API wrapper object specifically for the transaction.
+        :rtype: arango.database.TransactionDatabase
+        """
+        return TransactionDatabase(connection=self._conn, transaction_id=transaction_id)
+
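A sketch of the extended graph-creation call, assuming an ArangoDB Enterprise Edition cluster (SmartGraphs and satellite collections are Enterprise-only); the graph, collection, and field names are illustrative:

```python
from arango import ArangoClient

db = ArangoClient(hosts="http://localhost:8529").db(
    "test", username="root", password="passwd"
)

# has_graph() now issues a direct GET /_api/gharial/{name} lookup
# instead of listing every graph and scanning the result.
if not db.has_graph("school"):
    db.create_graph(
        "school",
        edge_definitions=[
            {
                "edge_collection": "teaches",
                "from_vertex_collections": ["teachers"],
                "to_vertex_collections": ["lectures"],
            }
        ],
        smart=True,
        smart_field="region",
        # Enterprise-only: replicate these collections to every shard.
        satellite_collections=["departments"],
        sync=True,  # forwarded as the waitForSync query parameter
    )
```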
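And a sketch of resuming a server-side transaction via the new `fetch_transaction()`, assuming a `students` collection exists; in practice the transaction ID might come from another process or worker:

```python
from arango import ArangoClient

db = ArangoClient(hosts="http://localhost:8529").db(
    "test", username="root", password="passwd"
)

txn_db = db.begin_transaction(write="students", skip_fast_lock_round=True)
txn_id = txn_db.transaction_id

# Re-attach to the same server-side transaction and finish it there.
same_txn = db.fetch_transaction(txn_id)
same_txn.collection("students").insert({"name": "jane", "age": 39})
same_txn.commit_transaction()
```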
     def begin_transaction(
         self,
         read: Union[str, Sequence[str], None] = None,
@@ -2740,6 +3089,7 @@ def begin_transaction(
         allow_implicit: Optional[bool] = None,
         lock_timeout: Optional[int] = None,
         max_size: Optional[int] = None,
+        skip_fast_lock_round: Optional[bool] = None,
     ) -> "TransactionDatabase":
         """Begin a transaction.
 
@@ -2763,6 +3113,9 @@ def begin_transaction(
         :type lock_timeout: int | None
         :param max_size: Max transaction size in bytes.
         :type max_size: int | None
+        :param skip_fast_lock_round: Whether to disable fast locking for write
+            operations.
+        :type skip_fast_lock_round: bool | None
         :return: Database API wrapper object specifically for transactions.
         :rtype: arango.database.TransactionDatabase
         """
@@ -2775,6 +3128,7 @@ def begin_transaction(
             allow_implicit=allow_implicit,
             lock_timeout=lock_timeout,
             max_size=max_size,
+            skip_fast_lock_round=skip_fast_lock_round,
         )
 
     def begin_controlled_execution(
@@ -2908,6 +3262,11 @@ class TransactionDatabase(Database):
     :type lock_timeout: int | None
     :param max_size: Max transaction size in bytes.
     :type max_size: int | None
+    :param transaction_id: Initialize using an existing transaction instead of creating
+        a new transaction.
+    :type transaction_id: str | None
+    :param skip_fast_lock_round: Whether to disable fast locking for write operations.
+    :type skip_fast_lock_round: bool | None
     """
 
     def __init__(
@@ -2920,6 +3279,8 @@ def __init__(
         allow_implicit: Optional[bool] = None,
         lock_timeout: Optional[int] = None,
         max_size: Optional[int] = None,
+        transaction_id: Optional[str] = None,
+        skip_fast_lock_round: Optional[bool] = None,
     ) -> None:
         self._executor: TransactionApiExecutor
         super().__init__(
@@ -2933,6 +3294,8 @@ def __init__(
                 allow_implicit=allow_implicit,
                 lock_timeout=lock_timeout,
                 max_size=max_size,
+                transaction_id=transaction_id,
+                skip_fast_lock_round=skip_fast_lock_round,
             ),
         )
diff --git a/arango/errno.py b/arango/errno.py
index bc42bcab..1f48b216 100644
--- a/arango/errno.py
+++ b/arango/errno.py
@@ -1,1339 +1,1168 @@
 ##################
-# General Errors #
+# General errors #
 ##################
 
-# No error occurred.
+# no error
 NO_ERROR = 0
 
-# General error occurred.
+# failed
 FAILED = 1
 
-# Operating system error occurred.
+# system error
 SYS_ERROR = 2
 
-# Out of memory.
+# out of memory
 OUT_OF_MEMORY = 3
 
-# Internal error occurred.
+# internal error
 INTERNAL = 4
 
-# Illegal number representation given.
+# illegal number
 ILLEGAL_NUMBER = 5
 
-# Numeric overflow occurred.
+# numeric overflow
 NUMERIC_OVERFLOW = 6
 
-# Unknown option supplied by user.
+# illegal option
 ILLEGAL_OPTION = 7
 
-# Detected PID without living process.
+# dead process identifier
 DEAD_PID = 8
 
-# Feature not implemented.
+# not implemented
 NOT_IMPLEMENTED = 9
 
-# Bad parameter.
+# bad parameter
 BAD_PARAMETER = 10
 
-# Missing permission.
+# forbidden
 FORBIDDEN = 11
 
-# Out of memory (mmap).
-OUT_OF_MEMORY_MMAP = 12
-
-# Corrupt CSV line.
+# csv is corrupt
 CORRUPTED_CSV = 13
 
-# File not found.
+# file not found
 FILE_NOT_FOUND = 14
 
-# Cannot write to file.
+# cannot write file
 CANNOT_WRITE_FILE = 15
 
-# Cannot overwrite file.
+# cannot overwrite file
 CANNOT_OVERWRITE_FILE = 16
 
-# Type error occurred.
+# type error
 TYPE_ERROR = 17
 
-# Timed out waiting for a lock.
+# lock timeout
 LOCK_TIMEOUT = 18
 
-# Cannot create a directory.
+# cannot create directory
 CANNOT_CREATE_DIRECTORY = 19
 
-# Cannot create a temporary file.
+# cannot create temporary file
 CANNOT_CREATE_TEMP_FILE = 20
 
-# Request cancelled by user.
+# canceled request
 REQUEST_CANCELED = 21
 
-# Raised for debugging.
+# intentional debug error
 DEBUG = 22
 
-# Invalid IP address.
+# IP address is invalid
 IP_ADDRESS_INVALID = 25
 
-# File exists already.
+# file exists
 FILE_EXISTS = 27
 
-# Locked resource or operation.
+# locked
 LOCKED = 28
 
-# Deadlock detected when accessing collections.
+# deadlock detected
 DEADLOCK = 29
 
-# Call failed as server shutdown is in progress.
+# shutdown in progress
 SHUTTING_DOWN = 30
 
-# Feature only for enterprise version of ArangoDB.
+# only enterprise version ONLY_ENTERPRISE = 31 -# Resource usage exceeded maximum value. +# resource limit exceeded RESOURCE_LIMIT = 32 -# ICU operation failed. +# icu error: %s ICU_ERROR = 33 -# Cannot read a file. +# cannot read file CANNOT_READ_FILE = 34 -# Incompatible version of ArangoDB. +# incompatible server version INCOMPATIBLE_VERSION = 35 -# Requested resource disabled. +# disabled DISABLED = 36 -# JSON string could not be parsed. +# malformed json MALFORMED_JSON = 37 -# Call cannot succeed because the server startup phase is still in progress. +# startup ongoing STARTING_UP = 38 +# error during deserialization +DESERIALIZE = 39 + +# reached end of file +END_OF_FILE = 40 + ########################### -# HTTP Error Status Codes # +# HTTP error status codes # ########################### -# Bad HTTP parameter. +# bad parameter HTTP_BAD_PARAMETER = 400 -# User unauthorized. +# unauthorized HTTP_UNAUTHORIZED = 401 -# Operation forbidden. +# forbidden HTTP_FORBIDDEN = 403 -# Unknown URI. +# not found HTTP_NOT_FOUND = 404 -# HTTP method unknown. +# method not supported HTTP_METHOD_NOT_ALLOWED = 405 -# HTTP content type not supported. +# request not acceptable HTTP_NOT_ACCEPTABLE = 406 -# Timeout occurred. +# request timeout HTTP_REQUEST_TIMEOUT = 408 -# Conflict occurred in an HTTP operation. +# conflict HTTP_CONFLICT = 409 -# Requested content has been permanently deleted. +# content permanently deleted HTTP_GONE = 410 -# Precondition not met. +# precondition failed HTTP_PRECONDITION_FAILED = 412 -# Internal server error occurred. +# enhance your calm +HTTP_ENHANCE_YOUR_CALM = 420 + +# internal server error HTTP_SERVER_ERROR = 500 -# API is not implemented. +# not implemented HTTP_NOT_IMPLEMENTED = 501 -# Service temporarily unavailable. +# service unavailable HTTP_SERVICE_UNAVAILABLE = 503 -# Service contacted by ArangoDB did not respond in time. +# gateway timeout HTTP_GATEWAY_TIMEOUT = 504 ########################## -# HTTP Processing Errors # +# HTTP processing errors # ########################## -# Corrupted JSON string. +# invalid JSON object HTTP_CORRUPTED_JSON = 600 -# URL contains superfluous suffices. +# superfluous URL suffices HTTP_SUPERFLUOUS_SUFFICES = 601 #################################### -# Internal ArangoDB Storage Errors # +# Internal ArangoDB storage errors # #################################### -# Datafile in illegal state. +# illegal state ILLEGAL_STATE = 1000 -# User attempted to write to a sealed datafile. -DATAFILE_SEALED = 1002 - -# Read-only datafile or collection. +# read only READ_ONLY = 1004 -# Duplicate identifier detected. +# duplicate identifier DUPLICATE_IDENTIFIER = 1005 -# Datafile unreadable. -DATAFILE_UNREADABLE = 1006 - -# Datafile empty. -DATAFILE_EMPTY = 1007 - -# Error occurred during WAL log file recovery. -RECOVERY = 1008 - -# Required datafile statistics object not found. -DATAFILE_STATISTICS_NOT_FOUND = 1009 - #################################### -# External ArangoDB Storage Errors # +# External ArangoDB storage errors # #################################### -# Datafile corrupted. +# corrupted datafile CORRUPTED_DATAFILE = 1100 -# Parameter file corrupted or cannot be read. +# illegal or unreadable parameter file ILLEGAL_PARAMETER_FILE = 1101 -# Collection contains one or more corrupted datafiles. +# corrupted collection CORRUPTED_COLLECTION = 1102 -# System call mmap failed. -MMAP_FAILED = 1103 - -# Filesystem full. +# filesystem full FILESYSTEM_FULL = 1104 -# Cannot create journal. 
-NO_JOURNAL = 1105 - -# Datafile of the same name already exists. -DATAFILE_ALREADY_EXISTS = 1106 - -# Database directory locked by another process. +# database directory is locked DATADIR_LOCKED = 1107 -# Directory of the same name already exists. -COLLECTION_DIRECTORY_ALREADY_EXISTS = 1108 - -# System call msync failed. -MSYNC_FAILED = 1109 - -# Cannot lock the database directory on startup. -DATADIR_UNLOCKABLE = 1110 - -# Server waited too long for the datafile to be synced to disk. -SYNC_TIMEOUT = 1111 - ################################### -# General ArangoDB Storage Errors # +# General ArangoDB storage errors # ################################### -# Conflict detected while updating or deleting a document. +# conflict CONFLICT = 1200 -# Database directory invalid. -DATADIR_INVALID = 1201 - -# Unknown document identifier or handle. +# document not found DOCUMENT_NOT_FOUND = 1202 -# Collection with given identifier or name unknown. +# collection or view not found DATA_SOURCE_NOT_FOUND = 1203 -# Missing collection parameter. +# parameter 'collection' not found COLLECTION_PARAMETER_MISSING = 1204 -# Invalid document handle. +# illegal document identifier DOCUMENT_HANDLE_BAD = 1205 -# Maximal journal size too small. -MAXIMAL_SIZE_TOO_SMALL = 1206 - -# Duplicate name detected. +# duplicate name DUPLICATE_NAME = 1207 -# Illegal name detected. +# illegal name ILLEGAL_NAME = 1208 -# No suitable index for query. +# no suitable index known NO_INDEX = 1209 -# Unique constraint violation. +# unique constraint violated UNIQUE_CONSTRAINT_VIOLATED = 1210 -# Index with unknown identifier. +# index not found INDEX_NOT_FOUND = 1212 -# Cross-collection requested. +# cross collection request not allowed CROSS_COLLECTION_REQUEST = 1213 -# Index handle corrupted. +# illegal index identifier INDEX_HANDLE_BAD = 1214 -# Document too large to fit into any datafile. +# document too large DOCUMENT_TOO_LARGE = 1216 -# Collection must be unloaded. -COLLECTION_NOT_UNLOADED = 1217 - -# Invalid collection type. +# collection type invalid COLLECTION_TYPE_INVALID = 1218 -# Failed to parse an attribute name definition. +# parsing attribute name definition failed ATTRIBUTE_PARSER_FAILED = 1220 -# Corrupted document key. +# illegal document key DOCUMENT_KEY_BAD = 1221 -# User-defined document key supplied for collections with auto key generation. +# unexpected document key DOCUMENT_KEY_UNEXPECTED = 1222 -# Database directory not writable for current user. +# server database directory not writable DATADIR_NOT_WRITABLE = 1224 -# Key generator out of keys. +# out of keys OUT_OF_KEYS = 1225 -# Document key missing. +# missing document key DOCUMENT_KEY_MISSING = 1226 -# There was an attempt to create a document of invalid type. +# invalid document type DOCUMENT_TYPE_INVALID = 1227 -# Non-existing database accessed. +# database not found DATABASE_NOT_FOUND = 1228 -# Invalid database used. +# database name invalid DATABASE_NAME_INVALID = 1229 -# Operation requested in non-system database. +# operation only allowed in system database USE_SYSTEM_DATABASE = 1230 -# Invalid key generator. +# invalid key generator INVALID_KEY_GENERATOR = 1232 -# Undefined or invalid "_from" or "_to" values in an edge document. +# expecting both `_from` and `_to` attributes to be defined in the edge document and have the format `/` INVALID_EDGE_ATTRIBUTE = 1233 -# Cannot create index. +# index creation failed INDEX_CREATION_FAILED = 1235 -# Server is write-throttled and a write operation waited too long. 
-WRITE_THROTTLE_TIMEOUT = 1236 - -# Collection type mismatch. +# collection type mismatch COLLECTION_TYPE_MISMATCH = 1237 -# Collection accessed but not yet loaded. +# collection not loaded COLLECTION_NOT_LOADED = 1238 -# Document revision corrupt or missing. +# illegal document revision DOCUMENT_REV_BAD = 1239 -# Read cannot be completed by storage engine. +# incomplete read INCOMPLETE_READ = 1240 +# not supported by old legacy data format +OLD_ROCKSDB_FORMAT = 1241 + +# an index with legacy sorted keys has been found +INDEX_HAS_LEGACY_SORTED_KEYS = 1242 + ################################### -# Checked ArangoDB Storage Errors # +# Checked ArangoDB storage errors # ################################### -# Datafile full. -DATAFILE_FULL = 1300 - -# Server database directory empty. +# server database directory is empty EMPTY_DATADIR = 1301 -# Operation needs to be retried. +# operation should be tried again TRY_AGAIN = 1302 -# Storage engine busy. +# engine is busy BUSY = 1303 -# Datafile merge in progress and the operation cannot be completed. +# merge in progress MERGE_IN_PROGRESS = 1304 -# Storage engine encountered an I/O error. +# storage engine I/O error IO_ERROR = 1305 ############################### -# ArangoDB Replication Errors # +# ArangoDB replication errors # ############################### -# Replication applier received no (or incomplete) response from master. +# no response REPLICATION_NO_RESPONSE = 1400 -# Replication applier received an invalid response from master. +# invalid response REPLICATION_INVALID_RESPONSE = 1401 -# Replication applier received a server error from master. -REPLICATION_MASTER_ERROR = 1402 +# leader error +REPLICATION_LEADER_ERROR = 1402 -# Replication applier tried to connect to master with incompatible version. -REPLICATION_MASTER_INCOMPATIBLE = 1403 +# leader incompatible +REPLICATION_LEADER_INCOMPATIBLE = 1403 -# Replication applier connected to a different master than before. -REPLICATION_MASTER_CHANGE = 1404 +# leader change +REPLICATION_LEADER_CHANGE = 1404 -# Replication applier was asked to connect to itself for replication. +# loop detected REPLICATION_LOOP = 1405 -# Unexpected marker found in replication log stream. +# unexpected marker REPLICATION_UNEXPECTED_MARKER = 1406 -# Found invalid replication applier state file. +# invalid applier state REPLICATION_INVALID_APPLIER_STATE = 1407 -# Found unexpected transaction ID. +# invalid transaction REPLICATION_UNEXPECTED_TRANSACTION = 1408 -# Synchronization of a shard takes longer than the configured timeout. +# shard synchronization attempt timeout exceeded REPLICATION_SHARD_SYNC_ATTEMPT_TIMEOUT_EXCEEDED = 1409 -# Invalid replication applier configuration. +# invalid replication applier configuration REPLICATION_INVALID_APPLIER_CONFIGURATION = 1410 -# Operation attempted while replication applier is running. +# cannot perform operation while applier is running REPLICATION_RUNNING = 1411 -# Replication applier stopped by user. +# replication stopped REPLICATION_APPLIER_STOPPED = 1412 -# Replication applier started without a known start tick value. +# no start tick REPLICATION_NO_START_TICK = 1413 -# Replication applier started without a known start tick value. +# start tick not present REPLICATION_START_TICK_NOT_PRESENT = 1414 -# Newborn follower submits a wrong checksum. +# wrong checksum REPLICATION_WRONG_CHECKSUM = 1416 -# Shard is not empty and follower tries a shortcut. 
+# shard not empty REPLICATION_SHARD_NONEMPTY = 1417 -# Specific replicated log is not found +# replicated log {} not found REPLICATION_REPLICATED_LOG_NOT_FOUND = 1418 -# Participant of a replicated log is ordered to do something only the leader can do. +# not the log leader REPLICATION_REPLICATED_LOG_NOT_THE_LEADER = 1419 -# Participant of a replicated log is ordered to do something only a follower can do. +# not a log follower REPLICATION_REPLICATED_LOG_NOT_A_FOLLOWER = 1420 -# Follower of a replicated log rejects an append-entries request. +# follower rejected append entries request REPLICATION_REPLICATED_LOG_APPEND_ENTRIES_REJECTED = 1421 -# Leader instance of a replicated log rejects a request because it just resigned. -# This can also happen if the term changes (due to a configuration change). +# a resigned leader instance rejected a request REPLICATION_REPLICATED_LOG_LEADER_RESIGNED = 1422 -# Follower instance of a replicated log rejects a request because it just resigned. -# This can also happen if the term changes (due to a configuration change). +# a resigned follower instance rejected a request REPLICATION_REPLICATED_LOG_FOLLOWER_RESIGNED = 1423 -# Participant instance of a replicated log is no longer available. +# the replicated log of the participant is gone REPLICATION_REPLICATED_LOG_PARTICIPANT_GONE = 1424 -# Participant tries to change its term but found an invalid new term. +# an invalid term was given REPLICATION_REPLICATED_LOG_INVALID_TERM = 1425 -# Participant is currently unconfigured. +# log participant unconfigured REPLICATION_REPLICATED_LOG_UNCONFIGURED = 1426 -# Specific replicated state was not found. +# replicated state {id:} of type {type:} not found REPLICATION_REPLICATED_STATE_NOT_FOUND = 1427 +# replicated state {id:} of type {type:} is unavailable +REPLICATION_REPLICATED_STATE_NOT_AVAILABLE = 1428 + +# not enough replicas for the configured write-concern are present +REPLICATION_WRITE_CONCERN_NOT_FULFILLED = 1429 + +# operation aborted because a previous operation failed +REPLICATION_REPLICATED_LOG_SUBSEQUENT_FAULT = 1430 + +# replicated state type {type:} is unavailable +REPLICATION_REPLICATED_STATE_IMPLEMENTATION_NOT_FOUND = 1431 + +# error in the replicated WAL subsystem +REPLICATION_REPLICATED_WAL_ERROR = 1432 + +# replicated WAL {file:} has an invalid or missing file header +REPLICATION_REPLICATED_WAL_INVALID_FILE = 1433 + +# replicated WAL {file:} is corrupt +REPLICATION_REPLICATED_WAL_CORRUPT = 1434 + ########################### -# ArangoDB Cluster Errors # +# ArangoDB cluster errors # ########################### -# Operation is sent to a non-following server. +# not a follower CLUSTER_NOT_FOLLOWER = 1446 -# Follower transaction already performed an intermediate commit and must be rolled back. +# follower transaction intermediate commit already performed CLUSTER_FOLLOWER_TRANSACTION_COMMIT_PERFORMED = 1447 -# Updating the plan on collection creation failed. +# creating collection failed due to precondition CLUSTER_CREATE_COLLECTION_PRECONDITION_FAILED = 1448 -# Raised on some occasions when one server gets a request from another. +# got a request from an unknown server CLUSTER_SERVER_UNKNOWN = 1449 -# Number of shards for a collection is higher than allowed. +# too many shards CLUSTER_TOO_MANY_SHARDS = 1450 -# Coordinator cannot create a collection as the collection ID already exists. -CLUSTER_COLLECTION_ID_EXISTS = 1453 - -# Coordinator cannot create an entry for a new collection in Plan hierarchy. 
+# could not create collection in plan CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN = 1454 -# Coordinator sees DBServer issues when creating shards for a new collection. +# could not create collection CLUSTER_COULD_NOT_CREATE_COLLECTION = 1456 -# Coordinator runs into a timeout for some cluster wide operation. +# timeout in cluster operation CLUSTER_TIMEOUT = 1457 -# Coordinator cannot remove an entry for a collection in Plan hierarchy. +# could not remove collection from plan CLUSTER_COULD_NOT_REMOVE_COLLECTION_IN_PLAN = 1458 -# Coordinator cannot remove an entry for a collection in Current hierarchy. -CLUSTER_COULD_NOT_REMOVE_COLLECTION_IN_CURRENT = 1459 - -# Coordinator cannot create an entry for a new database in the Plan hierarchy. +# could not create database in plan CLUSTER_COULD_NOT_CREATE_DATABASE_IN_PLAN = 1460 -# Coordinator sees DBServer issues when creating databases for a new cluster. +# could not create database CLUSTER_COULD_NOT_CREATE_DATABASE = 1461 -# Coordinator cannot remove an entry for a database in the Plan hierarchy. +# could not remove database from plan CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_PLAN = 1462 -# Coordinator cannot remove an entry for a database in the Current hierarchy. +# could not remove database from current CLUSTER_COULD_NOT_REMOVE_DATABASE_IN_CURRENT = 1463 -# Coordinator cannot determine the shard responsible for a given document. +# no responsible shard found CLUSTER_SHARD_GONE = 1464 -# Coordinator loses HTTP connection to a DBServer while transferring data. +# cluster internal HTTP connection broken CLUSTER_CONNECTION_LOST = 1465 -# "_key" attribute specified in sharded collection which uses not only "_key" -# as sharding attribute. +# must not specify _key for this collection CLUSTER_MUST_NOT_SPECIFY_KEY = 1466 -# Coordinator gets conflicting results from different shards. +# got contradicting answers from different shards CLUSTER_GOT_CONTRADICTING_ANSWERS = 1467 -# Coordinator tries to find out the shard responsible for a partial document. +# not all sharding attributes given CLUSTER_NOT_ALL_SHARDING_ATTRIBUTES_GIVEN = 1468 -# Not allowed to update the value of a shard attribute. +# must not change the value of a shard key attribute CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES = 1469 -# Operation not supported in sharded collection. +# unsupported operation or parameter for clusters CLUSTER_UNSUPPORTED = 1470 -# Operation is coordinator-only. +# this operation is only valid on a coordinator in a cluster CLUSTER_ONLY_ON_COORDINATOR = 1471 -# Coordinator or DBServer cannot read the Plan. +# error reading Plan in agency CLUSTER_READING_PLAN_AGENCY = 1472 -# Coordinator cannot truncate all shards of a cluster collection. -CLUSTER_COULD_NOT_TRUNCATE_COLLECTION = 1473 - -# Internal communication of the cluster for AQL produces an error. +# error in cluster internal communication for AQL CLUSTER_AQL_COMMUNICATION = 1474 -# Operation is DBServer-only. +# this operation is only valid on a DBserver in a cluster CLUSTER_ONLY_ON_DBSERVER = 1477 -# Cannot reach a required DBServer. +# A cluster backend which was required for the operation could not be reached CLUSTER_BACKEND_UNAVAILABLE = 1478 -# Required collection out of sync during AQL execution. +# collection/view is out of sync CLUSTER_AQL_COLLECTION_OUT_OF_SYNC = 1481 -# Coordinator cannot create an entry for a new index in Plan hierarchy. +# could not create index in plan CLUSTER_COULD_NOT_CREATE_INDEX_IN_PLAN = 1482 -# Coordinator cannot remove an index from Plan hierarchy. 
+# could not drop index in plan CLUSTER_COULD_NOT_DROP_INDEX_IN_PLAN = 1483 -# One tries to create a collection with "shards_like" attribute which points -# to another collection that also has one. +# chain of distributeShardsLike references CLUSTER_CHAIN_OF_DISTRIBUTESHARDSLIKE = 1484 -# One tries to drop a collection to which another collection points with its -# "shard_like" attribute. +# must not drop collection while another has a distributeShardsLike attribute pointing to it CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE = 1485 -# One tries to create a collection which points to an unknown collection in its -# "shard_like" attribute. +# must not have a distributeShardsLike attribute pointing to an unknown collection CLUSTER_UNKNOWN_DISTRIBUTESHARDSLIKE = 1486 -# One tries to create a collection with a "replication_factor" greater than the -# available number of DBServers. +# the number of current DB-Servers is lower than the requested replicationFactor/writeConcern CLUSTER_INSUFFICIENT_DBSERVERS = 1487 -# Cannot drop follower. +# a follower could not be dropped in agency CLUSTER_COULD_NOT_DROP_FOLLOWER = 1488 -# Replication operation refused by a shard leader. +# a shard leader refuses to perform a replication operation CLUSTER_SHARD_LEADER_REFUSES_REPLICATION = 1489 -# Non-replication operation refused by a shard follower. +# a shard follower refuses to perform an operation CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION = 1490 -# Shard leader resigned in the meantime. +# a (former) shard leader refuses to perform an operation CLUSTER_SHARD_LEADER_RESIGNED = 1491 -# Agency operation failed after various retries. +# some agency operation failed CLUSTER_AGENCY_COMMUNICATION_FAILED = 1492 -# Servers currently competing for leadership. +# leadership challenge is ongoing CLUSTER_LEADERSHIP_CHALLENGE_ONGOING = 1495 -# Operation sent to a non-leading server. +# not a leader CLUSTER_NOT_LEADER = 1496 -# Coordinator cannot create an entry for a new view in Plan hierarchy. +# could not create view in plan CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN = 1497 -# Coordinator tries to create a view and the ID already exists. +# view ID already exists CLUSTER_VIEW_ID_EXISTS = 1498 -# Coordinator cannot drop a collection entry in Plan hierarchy. +# could not drop collection in plan CLUSTER_COULD_NOT_DROP_COLLECTION = 1499 ######################### -# ArangoDB Query Errors # +# ArangoDB query errors # ######################### -# Running query killed by an explicit admin command. +# query killed QUERY_KILLED = 1500 -# Parsed query syntactically invalid. +# %s QUERY_PARSE = 1501 -# Empty query specified. +# query is empty QUERY_EMPTY = 1502 -# Runtime error caused by query. +# runtime error '%s' QUERY_SCRIPT = 1503 -# Number out of range. +# number out of range QUERY_NUMBER_OUT_OF_RANGE = 1504 -# Geo index coordinate invalid or out of range. +# invalid geo coordinate value QUERY_INVALID_GEO_VALUE = 1505 -# Invalid variable name. +# variable name '%s' has an invalid format QUERY_VARIABLE_NAME_INVALID = 1510 -# Variable redeclared in a query. +# variable '%s' is assigned multiple times QUERY_VARIABLE_REDECLARED = 1511 -# Variable name unknown or undefined. +# unknown variable '%s' QUERY_VARIABLE_NAME_UNKNOWN = 1512 -# Cannot acquire lock on collection. +# unable to read-lock collection %s QUERY_COLLECTION_LOCK_FAILED = 1521 -# Too many collections or shards in a query. +# too many collections/shards QUERY_TOO_MANY_COLLECTIONS = 1522 -# Document attribute redeclared. 
-QUERY_DOCUMENT_ATTRIBUTE_REDECLARED = 1530 +# too much nesting or too many objects +QUERY_TOO_MUCH_NESTING = 1524 -# Unknown attribute is used inside an OPTIONS clause. +# unknown/invalid OPTIONS attribute used QUERY_INVALID_OPTIONS_ATTRIBUTE = 1539 -# Undefined function called. +# usage of unknown function '%s()' QUERY_FUNCTION_NAME_UNKNOWN = 1540 -# Argument number mismatch. +# invalid number of arguments for function '%s()' QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH = 1541 -# Argument type mismatch. +# invalid argument type in call to function '%s()' QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH = 1542 -# Invalid regex. +# invalid regex value QUERY_INVALID_REGEX = 1543 -# Invalid bind parameters. +# invalid structure of bind parameters QUERY_BIND_PARAMETERS_INVALID = 1550 -# Bind parameter missing. +# no value specified for declared bind parameter '%s' QUERY_BIND_PARAMETER_MISSING = 1551 -# Bind parameter undeclared. +# bind parameter '%s' was not declared in the query QUERY_BIND_PARAMETER_UNDECLARED = 1552 -# Invalid bind parameter value or type. +# bind parameter '%s' has an invalid value or type QUERY_BIND_PARAMETER_TYPE = 1553 -# Non-boolean value used in logical operation. -QUERY_INVALID_LOGICAL_VALUE = 1560 +# failed vector search +QUERY_VECTOR_SEARCH_NOT_APPLIED = 1554 -# Non-numeric value used in arithmetic operation. +# invalid arithmetic value QUERY_INVALID_ARITHMETIC_VALUE = 1561 -# Divide by zero. +# division by zero QUERY_DIVISION_BY_ZERO = 1562 -# Non-list operand used when expecting a list operand. +# array expected QUERY_ARRAY_EXPECTED = 1563 -# Collection is used as an operand in an AQL expression +# collection '%s' used as expression operand QUERY_COLLECTION_USED_IN_EXPRESSION = 1568 -# Function "FAIL()" called inside a query. +# FAIL(%s) called QUERY_FAIL_CALLED = 1569 -# Geo restriction specified but no suitable geo index found. +# no suitable geo index found for geo restriction on '%s' QUERY_GEO_INDEX_MISSING = 1570 -# Fulltext query performed on a collection without suitable fulltext index. +# no suitable fulltext index found for fulltext query on '%s' QUERY_FULLTEXT_INDEX_MISSING = 1571 -# Cannot convert value to a date. +# invalid date value QUERY_INVALID_DATE_VALUE = 1572 -# Query contains more than one data-modifying operation. +# multi-modify query QUERY_MULTI_MODIFY = 1573 -# Query contains an invalid aggregate expression. +# invalid aggregate expression QUERY_INVALID_AGGREGATE_EXPRESSION = 1574 -# Query contains options that cannot be resolved at query compile time. +# query options must be readable at query compile time QUERY_COMPILE_TIME_OPTIONS = 1575 -# Query contains an invalid options specification. -QUERY_EXCEPTION_OPTIONS = 1576 +# FILTER/PRUNE condition complexity is too high +QUERY_DNF_COMPLEXITY = 1576 -# Unusable index hint. +# could not use forced index hint QUERY_FORCED_INDEX_HINT_UNUSABLE = 1577 -# Dynamic function not allowed. +# disallowed dynamic call to '%s' QUERY_DISALLOWED_DYNAMIC_CALL = 1578 -# Collection data accessed after modification. +# access after data-modification by %s QUERY_ACCESS_AFTER_MODIFICATION = 1579 ############################ -# AQL User Function Errors # +# AQL user function errors # ############################ -# User function registered with invalid name. +# invalid user function name QUERY_FUNCTION_INVALID_NAME = 1580 -# User function registered with invalid code. +# invalid user function code QUERY_FUNCTION_INVALID_CODE = 1581 -# User function not found. 
+# user function '%s()' not found QUERY_FUNCTION_NOT_FOUND = 1582 -# Runtime exception raised by query function. +# user function runtime error: %s QUERY_FUNCTION_RUNTIME_ERROR = 1583 +# query is not eligible for plan caching +QUERY_NOT_ELIGIBLE_FOR_PLAN_CACHING = 1584 + ############################# -# AQL Query Registry Errors # +# AQL query registry errors # ############################# -# Query received an invalid JSON. +# bad execution plan JSON QUERY_BAD_JSON_PLAN = 1590 -# Query ID not found. +# query ID not found QUERY_NOT_FOUND = 1591 -# User provided expression does not evaluate to true. +# %s QUERY_USER_ASSERT = 1593 -# User provided expression does not evaluate to true. +# %s QUERY_USER_WARN = 1594 -# Window node is created after a data-modification operation. +# window operation after data-modification QUERY_WINDOW_AFTER_MODIFICATION = 1595 ########################## -# ArangoDB Cursor Errors # +# ArangoDB cursor errors # ########################## -# Cursor ID not found. +# cursor not found CURSOR_NOT_FOUND = 1600 -# Concurrent request still using the cursor. +# cursor is busy CURSOR_BUSY = 1601 -############################## -# ArangoDB Validation Errors # -############################## +##################################### +# ArangoDB schema validation errors # +##################################### -# Document does not pass schema validation. +# schema validation failed VALIDATION_FAILED = 1620 -# Schema description is invalid. +# invalid schema validation parameter VALIDATION_BAD_PARAMETER = 1621 ############################### -# ArangoDB Transaction Errors # +# ArangoDB transaction errors # ############################### -# Wrong usage of transactions. This is an internal error. +# internal transaction error TRANSACTION_INTERNAL = 1650 -# Nested transactions. +# nested transactions detected TRANSACTION_NESTED = 1651 -# Unregistered collection used in transaction. +# unregistered collection used in transaction TRANSACTION_UNREGISTERED_COLLECTION = 1652 -# Disallowed operation in transaction. +# disallowed operation inside transaction TRANSACTION_DISALLOWED_OPERATION = 1653 -# Transaction aborted. +# transaction aborted TRANSACTION_ABORTED = 1654 -# Transaction not found. +# transaction not found TRANSACTION_NOT_FOUND = 1655 ########################## -# User Management Errors # +# User management errors # ########################## -# Invalid username. +# invalid user name USER_INVALID_NAME = 1700 -# Username already exists. +# duplicate user USER_DUPLICATE = 1702 -# User not found. +# user not found USER_NOT_FOUND = 1703 -# User authenticated by an external server. +# user is external USER_EXTERNAL = 1705 ###################################### -# Service Management Errors (Legacy) # +# Service management errors (legacy) # ###################################### -# Cannot download service from central repository. +# service download failed SERVICE_DOWNLOAD_FAILED = 1752 -# Service upload from the client to the ArangoDB server failed. +# service upload failed SERVICE_UPLOAD_FAILED = 1753 ############### -# LDAP Errors # -############### - -# Cannot initialize an LDAP connection. -LDAP_CANNOT_INIT = 1800 - -# Cannot set an LDAP option. -LDAP_CANNOT_SET_OPTION = 1801 - -# Cannot bind to an LDAP server. -LDAP_CANNOT_BIND = 1802 - -# Cannot unbind from an LDAP server. -LDAP_CANNOT_UNBIND = 1803 - -# Cannot search the LDAP server. -LDAP_CANNOT_SEARCH = 1804 - -# Cannot start a TLS LDAP session. 
-LDAP_CANNOT_START_TLS = 1805 - -# LDAP did not find any objects with the specified search query. -LDAP_FOUND_NO_OBJECTS = 1806 - -# LDAP found zero or more than one user. -LDAP_NOT_ONE_USER_FOUND = 1807 - -# LDAP user not identified. -LDAP_USER_NOT_IDENTIFIED = 1808 - -# Cannot distinguish a valid mode for provided LDAP configuration. -LDAP_INVALID_MODE = 1820 - -############### -# Task Errors # +# Task errors # ############### -# Task created with an invalid ID. +# invalid task id TASK_INVALID_ID = 1850 -# Task created with a duplicate ID. +# duplicate task id TASK_DUPLICATE_ID = 1851 -# Task not found. +# task not found TASK_NOT_FOUND = 1852 ############################ -# Graph / Traversal Errors # +# Graph / traversal errors # ############################ -# Invalid name passed to the server. +# invalid graph GRAPH_INVALID_GRAPH = 1901 -# Invalid graph name passed to the server. -GRAPH_COULD_NOT_CREATE_GRAPH = 1902 - -# Invalid vertex ID passed to the server. -GRAPH_INVALID_VERTEX = 1903 - -# Vertex could not be created. -GRAPH_COULD_NOT_CREATE_VERTEX = 1904 - -# Vertex could not be changed. -GRAPH_COULD_NOT_CHANGE_VERTEX = 1905 - -# Invalid edge ID passed to the server. +# invalid edge GRAPH_INVALID_EDGE = 1906 -# Edge could not be created. -GRAPH_COULD_NOT_CREATE_EDGE = 1907 - -# Edge could not be changed. -GRAPH_COULD_NOT_CHANGE_EDGE = 1908 - -# Too many iterations in graph traversal. -GRAPH_TOO_MANY_ITERATIONS = 1909 - -# Invalid filter result returned in graph traversal. +# invalid filter result GRAPH_INVALID_FILTER_RESULT = 1910 -# Edge collection may only be used once in an edge definition. +# multi use of edge collection in edge def GRAPH_COLLECTION_MULTI_USE = 1920 -# Collection already used by another graph in a different edge definition. +# edge collection already used in edge def GRAPH_COLLECTION_USE_IN_MULTI_GRAPHS = 1921 -# Graph name missing. +# missing graph name GRAPH_CREATE_MISSING_NAME = 1922 -# Edge definition malformed (must be a list of dicts). +# malformed edge definition GRAPH_CREATE_MALFORMED_EDGE_DEFINITION = 1923 -# Graph not found. +# graph '%s' not found GRAPH_NOT_FOUND = 1924 -# Graph name already exists. +# graph already exists GRAPH_DUPLICATE = 1925 -# Vertex collection does not exist or is not part of the graph. +# vertex collection does not exist or is not part of the graph GRAPH_VERTEX_COL_DOES_NOT_EXIST = 1926 -# Collection not a vertex collection. +# collection not a vertex collection GRAPH_WRONG_COLLECTION_TYPE_VERTEX = 1927 -# Vertex collection not in orphan collections of the graph. +# collection is not in list of orphan collections GRAPH_NOT_IN_ORPHAN_COLLECTION = 1928 -# Collection already used in an edge definition of the graph. +# collection already used in edge def GRAPH_COLLECTION_USED_IN_EDGE_DEF = 1929 -# Edge collection not used in any edge definition of the graph. +# edge collection not used in graph GRAPH_EDGE_COLLECTION_NOT_USED = 1930 -# Collection "_graphs" does not exist. +# collection _graphs does not exist GRAPH_NO_GRAPH_COLLECTION = 1932 -# Invalid example array object string. -GRAPH_INVALID_EXAMPLE_ARRAY_OBJECT_STRING = 1933 - -# Invalid example type (must be a list or dict). -GRAPH_INVALID_EXAMPLE_ARRAY_OBJECT = 1934 - -# Invalid number of arguments. +# Invalid number of arguments. Expected: GRAPH_INVALID_NUMBER_OF_ARGUMENTS = 1935 # Invalid parameter type. GRAPH_INVALID_PARAMETER = 1936 -# Invalid ID. -GRAPH_INVALID_ID = 1937 - -# Collection already in orphans of the graph. 
+# collection used in orphans GRAPH_COLLECTION_USED_IN_ORPHANS = 1938 -# Edge collection does not exist or is not part of the graph. +# edge collection does not exist or is not part of the graph GRAPH_EDGE_COL_DOES_NOT_EXIST = 1939 -# Graph has no edge collections. +# empty graph GRAPH_EMPTY = 1940 -# Invalid data in "_graphs" collection. +# internal graph data corrupt GRAPH_INTERNAL_DATA_CORRUPT = 1941 -# Edge collection already defined. -GRAPH_INTERNAL_EDGE_COLLECTION_ALREADY_SET = 1942 +# must not drop collection while part of graph +GRAPH_MUST_NOT_DROP_COLLECTION = 1942 -# Orphan list argument malformed. Must be a list of strings. +# malformed orphan list GRAPH_CREATE_MALFORMED_ORPHAN_LIST = 1943 -# Collection used as a relation exists. +# edge definition collection is a document collection GRAPH_EDGE_DEFINITION_IS_DOCUMENT = 1944 -# The collection is used as the initial collection of this graph and is not allowed to -# be removed manually. +# initial collection is not allowed to be removed manually GRAPH_COLLECTION_IS_INITIAL = 1945 -# During the graph creation process no collection could be selected as the needed -# initial collection. Happens if a distributeShardsLike or replicationFactor mismatch -# was found. +# no valid initial collection found GRAPH_NO_INITIAL_COLLECTION = 1946 -# The _from or _to collection specified for the edge refers to a vertex collection which -# is not used in any edge definition of the graph. -GRAPH_REFERENCED_VERTEX_COLLECTION_NOT_USED = 1947 +# referenced vertex collection is not part of the graph +GRAPH_REFERENCED_VERTEX_COLLECTION_NOT_PART_OF_THE_GRAPH = 1947 -# Negative edge weight found during a weighted graph traversal or shortest path query. +# negative edge weight found GRAPH_NEGATIVE_EDGE_WEIGHT = 1948 +# the given collection is not part of the graph +GRAPH_COLLECTION_NOT_PART_OF_THE_GRAPH = 1949 + ################## -# Session Errors # +# Session errors # ################## -# Invalid/unknown session ID passed to the server. +# unknown session SESSION_UNKNOWN = 1950 -# Session expired. +# session expired SESSION_EXPIRED = 1951 ######################## -# Simple Client Errors # +# Simple Client errors # ######################## -# This error should not happen. +# unknown client error SIMPLE_CLIENT_UNKNOWN_ERROR = 2000 -# Client could not connect to server. +# could not connect to server SIMPLE_CLIENT_COULD_NOT_CONNECT = 2001 -# Client could not write data. +# could not write to server SIMPLE_CLIENT_COULD_NOT_WRITE = 2002 -# Client could not read data. +# could not read from server SIMPLE_CLIENT_COULD_NOT_READ = 2003 -# Will be raised if was erlaube?! +# was erlaube?! WAS_ERLAUBE = 2019 ####################### -# Communicator Errors # +# internal AQL errors # ####################### -# Communicator request aborted. -COMMUNICATOR_REQUEST_ABORTED = 2100 - -# Communicator disabled. -COMMUNICATOR_DISABLED = 2101 - -####################### -# Internal AQL errors # -####################### - -# Internal error during AQL execution. +# General internal AQL error INTERNAL_AQL = 2200 -# AQL block wrote in too few output registers. -WROTE_TOO_FEW_OUTPUT_REGISTERS = 2201 - -# AQL block wrote in too many output registers. -WROTE_TOO_MANY_OUTPUT_REGISTERS = 2202 - -# AQL block wrote in an output register twice. -WROTE_OUTPUT_REGISTER_TWICE = 2203 - -# AQL block wrote in a register that is not its output. -WROTE_IN_WRONG_REGISTER = 2204 - -# AQL block did not copy its input registers. 
-INPUT_REGISTERS_NOT_COPIED = 2205 - ########################## -# Foxx Management Errors # +# Foxx management errors # ########################## -# Service manifest file not a well-formed JSON. +# failed to parse manifest file MALFORMED_MANIFEST_FILE = 3000 -# Service manifest contains invalid values. +# manifest file is invalid INVALID_SERVICE_MANIFEST = 3001 -# Service folder or bundle does not exist on the server. +# service files missing SERVICE_FILES_MISSING = 3002 -# Local service bundle does not match the checksum in the database. +# service files outdated SERVICE_FILES_OUTDATED = 3003 -# Service options contain invalid values. +# service options are invalid INVALID_FOXX_OPTIONS = 3004 -# Service mountpath contains invalid characters. +# invalid mountpath INVALID_MOUNTPOINT = 3007 -# No service found at given mountpath. +# service not found SERVICE_NOT_FOUND = 3009 -# Service missing configuration or dependencies. +# service needs configuration SERVICE_NEEDS_CONFIGURATION = 3010 -# Service already exists at given mountpath. +# service already exists SERVICE_MOUNTPOINT_CONFLICT = 3011 -# Service directory does not contain a manifest file. +# missing manifest file SERVICE_MANIFEST_NOT_FOUND = 3012 -# Service options are not well-formed JSONs. +# failed to parse service options SERVICE_OPTIONS_MALFORMED = 3013 -# Source path does not match a file or directory. +# source path not found SERVICE_SOURCE_NOT_FOUND = 3014 -# Source path could not be resolved. +# error resolving source SERVICE_SOURCE_ERROR = 3015 -# Unknown service script. +# unknown script SERVICE_UNKNOWN_SCRIPT = 3016 -# API for managing Foxx services disabled. +# service api disabled SERVICE_API_DISABLED = 3099 ################################### -# JavaScript Module Loader Errors # +# JavaScript module loader errors # ################################### -# Cannot resolve module path. +# cannot locate module MODULE_NOT_FOUND = 3100 -# Module could not be parsed because of a syntax error. +# syntax error in module MODULE_SYNTAX_ERROR = 3101 -# Failed to invoke the module in its context. +# failed to invoke module MODULE_FAILURE = 3103 -##################### -# Enterprise Errors # -##################### +############################# +# Enterprise Edition errors # +############################# -# Requested collection needs to be smart. +# collection is not smart NO_SMART_COLLECTION = 4000 -# Given document does not have the smart graph attribute set. +# smart graph attribute not given NO_SMART_GRAPH_ATTRIBUTE = 4001 -# Smart collection cannot be dropped. +# cannot drop this smart collection CANNOT_DROP_SMART_COLLECTION = 4002 -# "_key" not prefixed with the value of the smart graph attribute. +# in smart vertex collections _key must be a string and prefixed with the value of the smart graph attribute KEY_MUST_BE_PREFIXED_WITH_SMART_GRAPH_ATTRIBUTE = 4003 -# Given smart graph attribute is illegal and cannot be used for sharding. +# attribute cannot be used as smart graph attribute ILLEGAL_SMART_GRAPH_ATTRIBUTE = 4004 -# Smart graph attribute of collection does not match the attribute of graph. +# smart graph attribute mismatch SMART_GRAPH_ATTRIBUTE_MISMATCH = 4005 -# Invalid smart join attribute declaration. +# invalid smart join attribute declaration INVALID_SMART_JOIN_ATTRIBUTE = 4006 -# Key must be prefixed with smart join attribute. +# shard key value must be prefixed with the value of the smart join attribute KEY_MUST_BE_PREFIXED_WITH_SMART_JOIN_ATTRIBUTE = 4007 -# Document lacks required smart join attribute. 
+# smart join attribute not given or invalid NO_SMART_JOIN_ATTRIBUTE = 4008 -# Cannot update the value of the smart join attribute. +# must not change the value of the smartJoinAttribute CLUSTER_MUST_NOT_CHANGE_SMART_JOIN_ATTRIBUTE = 4009 -# There was an attempt to create an edge between separated graph components. +# non disjoint edge found INVALID_DISJOINT_SMART_EDGE = 4010 -# Switching back and forth between Satellite and Smart in Disjoint SmartGraph is not -# supported within a single AQL statement. Split into multiple statements. +# Unsupported alternating Smart and Satellite in Disjoint SmartGraph. UNSUPPORTED_CHANGE_IN_SMART_TO_SATELLITE_DISJOINT_EDGE_DIRECTION = 4011 -######################### -# Cluster Repair Errors # -######################### - -# General error during cluster repairs. -CLUSTER_REPAIRS_FAILED = 5000 - -# Cluster repairs not healthy enough. -CLUSTER_REPAIRS_NOT_ENOUGH_HEALTHY = 5001 - -# Raised on various inconsistencies regarding the replication factor. -CLUSTER_REPAIRS_REPLICATION_FACTOR_VIOLATED = 5002 - -# Repaired collection has some shards without DBServers. -CLUSTER_REPAIRS_NO_DBSERVERS = 5003 - -# Shard in collection and its prototype in the corresponding "shard_like" -# collection have mismatching leaders. -CLUSTER_REPAIRS_MISMATCHING_LEADERS = 5004 - -# Shard in collection and its prototype in the corresponding "shard_like" -# collection don't have the same followers. -CLUSTER_REPAIRS_MISMATCHING_FOLLOWERS = 5005 - -# Repaired collection does not have "shard_like" as expected. -CLUSTER_REPAIRS_INCONSISTENT_ATTRIBUTES = 5006 - -# Collection and its "shard_like" prototype have unequal number of DBServers. -CLUSTER_REPAIRS_MISMATCHING_SHARDS = 5007 - -# Move shard job failed during cluster repairs. -CLUSTER_REPAIRS_JOB_FAILED = 5008 - -# Move shard job disappeared before finishing. -CLUSTER_REPAIRS_JOB_DISAPPEARED = 5009 - -# Agency transaction failed during either sending or executing it. -CLUSTER_REPAIRS_OPERATION_FAILED = 5010 - ################# -# Agency Errors # +# Agency errors # ################# -# Malformed gossip message. +# malformed gossip message AGENCY_MALFORMED_GOSSIP_MESSAGE = 20001 -# Malformed inquire request. +# malformed inquire request AGENCY_MALFORMED_INQUIRE_REQUEST = 20002 # Inform message must be an object. AGENCY_INFORM_MUST_BE_OBJECT = 20011 -# Inform message must contain a uint parameter 'term'. +# Inform message must contain uint parameter 'term' AGENCY_INFORM_MUST_CONTAIN_TERM = 20012 -# Inform message must contain a string parameter 'ID'. +# Inform message must contain string parameter 'id' AGENCY_INFORM_MUST_CONTAIN_ID = 20013 -# Inform message must contain an array 'active'. +# Inform message must contain array 'active' AGENCY_INFORM_MUST_CONTAIN_ACTIVE = 20014 -# Inform message must contain an object 'pool'. +# Inform message must contain object 'pool' AGENCY_INFORM_MUST_CONTAIN_POOL = 20015 -# Inform message must contain an object 'min ping'. +# Inform message must contain object 'min ping' AGENCY_INFORM_MUST_CONTAIN_MIN_PING = 20016 -# Inform message must contain an object 'max ping'. +# Inform message must contain object 'max ping' AGENCY_INFORM_MUST_CONTAIN_MAX_PING = 20017 -# Inform message must contain an object 'timeoutMult'. +# Inform message must contain object 'timeoutMult' AGENCY_INFORM_MUST_CONTAIN_TIMEOUT_MULT = 20018 -# Cannot rebuild readDB or the spearHead from replicated log. +# Cannot rebuild readDB and spearHead AGENCY_CANNOT_REBUILD_DBS = 20021 -# Malformed agency transaction. 
+# malformed agency transaction AGENCY_MALFORMED_TRANSACTION = 20030 ###################### -# Supervision Errors # +# Supervision errors # ###################### -# General supervision failure. +# general supervision failure SUPERVISION_GENERAL_FAILURE = 20501 #################### -# Scheduler Errors # +# Scheduler errors # #################### -# Queue is full. +# queue is full QUEUE_FULL = 21003 -# Request with a queue time requirement is set and cannot be fulfilled. +# queue time violated QUEUE_TIME_REQUIREMENT_VIOLATED = 21004 +# too many detached scheduler threads +TOO_MANY_DETACHED_THREADS = 21005 + ###################### -# Maintenance Errors # +# Maintenance errors # ###################### -# Maintenance action cannot be stopped once started. -ACTION_OPERATION_UNABORTABLE = 6002 - -# This maintenance action is still processing. +# maintenance action still processing ACTION_UNFINISHED = 6003 -# No such maintenance action exists. -NO_SUCH_ACTION = 6004 - ######################### -# Backup/Restore Errors # +# Backup/Restore errors # ######################### -# Failed to create hot backup set. +# internal hot backup error HOT_BACKUP_INTERNAL = 7001 -# Failed to restore to hot backup set. +# internal hot restore error HOT_RESTORE_INTERNAL = 7002 -# The hot backup set cannot be restored on non-matching cluster topology. +# backup does not match this topology BACKUP_TOPOLOGY = 7003 -# No space left on device. +# no space left on device NO_SPACE_LEFT_ON_DEVICE = 7004 -# Failed to upload hot backup set to remote target. +# failed to upload hot backup set to remote target FAILED_TO_UPLOAD_BACKUP = 7005 -# Failed to download hot backup set from remote source. +# failed to download hot backup set from remote source FAILED_TO_DOWNLOAD_BACKUP = 7006 -# Cannot find a hot backup set with this ID. +# no such hot backup set can be found NO_SUCH_HOT_BACKUP = 7007 -# Invalid remote repository configuration. +# remote hotback repository configuration error REMOTE_REPOSITORY_CONFIG_BAD = 7008 -# Some DB servers cannot be reached for transaction locks. +# some db servers cannot be reached for transaction locks LOCAL_LOCK_FAILED = 7009 -# Some DB servers cannot be reached for transaction locks. +# some db servers cannot be reached for transaction locks LOCAL_LOCK_RETRY = 7010 -# Conflict of multiple hot backup processes. +# hot backup conflict HOT_BACKUP_CONFLICT = 7011 -# One or more db servers could not be reached for hot backup inquiry. +# hot backup not all db servers reachable HOT_BACKUP_DBSERVERS_AWOL = 7012 -######################## -# Plan Analyzer Errors # -######################## - -# Plan could not be modified while creating or deleting Analyzers revision. -ERROR_CLUSTER_COULD_NOT_MODIFY_ANALYZERS_IN_PLAN = 7021 - -############## -# AIR Errors # -############## +######################### +# Plan Analyzers errors # +######################### -# During the execution of an AIR program an error occurred. -AIR_EXECUTION_ERROR = 8001 +# analyzers in plan could not be modified +CLUSTER_COULD_NOT_MODIFY_ANALYZERS_IN_PLAN = 7021 ############# # Licensing # ############# -# The license has expired or is invalid. +# license has expired or is invalid LICENSE_EXPIRED_OR_INVALID = 9001 -# Verification of license failed. +# license verification failed LICENSE_SIGNATURE_VERIFICATION = 9002 -# The ID of the license does not match the ID of this instance. +# non-matching license id LICENSE_NON_MATCHING_ID = 9003 -# The installed license does not cover this feature. 
+# feature is not enabled by the license LICENSE_FEATURE_NOT_ENABLED = 9004 -# The installed license does not cover a higher number of this resource. +# the resource is exhausted LICENSE_RESOURCE_EXHAUSTED = 9005 -# The license does not hold features of an ArangoDB license. +# invalid license LICENSE_INVALID = 9006 -# The license has one or more inferior features. +# conflicting license LICENSE_CONFLICT = 9007 -# Could not verify the license’s signature. +# failed to validate license signature LICENSE_VALIDATION_FAILED = 9008 diff --git a/arango/exceptions.py b/arango/exceptions.py index fb11f8d5..29bcdc17 100644 --- a/arango/exceptions.py +++ b/arango/exceptions.py @@ -360,6 +360,14 @@ class DatabaseDeleteError(ArangoServerError): """Failed to delete database.""" +class DatabaseSupportInfoError(ArangoServerError): + """Failed to retrieve support info for deployment.""" + + +class DatabaseCompactError(ArangoServerError): + """Failed to compact databases.""" + + ####################### # Document Exceptions # ####################### @@ -606,7 +614,7 @@ class PregelJobDeleteError(ArangoServerError): ##################### -class ServerConnectionError(ArangoClientError): +class ServerConnectionError(ArangoServerError): """Failed to connect to ArangoDB server.""" @@ -666,10 +674,22 @@ class ServerLogLevelError(ArangoServerError): """Failed to retrieve server log levels.""" +class ServerLogLevelResetError(ArangoServerError): + """Failed to reset server log levels.""" + + +class ServerLogSettingError(ArangoServerError): + """Failed to retrieve server log settings.""" + + class ServerLogLevelSetError(ArangoServerError): """Failed to set server log levels.""" +class ServerLogSettingSetError(ArangoServerError): + """Failed to set server log settings.""" + + class ServerReloadRoutingError(ArangoServerError): """Failed to reload routing details.""" @@ -683,7 +703,15 @@ class ServerMetricsError(ArangoServerError): class ServerRoleError(ArangoServerError): - """Failed to retrieve server role in a cluster.""" + """Failed to retrieve server role.""" + + +class ServerModeError(ArangoServerError): + """Failed to retrieve server mode.""" + + +class ServerModeSetError(ArangoServerError): + """Failed to set server mode.""" class ServerTLSError(ArangoServerError): @@ -698,6 +726,18 @@ class ServerEncryptionError(ArangoServerError): """Failed to reload user-defined encryption keys.""" +class ServerCurrentOptionsGetError(ArangoServerError): + """Failed to retrieve currently-set server options.""" + + +class ServerAvailableOptionsGetError(ArangoServerError): + """Failed to retrieve available server options.""" + + +class ServerExecuteError(ArangoServerError): + """Failed to execute raw JavaScript command.""" + + ##################### # Task Exceptions # ##################### @@ -744,6 +784,10 @@ class TransactionAbortError(ArangoServerError): """Failed to abort transaction.""" +class TransactionFetchError(ArangoServerError): + """Failed to fetch existing transaction.""" + + class TransactionListError(ArangoServerError): """Failed to retrieve transactions.""" @@ -968,7 +1012,11 @@ class ClusterServerIDError(ArangoServerError): class ClusterServerRoleError(ArangoServerError): - """Failed to retrieve server role.""" + """Failed to retrieve server role in a cluster.""" + + +class ClusterServerModeError(ArangoServerError): + """Failed to retrieve server mode in a cluster.""" class ClusterServerStatisticsError(ArangoServerError): @@ -999,6 +1047,10 @@ class ClusterRebalanceError(ArangoServerError): """Failed to execute 
cluster re-balancing operation (load/set).""" +class ClusterVpackSortMigrationError(ArangoServerError): + """Failed to execute vpack sort migration request.""" + + ################## # JWT Exceptions # ################## @@ -1022,3 +1074,10 @@ class JWTRefreshError(ArangoClientError): class JWTExpiredError(ArangoClientError): """JWT token has expired.""" + + +################################### +# Parameter Validation Exceptions # +################################### +class SortValidationError(ArangoClientError): + """Invalid sort parameters.""" diff --git a/arango/executor.py b/arango/executor.py index 47ac4a19..9330e11c 100644 --- a/arango/executor.py +++ b/arango/executor.py @@ -19,6 +19,7 @@ OverloadControlExecutorError, TransactionAbortError, TransactionCommitError, + TransactionFetchError, TransactionInitError, TransactionStatusError, ) @@ -241,6 +242,11 @@ class TransactionApiExecutor: :type max_size: int :param allow_dirty_read: Allow reads from followers in a cluster. :type allow_dirty_read: bool | None + :param transaction_id: Initialize using an existing transaction instead of starting + a new transaction. + :type transaction_id: str | None + :param skip_fast_lock_round: Whether to disable fast locking for write operations. + :type skip_fast_lock_round: bool | None """ def __init__( @@ -254,6 +260,8 @@ def __init__( lock_timeout: Optional[int] = None, max_size: Optional[int] = None, allow_dirty_read: bool = False, + transaction_id: Optional[str] = None, + skip_fast_lock_round: Optional[bool] = None, ) -> None: self._conn = connection @@ -274,20 +282,32 @@ def __init__( data["lockTimeout"] = lock_timeout if max_size is not None: data["maxTransactionSize"] = max_size + if skip_fast_lock_round is not None: + data["skipFastLockRound"] = skip_fast_lock_round + + if transaction_id is None: + request = Request( + method="post", + endpoint="/_api/transaction/begin", + data=data, + headers=( + {"x-arango-allow-dirty-read": "true"} if allow_dirty_read else None + ), + ) + resp = self._conn.send_request(request) - request = Request( - method="post", - endpoint="/_api/transaction/begin", - data=data, - headers={"x-arango-allow-dirty-read": "true"} if allow_dirty_read else None, - ) - resp = self._conn.send_request(request) + if not resp.is_success: + raise TransactionInitError(resp, request) - if not resp.is_success: - raise TransactionInitError(resp, request) + result = resp.body["result"] + self._id: str = result["id"] + else: + self._id = transaction_id - result: Json = resp.body["result"] - self._id: str = result["id"] + try: + self.status() + except TransactionStatusError as err: + raise TransactionFetchError(err.response, err.request) @property def context(self) -> str: diff --git a/arango/formatter.py b/arango/formatter.py index 2058b1d6..1c63bf00 100644 --- a/arango/formatter.py +++ b/arango/formatter.py @@ -20,14 +20,22 @@ def format_body(body: Json) -> Json: return body -def format_index(body: Json) -> Json: +def format_index(body: Json, formatter: bool = True) -> Json: """Format index data. :param body: Input body. :type body: dict + :param formatter: Convert (most) keys to snake_case. + :type formatter: bool :return: Formatted body. 
:rtype: dict """ + if not formatter: + body.pop("code") + body.pop("error") + body["id"] = body["id"].split("/", 1)[-1] + return body + result = {"id": body["id"].split("/", 1)[-1], "fields": body["fields"]} if "type" in body: result["type"] = body["type"] @@ -101,6 +109,12 @@ def format_index(body: Json) -> Json: result["writebuffer_active"] = body["writebufferActive"] if "writebufferSizeMax" in body: result["writebuffer_max_size"] = body["writebufferSizeMax"] + if "fieldValueTypes" in body: + result["field_value_types"] = body["fieldValueTypes"] + + # Introduced in 3.12 EE + if "optimizeTopK" in body: + result["optimizeTopK"] = body["optimizeTopK"] return verify_format(body, result) @@ -351,6 +365,12 @@ def format_aql_query(body: Json) -> Json: # New in 3.11 if "peakMemoryUsage" in body: result["peak_memory_usage"] = body["peakMemoryUsage"] + + # New in 3.12.2 + if "modificationQuery" in body: + result["modification_query"] = body["modificationQuery"] + if "warnings" in body: + result["warnings"] = body["warnings"] return verify_format(body, result) @@ -1241,6 +1261,8 @@ def format_graph_properties(body: Json) -> Json: } if "isSmart" in body: result["smart"] = body["isSmart"] + if "isDisjoint" in body: + result["disjoint"] = body["isDisjoint"] if "isSatellite" in body: result["is_satellite"] = body["isSatellite"] if "smartGraphAttribute" in body: diff --git a/arango/graph.py b/arango/graph.py index 766cae92..3279129f 100644 --- a/arango/graph.py +++ b/arango/graph.py @@ -1,6 +1,7 @@ __all__ = ["Graph"] from typing import List, Optional, Sequence, Union +from warnings import warn from arango.api import ApiGroup from arango.collection import EdgeCollection, VertexCollection @@ -384,6 +385,11 @@ def traverse( ) -> Result[Json]: """Traverse the graph and return the visited vertices and edges. + .. warning:: + + This method is deprecated and no longer works since ArangoDB 3.12. + The preferred way to traverse graphs is via AQL. + :param start_vertex: Start vertex document ID or body with "_id" field. :type start_vertex: str | dict :param direction: Traversal direction. Allowed values are "outbound" @@ -441,6 +447,9 @@ def traverse( :rtype: dict :raise arango.exceptions.GraphTraverseError: If traversal fails. """ + m = "The HTTP traversal API is deprecated since version 3.4.0. The preferred way to traverse graphs is via AQL." # noqa: E501 + warn(m, DeprecationWarning, stacklevel=2) + if strategy is not None: if strategy.lower() == "dfs": strategy = "depthfirst" diff --git a/arango/http.py b/arango/http.py index c5eb0acd..d0b17939 100644 --- a/arango/http.py +++ b/arango/http.py @@ -1,6 +1,13 @@ -__all__ = ["HTTPClient", "DefaultHTTPClient", "DEFAULT_REQUEST_TIMEOUT"] +__all__ = [ + "HTTPClient", + "DefaultHTTPClient", + "DeflateRequestCompression", + "RequestCompression", + "DEFAULT_REQUEST_TIMEOUT", +] import typing +import zlib from abc import ABC, abstractmethod from typing import Any, MutableMapping, Optional, Tuple, Union @@ -40,7 +47,7 @@ def send_request( url: str, headers: Optional[Headers] = None, params: Optional[MutableMapping[str, str]] = None, - data: Union[str, MultipartEncoder, None] = None, + data: Union[str, bytes, MultipartEncoder, None] = None, auth: Optional[Tuple[str, str]] = None, ) -> Response: """Send an HTTP request. @@ -58,7 +65,7 @@ def send_request( :param params: URL (query) parameters. :type params: dict :param data: Request payload. - :type data: str | MultipartEncoder | None + :type data: str | bytes | MultipartEncoder | None :param auth: Username and password. 
:type auth: tuple :returns: HTTP response. @@ -198,7 +205,7 @@ def send_request( url: str, headers: Optional[Headers] = None, params: Optional[MutableMapping[str, str]] = None, - data: Union[str, MultipartEncoder, None] = None, + data: Union[str, bytes, MultipartEncoder, None] = None, auth: Optional[Tuple[str, str]] = None, ) -> Response: """Send an HTTP request. @@ -214,7 +221,7 @@ def send_request( :param params: URL (query) parameters. :type params: dict :param data: Request payload. - :type data: str | MultipartEncoder | None + :type data: str | bytes | MultipartEncoder | None :param auth: Username and password. :type auth: tuple :returns: HTTP response. @@ -237,3 +244,75 @@ def send_request( status_text=response.reason, raw_body=response.text, ) + + +class RequestCompression(ABC): # pragma: no cover + """Abstract base class for request compression.""" + + @abstractmethod + def needs_compression(self, data: str) -> bool: + """ + :param data: Data to be compressed. + :type data: str + :returns: True if the data needs to be compressed. + :rtype: bool + """ + raise NotImplementedError + + @abstractmethod + def compress(self, data: str) -> bytes: + """Compress the data. + + :param data: Data to be compressed. + :type data: str + :returns: Compressed data. + :rtype: bytes + """ + raise NotImplementedError + + @abstractmethod + def encoding(self) -> str: + """Return the content encoding exactly as it should + appear in the headers. + + :returns: Content encoding. + :rtype: str + """ + raise NotImplementedError + + +class DeflateRequestCompression(RequestCompression): + """Compress requests using the 'deflate' algorithm.""" + + def __init__(self, threshold: int = 1024, level: int = 6): + """ + :param threshold: Will compress requests to the server if + the size of the request body (in bytes) is at least the value of this + option. + :type threshold: int + :param level: Compression level, in 0-9 or -1. + :type level: int + """ + self._threshold = threshold + self._level = level + + def needs_compression(self, data: str) -> bool: + """ + :param data: Data to be compressed. + :type data: str + :returns: True if the data needs to be compressed. + :rtype: bool + """ + return len(data) >= self._threshold + + def compress(self, data: str) -> bytes: + """ + :param data: Data to be compressed. + :type data: str + :returns: Compressed data. + :rtype: bytes + """ + return zlib.compress(data.encode("utf-8"), level=self._level) + + def encoding(self) -> str: + return "deflate" diff --git a/arango/job.py b/arango/job.py index d5065d04..85c96bc8 100644 --- a/arango/job.py +++ b/arango/job.py @@ -60,10 +60,10 @@ def status(self) -> str: fail. :return: Async job status. Possible values are "pending" (job is still - in queue), "done" (job finished or raised an error), or "cancelled" - (job was cancelled before completion). + in queue), "done" (job finished or raised an error). :rtype: str - :raise arango.exceptions.AsyncJobStatusError: If retrieval fails. + :raise arango.exceptions.AsyncJobStatusError: If retrieval fails or + job is not found. 
""" request = Request(method="get", endpoint=f"/_api/job/{self._id}") resp = self._conn.send_request(request) diff --git a/arango/replication.py b/arango/replication.py index 0ecfc20e..d5fae457 100644 --- a/arango/replication.py +++ b/arango/replication.py @@ -180,13 +180,15 @@ def response_handler(resp: Response) -> Json: if resp.is_success: result = format_replication_header(resp.headers) result["content"] = [ - [ - self._conn.deserialize(line) - for line in resp.body.split("\n") - if line - ] - if deserialize - else resp.body + ( + [ + self._conn.deserialize(line) + for line in resp.body.split("\n") + if line + ] + if deserialize + else resp.body + ) ] return result diff --git a/arango/request.py b/arango/request.py index f7208153..abb2b0db 100644 --- a/arango/request.py +++ b/arango/request.py @@ -12,7 +12,7 @@ def normalize_headers( if driver_flags is not None: for flag in driver_flags: flags = flags + flag + ";" - driver_version = "7.7.0" + driver_version = "8.1.7" driver_header = "python-arango/" + driver_version + " (" + flags + ")" normalized_headers: Headers = { "charset": "utf-8", diff --git a/arango/utils.py b/arango/utils.py index 359b1e37..822bc736 100644 --- a/arango/utils.py +++ b/arango/utils.py @@ -9,10 +9,10 @@ import json import logging from contextlib import contextmanager -from typing import Any, Iterator, Sequence, Union +from typing import Any, Iterator, Optional, Sequence, Union -from arango.exceptions import DocumentParseError -from arango.typings import Json +from arango.exceptions import DocumentParseError, SortValidationError +from arango.typings import Json, Jsons @contextmanager @@ -64,11 +64,11 @@ def get_doc_id(doc: Union[str, Json]) -> str: def is_none_or_int(obj: Any) -> bool: - """Check if obj is None or an integer. + """Check if obj is None or a positive integer. :param obj: Object to check. :type obj: Any - :return: True if object is None or an integer. + :return: True if object is None or a positive integer. :rtype: bool """ return obj is None or (isinstance(obj, int) and obj >= 0) @@ -120,5 +120,48 @@ def build_filter_conditions(filters: Json) -> str: if not filters: return "" - conditions = [f"doc.`{k}` == {json.dumps(v)}" for k, v in filters.items()] + conditions = [] + for k, v in filters.items(): + field = k if "." in k else f"`{k}`" + conditions.append(f"doc.{field} == {json.dumps(v)}") + return "FILTER " + " AND ".join(conditions) + + +def validate_sort_parameters(sort: Jsons) -> bool: + """Validate sort parameters for an AQL query. + + :param sort: Document sort parameters. + :type sort: Jsons + :return: Validation success. + :rtype: bool + :raise arango.exceptions.SortValidationError: If sort parameters are invalid. + """ + assert isinstance(sort, Sequence) + for param in sort: + if "sort_by" not in param or "sort_order" not in param: + raise SortValidationError( + "Each sort parameter must have 'sort_by' and 'sort_order'." + ) + if param["sort_order"].upper() not in ["ASC", "DESC"]: + raise SortValidationError("'sort_order' must be either 'ASC' or 'DESC'") + return True + + +def build_sort_expression(sort: Optional[Jsons]) -> str: + """Build a sort condition for an AQL query. + + :param sort: Document sort parameters. + :type sort: Jsons | None + :return: The complete AQL sort condition. 
+    :rtype: str
+    """
+    if not sort:
+        return ""
+
+    sort_chunks = []
+    for sort_param in sort:
+        chunk = f"doc.{sort_param['sort_by']} {sort_param['sort_order']}"
+        sort_chunks.append(chunk)
+
+    return "SORT " + ", ".join(sort_chunks)
diff --git a/docs/admin.rst b/docs/admin.rst
index 744b44b3..27e29b0b 100644
--- a/docs/admin.rst
+++ b/docs/admin.rst
@@ -32,9 +32,22 @@ database.
    # Retrieve the server time.
    sys_db.time()

-    # Retrieve the server role in a cluster.
+    # Retrieve the server role.
    sys_db.role()

+    # Retrieve the server role in a cluster.
+    sys_db.cluster.server_role()
+
+    # Retrieve the server mode.
+    sys_db.mode()
+
+    # Retrieve the server mode in a cluster.
+    sys_db.cluster.server_mode()
+
+    # Set the server mode.
+    sys_db.set_mode('readonly')
+    sys_db.set_mode('default')
+
    # Retrieve the server statistics.
    sys_db.statistics()

@@ -47,13 +60,16 @@ database.
    # Set the log levels.
    sys_db.set_log_levels(
        agency='DEBUG',
-        collector='INFO',
+        deprecation='INFO',
        threads='WARNING'
    )

    # Echo the last request.
    sys_db.echo()

+    # Echo a request.
+    sys_db.echo('request goes here')
+
    # Reload the routing collection.
    sys_db.reload_routing()

diff --git a/docs/async.rst b/docs/async.rst
index 82690b29..5e480248 100644
--- a/docs/async.rst
+++ b/docs/async.rst
@@ -45,8 +45,8 @@ the results can be retrieved once available via :ref:`AsyncJob` objects.

    # Retrieve the status of each async job.
    for job in [job1, job2, job3, job4]:
-        # Job status can be "pending", "done" or "cancelled".
-        assert job.status() in {'pending', 'done', 'cancelled'}
+        # Job status can be "pending" or "done".
+        assert job.status() in {'pending', 'done'}

        # Let's wait until the jobs are finished.
        while job.status() != 'done':
diff --git a/docs/certificates.rst b/docs/certificates.rst
index 6440df20..e6ffedbc 100644
--- a/docs/certificates.rst
+++ b/docs/certificates.rst
@@ -17,8 +17,7 @@ your HTTP client as described in the :ref:`HTTPClients` section.
The ``ArangoClient`` class provides an option to override the verification behavior,
no matter what has been defined in the underlying HTTP session.
-You can use this option to disable verification or provide a custom CA bundle without
-defining a custom HTTP Client.
+You can use this option to disable verification.

.. code-block:: python

@@ -34,3 +33,18 @@ application:

    import requests
    requests.packages.urllib3.disable_warnings()
+
+You can also provide a custom CA bundle without defining a custom HTTP Client:
+
+.. code-block:: python
+
+    client = ArangoClient(hosts="https://localhost:8529", verify_override="path/to/certfile")
+
+If `verify_override` is set to a path to a directory, the directory must have been processed using the `c_rehash` utility
+supplied with OpenSSL. For more information, see the
+`requests documentation <https://requests.readthedocs.io/en/latest/user/advanced/#ssl-cert-verification>`_.
+
+Setting `verify_override` to `True` will use the system's default CA bundle.
+
+.. code-block:: python
+
+    client = ArangoClient(hosts="https://localhost:8529", verify_override=True)
diff --git a/docs/cluster.rst b/docs/cluster.rst
index fbb3bb5e..fdb45bca 100644
--- a/docs/cluster.rst
+++ b/docs/cluster.rst
@@ -86,8 +86,13 @@ Below is an example on how to manage clusters using python-arango.
    cluster.server_engine(server_id)
    cluster.server_version(server_id)
    cluster.server_statistics(server_id)
+    cluster.server_maintenance_mode(server_id)

-    # Toggle maintenance mode (allowed values are "on" and "off").
+    # Toggle Server maintenance mode (allowed values are "normal" and "maintenance").
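+    # While a server is in "maintenance" mode, cluster supervision leaves it
+    # alone (e.g. no automatic failover is triggered for it). The optional
+    # timeout is in seconds; once it elapses, the server is automatically
+    # switched back to "normal".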
+    cluster.toggle_server_maintenance_mode(server_id, 'normal')
+    cluster.toggle_server_maintenance_mode(server_id, 'maintenance', timeout=30)
+
+    # Toggle Cluster maintenance mode (allowed values are "on" and "off").
    cluster.toggle_maintenance_mode('on')
    cluster.toggle_maintenance_mode('off')

diff --git a/docs/compression.rst b/docs/compression.rst
new file mode 100644
index 00000000..526e20f1
--- /dev/null
+++ b/docs/compression.rst
@@ -0,0 +1,40 @@
+Compression
+------------
+
+The :ref:`ArangoClient` lets you define the preferred compression policy for requests and responses. By default,
+compression is disabled. You can change this by setting the `request_compression` and `response_compression` parameters
+when creating the client. Currently, only the "deflate" compression algorithm is supported.
+
+.. testcode::
+
+    from arango import ArangoClient
+
+    from arango.http import DeflateRequestCompression
+
+    client = ArangoClient(
+        hosts='http://localhost:8529',
+        request_compression=DeflateRequestCompression(),
+        response_compression="deflate"
+    )
+
+Furthermore, you can customize the request compression policy by defining the minimum size of the request body that
+should be compressed and the desired compression level. For example, the following code sets the minimum size to 2 KB
+and the compression level to 8:
+
+.. code-block:: python
+
+    client = ArangoClient(
+        hosts='http://localhost:8529',
+        request_compression=DeflateRequestCompression(
+            threshold=2048,
+            level=8),
+    )
+
+If you want to implement your own compression policy, you can do so by implementing the
+:class:`arango.http.RequestCompression` interface.
+
+.. note::
+    The `response_compression` parameter is only used to inform the server that the client prefers compressed responses
+    (in the form of an *Accept-Encoding* header). Note that the server may or may not honor this preference, depending
+    on how it is configured. This can be controlled by setting the `--http.compress-response-threshold` option to
+    a value greater than 0 when starting the ArangoDB server.
diff --git a/docs/conf.py b/docs/conf.py
index 77fe6bb8..361d1fbe 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -4,7 +4,7 @@
sys.path.insert(0, os.path.abspath(".."))

project = "python-arango"
-copyright = "2016-2022, Joohwan Oh"
+copyright = "2016-2025, Joohwan Oh"
author = "Joohwan Oh"
extensions = [
    "sphinx_rtd_theme",
@@ -17,6 +17,9 @@
html_theme = "sphinx_rtd_theme"
master_doc = "index"

+# Set canonical URL from the Read the Docs Domain
+html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "docs.python-arango.com")
+
autodoc_member_order = "bysource"

doctest_global_setup = """
diff --git a/docs/contributing.rst b/docs/contributing.rst
index 35a72657..2093f72f 100644
--- a/docs/contributing.rst
+++ b/docs/contributing.rst
@@ -7,16 +7,16 @@ Requirements
Before submitting a pull request on GitHub_, please make sure you meet the
following requirements:

-* The pull request points to dev_ branch.
+* The pull request points to the main_ branch.
* Changes are squashed into a single commit. I like to use git rebase for this.
* Commit message is in present tense. For example, "Fix bug" is good while
  "Fixed bug" is not.
* Sphinx_-compatible docstrings.
* PEP8_ compliance.
* No missing docstrings or commented-out lines.
-* Test coverage_ remains at %100. If a piece of code is trivial and does not
+* Test coverage remains at 100%. If a piece of code is trivial and does not
  need unit tests, use this_ to exclude it from coverage.
-* No build failures on `Travis CI`_.
Builds automatically trigger on pull
+* No build failures. Builds automatically trigger on pull
  request submissions.
* Documentation is kept up-to-date with the new changes (see below).

@@ -40,7 +40,7 @@ To ensure PEP8_ compliance, run flake8_:

.. code-block:: bash

    ~$ pip install flake8
-    ~$ git clone https://github.com/ArangoDB-Community/python-arango.git
+    ~$ git clone https://github.com/arangodb/python-arango.git
    ~$ cd python-arango
    ~$ flake8

@@ -57,7 +57,7 @@ To run the test suite (use your own host, port and root password):

.. code-block:: bash

    ~$ pip install pytest
-    ~$ git clone https://github.com/ArangoDB-Community/python-arango.git
+    ~$ git clone https://github.com/arangodb/python-arango.git
    ~$ cd python-arango
    ~$ py.test --complete --host=127.0.0.1 --port=8529 --passwd=passwd

@@ -66,7 +66,7 @@ To run the test suite with coverage report:

.. code-block:: bash

    ~$ pip install coverage pytest pytest-cov
-    ~$ git clone https://github.com/ArangoDB-Community/python-arango.git
+    ~$ git clone https://github.com/arangodb/python-arango.git
    ~$ cd python-arango
    ~$ py.test --complete --host=127.0.0.1 --port=8529 --passwd=passwd --cov=arango

@@ -82,18 +82,16 @@ Sphinx_. To build an HTML version on your local machine:

.. code-block:: bash

    ~$ pip install sphinx sphinx_rtd_theme
-    ~$ git clone https://github.com/ArangoDB-Community/python-arango.git
+    ~$ git clone https://github.com/arangodb/python-arango.git
    ~$ cd python-arango
    ~$ python -m sphinx -b html -W docs docs/_build/  # Open build/index.html in a browser

As always, thank you for your contribution!

-.. _dev: https://github.com/joowani/python-arango/tree/dev
-.. _GitHub: https://github.com/joowani/python-arango
+.. _main: https://github.com/arangodb/python-arango/tree/main
+.. _GitHub: https://github.com/arangodb/python-arango
.. _PEP8: https://www.python.org/dev/peps/pep-0008/
-.. _coverage: https://coveralls.io/github/joowani/python-arango
.. _this: http://coverage.readthedocs.io/en/latest/excluding.html
-.. _Travis CI: https://travis-ci.org/joowani/python-arango
.. _Sphinx: https://github.com/sphinx-doc/sphinx
.. _flake8: http://flake8.pycqa.org
.. _here: http://flake8.pycqa.org/en/latest/user/violations.html#in-line-ignoring-errors
diff --git a/docs/document.rst b/docs/document.rst
index 62ad0886..0f0d7d10 100644
--- a/docs/document.rst
+++ b/docs/document.rst
@@ -103,6 +103,12 @@ Standard documents are managed via collection API wrapper:
        assert student['GPA'] == 3.6
        assert student['last'] == 'Kim'

+    # Retrieve one or more matching documents, sorted by a field.
+    for student in students.find({'first': 'John'}, sort=[{'sort_by': 'GPA', 'sort_order': 'DESC'}]):
+        assert student['_key'] == 'john'
+        assert student['GPA'] == 3.6
+        assert student['last'] == 'Kim'
+
    # Retrieve a document by key.
    students.get('john')

diff --git a/docs/graph.rst b/docs/graph.rst
index 0fe8b205..0b37154f 100644
--- a/docs/graph.rst
+++ b/docs/graph.rst
@@ -318,8 +318,10 @@ See :ref:`Graph` and :ref:`EdgeCollection` for API specification.
Graph Traversals
================

-**Graph traversals** are executed via the :func:`arango.graph.Graph.traverse`
-method. Each traversal can span across multiple vertex collections, and walk
+**Graph traversals** are executed via AQL. The old
+:func:`arango.graph.Graph.traverse` has been deprecated and can no longer be
+used with ArangoDB 3.12 or later.
+Each traversal can span across multiple vertex collections, and walk
over edges and vertices using various algorithms.
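As a rough migration guide, the sketch below shows how a legacy ``traverse()``
call might be rewritten against the AQL API. It is only a sketch: the graph
name (``school``), start vertex (``teachers/jon``) and depth range ``1..3`` are
assumptions borrowed from the example that follows, not fixed parts of the API.

.. code-block:: python

    # direction='outbound' maps onto the OUTBOUND keyword, strategy='bfs' onto
    # the bfs traversal option, and vertex_uniqueness='global' becomes
    # uniqueVertices: 'global'. Both the start vertex and the graph name can
    # be supplied as bind variables.
    query = """
        FOR v, e, p IN 1..3 OUTBOUND @start GRAPH @graph
            OPTIONS { bfs: true, uniqueVertices: 'global' }
            RETURN v
    """
    cursor = db.aql.execute(
        query,
        bind_vars={'start': 'teachers/jon', 'graph': 'school'}
    )
    visited = [vertex for vertex in cursor]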
**Example:**

@@ -371,13 +373,12 @@ over edges and vertices using various algorithms.
    teach.insert({'_from': 'teachers/jon', '_to': 'lectures/STA201'})
    teach.insert({'_from': 'teachers/jon', '_to': 'lectures/MAT223'})

-    # Traverse the graph in outbound direction, breath-first.
-    school.traverse(
-        start_vertex='teachers/jon',
-        direction='outbound',
-        strategy='bfs',
-        edge_uniqueness='global',
-        vertex_uniqueness='global',
-    )
+    # AQL to perform a graph traversal
+    query = """
+        FOR v, e, p IN 1..3 OUTBOUND 'teachers/jon' GRAPH 'school'
+        OPTIONS { bfs: true, uniqueVertices: 'global' }
+        RETURN {vertex: v, edge: e, path: p}
+    """

-See :func:`arango.graph.Graph.traverse` for API specification.
+    # Traverse the graph in outbound direction, breadth-first.
+    cursor = db.aql.execute(query)
diff --git a/docs/index.rst b/docs/index.rst
index 232103b0..4856e1b9 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -7,11 +7,13 @@ Python-Arango

Welcome to the documentation for **python-arango**, a Python driver for ArangoDB_.

+If you're interested in using asyncio, please check the python-arango-async_ driver.
+
Requirements
=============

-- ArangoDB version 3.9+
-- Python version 3.8+
+- ArangoDB version 3.11+
+- Python version 3.9+

Installation
============
@@ -23,43 +25,77 @@ Installation
Contents
========

+Basics
+
.. toctree::
    :maxdepth: 1

    overview
    database
    collection
-    document
-    schema
    indexes
+    document
    graph
-    aql
    simple
-    cursor
+    aql
+
+Specialized Features
+
+.. toctree::
+    :maxdepth: 1
+
+    pregel
+    foxx
+    replication
+    transaction
+    cluster
+    analyzer
+    view
+    wal
+
+API Executions
+
+.. toctree::
+    :maxdepth: 1
+
    async
    batch
    overload
-    transaction
+
+Administration
+
+.. toctree::
+    :maxdepth: 1
+
    admin
    user
+
+Miscellaneous
+
+.. toctree::
+    :maxdepth: 1
+
    task
-    wal
-    pregel
-    foxx
-    view
-    analyzer
    threading
    certificates
    errors
    logging
    auth
    http
-    replication
-    cluster
+    compression
    serializer
+    schema
+    cursor
    backup
    errno
+
+Development
+
+.. toctree::
+    :maxdepth: 1
+
    contributing
    specs

.. _ArangoDB: https://www.arangodb.com
+.. _python-arango-async: https://python-arango-async.readthedocs.io
diff --git a/docs/indexes.rst b/docs/indexes.rst
index 01750146..8df3048f 100644
--- a/docs/indexes.rst
+++ b/docs/indexes.rst
@@ -27,27 +27,37 @@ on fields ``_from`` and ``_to``. For more information on indexes, refer to
    # List the indexes in the collection.
    cities.indexes()

-    # Add a new hash index on document fields "continent" and "country".
-    index = cities.add_hash_index(fields=['continent', 'country'], unique=True)
+    # Add a new persistent index on document fields "continent" and "country".
+    persistent_index = {'type': 'persistent', 'fields': ['continent', 'country'], 'unique': True}
+    index = cities.add_index(persistent_index)

    # Add new fulltext indexes on fields "continent" and "country".
-    index = cities.add_fulltext_index(fields=['continent'])
-    index = cities.add_fulltext_index(fields=['country'])
+    index = cities.add_index({'type': 'fulltext', 'fields': ['continent']})
+    index = cities.add_index({'type': 'fulltext', 'fields': ['country']})

-    # Add a new skiplist index on field 'population'.
-    index = cities.add_skiplist_index(fields=['population'], sparse=False)
+    # Add a new persistent index on field 'population'.
+    persistent_index = {'type': 'persistent', 'fields': ['population'], 'sparse': False}
+    index = cities.add_index(persistent_index)

    # Add a new geo-spatial index on field 'coordinates'.
-    index = cities.add_geo_index(fields=['coordinates'])
+    geo_index = {'type': 'geo', 'fields': ['coordinates']}
+    index = cities.add_index(geo_index)

    # Add a new persistent index on field 'currency'.
-    index = cities.add_persistent_index(fields=['currency'], sparse=True)
+    persistent_index = {'type': 'persistent', 'fields': ['currency'], 'sparse': True}
+    index = cities.add_index(persistent_index)

    # Add a new TTL (time-to-live) index on field 'currency'.
-    index = cities.add_ttl_index(fields=['currency'], expiry_time=200)
+    ttl_index = {'type': 'ttl', 'fields': ['currency'], 'expireAfter': 200}
+    index = cities.add_index(ttl_index)
+
+    # Add an MDI (multi-dimensional) index on fields 'x' and 'y'.
+    mdi_index = {'type': 'mdi', 'fields': ['x', 'y'], 'fieldValueTypes': 'double'}
+    index = cities.add_index(mdi_index)

    # Indexes may be added with a name that can be referred to in AQL queries.
-    index = cities.add_hash_index(fields=['country'], name='my_hash_index')
+    persistent_index = {'type': 'persistent', 'fields': ['country'], 'unique': True, 'name': 'my_hash_index'}
+    index = cities.add_index(persistent_index)

    # Delete the last index from the collection.
    cities.delete_index(index['id'])
diff --git a/docs/overview.rst b/docs/overview.rst
index 76ff4155..053658df 100644
--- a/docs/overview.rst
+++ b/docs/overview.rst
@@ -29,8 +29,8 @@ Here is an example showing how **python-arango** client can be used:
    else:
        students = db.create_collection('students')

-    # Add a hash index to the collection.
-    students.add_hash_index(fields=['name'], unique=False)
+    # Add a persistent index to the collection.
+    students.add_index({'type': 'persistent', 'fields': ['name'], 'unique': False})

    # Truncate the collection.
    students.truncate()
diff --git a/docs/pregel.rst b/docs/pregel.rst
index 45c55f4a..5aad7abe 100644
--- a/docs/pregel.rst
+++ b/docs/pregel.rst
@@ -1,6 +1,10 @@
Pregel
------

+.. warning::
+    Starting from ArangoDB 3.12, the Pregel API has been dropped.
+    The driver still supports it for ArangoDB 3.10 and 3.11, but note that this support will eventually be dropped as well.
+
Python-arango provides support for **Pregel**, ArangoDB module for distributed
iterative graph processing. For more information, refer to `ArangoDB manual`_.

@@ -8,7 +12,7 @@ iterative graph processing. For more information, refer to `ArangoDB manual`_.

**Example:**

-.. testcode::
+.. code-block:: python

    from arango import ArangoClient
diff --git a/docs/simple.rst b/docs/simple.rst
index 4d483e65..8f28f634 100644
--- a/docs/simple.rst
+++ b/docs/simple.rst
@@ -1,8 +1,6 @@
Simple Queries
--------------

-.. caution:: There is no option to add a TTL (Time to live) or batch size optimizations to the Simple Queries due to how Arango is handling simple collection HTTP requests. Your request may time out and you'll see a CursorNextError exception. The AQL queries provide full functionality.
-
Here is an example of using ArangoDB's **simple queries**:

.. testcode::
diff --git a/docs/specs.rst b/docs/specs.rst
index b4f61854..87e1d184 100644
--- a/docs/specs.rst
+++ b/docs/specs.rst
@@ -103,6 +103,12 @@ DefaultHTTPClient
.. autoclass:: arango.http.DefaultHTTPClient
    :members:

+DeflateRequestCompression
+=========================
+
+.. autoclass:: arango.http.DeflateRequestCompression
+    :members:
+
.. _EdgeCollection:

EdgeCollection
diff --git a/docs/transaction.rst b/docs/transaction.rst
index 18d60a68..8bdeb18d 100644
--- a/docs/transaction.rst
+++ b/docs/transaction.rst
@@ -75,6 +75,16 @@ logical unit of work (ACID compliant).
assert 'Lily' not in col assert len(col) == 3 # transaction is aborted so txn_col cannot be used + # Fetch an existing transaction. Useful if you have received a Transaction ID + # from some other part of your system or an external system. + original_txn = db.begin_transaction(write='students') + txn_col = original_txn.collection('students') + assert '_rev' in txn_col.insert({'_key': 'Chip'}) + txn_db = db.fetch_transaction(original_txn.transaction_id) + txn_col = txn_db.collection('students') + assert '_rev' in txn_col.insert({'_key': 'Alya'}) + txn_db.abort_transaction() + See :ref:`TransactionDatabase` for API specification. Alternatively, you can use diff --git a/pyproject.toml b/pyproject.toml index 7c0d1244..8ed943be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,14 +11,14 @@ description = "Python Driver for ArangoDB" authors = [ {name= "Joohwan Oh", email = "joohwan.oh@outlook.com" }] maintainers = [ {name = "Joohwan Oh", email = "joohwan.oh@outlook.com"}, - {name = "Alexandru Petenchea", email = "alexandru.petenchea@arangodb.com"}, + {name = "Alexandru Petenchea", email = "alex.petenchea@gmail.com"}, {name = "Anthony Mahanna", email = "anthony.mahanna@arangodb.com"} ] keywords = ["arangodb", "python", "driver"] readme = "README.md" dynamic = ["version"] license = { file = "LICENSE" } -requires-python = ">=3.8" +requires-python = ">=3.9" classifiers = [ "Intended Audience :: Developers", @@ -26,7 +26,6 @@ classifiers = [ "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -57,7 +56,6 @@ dev = [ "pytest-cov>=3.0.0", "sphinx", "sphinx_rtd_theme", - "types-pkg_resources", "types-requests", "types-setuptools", ] @@ -66,7 +64,7 @@ dev = [ "arango" = ["py.typed"] [project.urls] -homepage = "https://github.com/ArangoDB-Community/python-arango" +homepage = "https://github.com/arangodb/python-arango" [tool.setuptools] packages = ["arango"] diff --git a/setup.cfg b/setup.cfg index e600ca8d..846ab7fb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [flake8] max-line-length = 88 extend-ignore = E203, E741, W503 -exclude =.git .idea .*_cache dist htmlcov venv +exclude =.git .idea .*_cache dist htmlcov venv arango/errno.py per-file-ignores = __init__.py:F401 diff --git a/starter.sh b/starter.sh index d0973f37..b4e39f24 100755 --- a/starter.sh +++ b/starter.sh @@ -6,7 +6,7 @@ # Usage: # ./starter.sh [single|cluster] [community|enterprise] [version] # Example: -# ./starter.sh cluster enterprise 3.11.4 +# ./starter.sh cluster enterprise 3.12.1 setup="${1:-single}" license="${2:-community}" @@ -32,11 +32,12 @@ else exit 1 fi -conf_file="" -if [[ "${version%.*}" == "3.10" ]]; then - conf_file="${setup}-3.10" +if [ "$version" == "latest" ]; then + conf_file="${setup}-3.12" +elif [[ "$version" == *.*.* ]]; then + conf_file="${setup}-${version%.*}" else - conf_file="${setup}" + conf_file="${setup}-${version}" fi docker run -d \ diff --git a/tests/conftest.py b/tests/conftest.py index da95e2ef..ee5a0cd3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -106,9 +106,9 @@ def pytest_configure(config): col_name = generate_col_name() tst_col = tst_db.create_collection(col_name, edge=False) - tst_col.add_skiplist_index(["val"]) - tst_col.add_fulltext_index(["text"]) - geo_index = tst_col.add_geo_index(["loc"]) + tst_col.add_index({"type": "skiplist", "fields": 
["val"]}) + tst_col.add_index({"type": "fulltext", "fields": ["text"]}) + geo_index = tst_col.add_index({"type": "geo", "fields": ["loc"]}) # Create a legacy edge collection for testing. icol_name = generate_col_name() @@ -134,7 +134,7 @@ def pytest_configure(config): global_data.username = username global_data.password = password global_data.db_name = tst_db_name - global_data.db_version = version.parse(db_version) + global_data.db_version = version.parse(db_version.split("-")[0]) global_data.sys_db = sys_db global_data.tst_db = tst_db global_data.bad_db = bad_db diff --git a/tests/static/cluster.conf b/tests/static/cluster-3.11.conf similarity index 83% rename from tests/static/cluster.conf rename to tests/static/cluster-3.11.conf index 182f3d17..86f78556 100644 --- a/tests/static/cluster.conf +++ b/tests/static/cluster-3.11.conf @@ -11,3 +11,4 @@ jwt-secret = /tests/static/keyfile all.database.password = passwd all.database.extended-names = true all.log.api-enabled = true +all.javascript.allow-admin-execute = true diff --git a/tests/static/cluster-3.10.conf b/tests/static/cluster-3.12.conf similarity index 62% rename from tests/static/cluster-3.10.conf rename to tests/static/cluster-3.12.conf index 573c030a..d33e07a3 100644 --- a/tests/static/cluster-3.10.conf +++ b/tests/static/cluster-3.12.conf @@ -9,4 +9,7 @@ jwt-secret = /tests/static/keyfile [args] all.database.password = passwd +all.database.extended-names = true all.log.api-enabled = true +all.javascript.allow-admin-execute = true +all.server.options-api = admin diff --git a/tests/static/single.conf b/tests/static/single-3.11.conf similarity index 80% rename from tests/static/single.conf rename to tests/static/single-3.11.conf index e880f9d5..df45cb76 100644 --- a/tests/static/single.conf +++ b/tests/static/single-3.11.conf @@ -9,3 +9,4 @@ jwt-secret = /tests/static/keyfile [args] all.database.password = passwd all.database.extended-names = true +all.javascript.allow-admin-execute = true diff --git a/tests/static/single-3.10.conf b/tests/static/single-3.12.conf similarity index 55% rename from tests/static/single-3.10.conf rename to tests/static/single-3.12.conf index c982303b..d5df3aa9 100644 --- a/tests/static/single-3.10.conf +++ b/tests/static/single-3.12.conf @@ -8,3 +8,6 @@ jwt-secret = /tests/static/keyfile [args] all.database.password = passwd +all.database.extended-names = true +all.javascript.allow-admin-execute = true +all.server.options-api = admin diff --git a/tests/test_analyzer.py b/tests/test_analyzer.py index 63627251..a7573bd6 100644 --- a/tests/test_analyzer.py +++ b/tests/test_analyzer.py @@ -14,13 +14,23 @@ def test_analyzer_management(db, bad_db, cluster, enterprise, db_version): full_analyzer_name = db.name + "::" + analyzer_name bad_analyzer_name = generate_analyzer_name() - # Test create analyzer + # Test create identity analyzer result = db.create_analyzer(analyzer_name, "identity", {}) assert result["name"] == full_analyzer_name assert result["type"] == "identity" assert result["properties"] == {} assert result["features"] == [] + # Test create delimiter analyzer + result = db.create_analyzer( + name=generate_analyzer_name(), + analyzer_type="delimiter", + properties={"delimiter": ","}, + ) + assert result["type"] == "delimiter" + assert result["properties"] == {"delimiter": ","} + assert result["features"] == [] + # Test create duplicate with bad database with assert_raises(AnalyzerCreateError) as err: bad_db.create_analyzer(analyzer_name, "identity", {}, []) @@ -60,7 +70,7 @@ def 
test_analyzer_management(db, bad_db, cluster, enterprise, db_version):
    assert db.delete_analyzer(analyzer_name, ignore_missing=True) is False

    # Test create geo_s2 analyzer (EE only)
-    if enterprise and db_version >= version.parse("3.10.5"):
+    if enterprise:
        analyzer_name = generate_analyzer_name()
        result = db.create_analyzer(analyzer_name, "geo_s2", {})
        assert result["type"] == "geo_s2"
@@ -71,3 +81,22 @@ def test_analyzer_management(db, bad_db, cluster, enterprise, db_version):
            "format": "latLngDouble",
        }
        assert db.delete_analyzer(analyzer_name)
+
+    # Test create multi_delimiter analyzer with multiple delimiters
+    if db_version >= version.parse("3.12.0"):
+        result = db.create_analyzer(
+            name=generate_analyzer_name(),
+            analyzer_type="multi_delimiter",
+            properties={"delimiters": [",", "."]},
+        )
+
+        assert result["type"] == "multi_delimiter"
+        assert result["properties"] == {"delimiters": [",", "."]}
+        assert result["features"] == []
+
+    if db_version >= version.parse("3.12.0"):
+        analyzer_name = generate_analyzer_name()
+        result = db.create_analyzer(analyzer_name, "wildcard", {"ngramSize": 4})
+        assert result["type"] == "wildcard"
+        assert result["features"] == []
+        assert result["properties"] == {"ngramSize": 4}
diff --git a/tests/test_aql.py b/tests/test_aql.py
index 65b7365e..6c1d3ea3 100644
--- a/tests/test_aql.py
+++ b/tests/test_aql.py
@@ -1,5 +1,7 @@
+import pytest
from packaging import version

+from arango.errno import FORBIDDEN
from arango.exceptions import (
    AQLCacheClearError,
    AQLCacheConfigureError,
@@ -199,8 +201,7 @@ def test_aql_query_management(db_version, db, bad_db, col, docs):
        assert "state" in query
        assert "bind_vars" in query
        assert "runtime" in query
-        if db_version >= version.parse("3.11"):
-            assert "peak_memory_usage" in query
+        assert "peak_memory_usage" in query
    assert len(queries) == 2

    # Test list queries with bad database
@@ -247,7 +248,7 @@ def test_aql_query_management(db_version, db, bad_db, col, docs):


def test_aql_query_force_one_shard_attribute_value(db, db_version, enterprise, cluster):
-    if db_version < version.parse("3.10") or not enterprise or not cluster:
+    if not enterprise or not cluster:
        return

    name = generate_col_name()
@@ -347,6 +348,86 @@ def test_aql_function_management(db, bad_db):
    assert db.aql.functions() == []


+def test_cache_results_management(db, bad_db, col, docs, cluster):
+    if cluster:
+        pytest.skip("Cluster mode does not support query result cache management")
+
+    aql = db.aql
+    cache = aql.cache
+
+    # Sanity check, just see if the response is OK.
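+    # (properties() returns the result cache's current configuration; its
+    # "mode" is asserted explicitly once the cache is reconfigured below.)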
+ _ = cache.properties() + with pytest.raises(AQLCachePropertiesError) as err: + _ = bad_db.aql.cache.properties() + assert err.value.error_code == FORBIDDEN + + # Turn on caching + result = cache.configure(mode="on") + assert result["mode"] == "on" + result = cache.properties() + assert result["mode"] == "on" + with pytest.raises(AQLCacheConfigureError) as err: + _ = bad_db.aql.cache.configure(mode="on") + assert err.value.error_code == FORBIDDEN + + # Run a simple query to use the cache + col.insert(docs[0]) + _ = aql.execute( + query="FOR doc IN @@collection RETURN doc", + bind_vars={"@collection": col.name}, + cache=True, + ) + + # Check the entries + entries = cache.entries() + assert isinstance(entries, list) + assert len(entries) > 0 + + with pytest.raises(AQLCacheEntriesError) as err: + _ = bad_db.aql.cache.entries() + assert err.value.error_code == FORBIDDEN + + # Clear the cache + cache.clear() + entries = cache.entries() + assert len(entries) == 0 + with pytest.raises(AQLCacheClearError) as err: + bad_db.aql.cache.clear() + assert err.value.error_code == FORBIDDEN + + +def test_cache_plan_management(db, bad_db, col, docs, db_version): + if db_version < version.parse("3.12.4"): + pytest.skip("Query plan cache is supported in ArangoDB 3.12.4+") + + aql = db.aql + cache = aql.cache + + # Run a simple query to use the cache + col.insert(docs[0]) + _ = aql.execute( + query="FOR doc IN @@collection RETURN doc", + bind_vars={"@collection": col.name}, + use_plan_cache=True, + ) + + # Check the entries + entries = cache.plan_entries() + assert isinstance(entries, list) + assert len(entries) > 0 + with pytest.raises(AQLCacheEntriesError) as err: + _ = bad_db.aql.cache.plan_entries() + assert err.value.error_code == FORBIDDEN + + # Clear the cache + cache.clear_plan() + entries = cache.plan_entries() + assert len(entries) == 0 + with pytest.raises(AQLCacheClearError) as err: + bad_db.aql.cache.clear_plan() + assert err.value.error_code == FORBIDDEN + + def test_aql_cache_management(db, bad_db): # Test get AQL cache properties properties = db.aql.cache.properties() diff --git a/tests/test_client.py b/tests/test_client.py index 5faa84db..a196a8fd 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -8,10 +8,15 @@ from arango.client import ArangoClient from arango.database import StandardDatabase -from arango.exceptions import ServerConnectionError -from arango.http import DefaultHTTPClient +from arango.exceptions import ArangoClientError, ServerConnectionError +from arango.http import DefaultHTTPClient, DeflateRequestCompression from arango.resolver import FallbackHostResolver, RandomHostResolver, SingleHostResolver -from tests.helpers import generate_db_name, generate_string, generate_username +from tests.helpers import ( + generate_col_name, + generate_db_name, + generate_string, + generate_username, +) def test_client_attributes(): @@ -84,7 +89,7 @@ def test_client_bad_connection(db, username, password, cluster): # Test connection with invalid host URL client = ArangoClient(hosts="http://127.0.0.1:8500") - with pytest.raises(ServerConnectionError) as err: + with pytest.raises(ArangoClientError) as err: client.db(db.name, username, password, verify=True) assert "bad connection" in str(err.value) @@ -184,3 +189,56 @@ def test_can_serialize_deserialize_client() -> None: client_pstr = pickle.dumps(client) client2 = pickle.loads(client_pstr) assert len(client2._sessions) > 0 + + +def test_client_compression(db, username, password): + class CheckCompression: + def __init__(self, 
should_compress: bool):
+            self.should_compress = should_compress
+
+        def check(self, headers):
+            if self.should_compress:
+                assert headers["content-encoding"] == "deflate"
+            else:
+                assert "content-encoding" not in headers
+
+    class MyHTTPClient(DefaultHTTPClient):
+        def __init__(self, compression_checker: CheckCompression) -> None:
+            super().__init__()
+            self.checker = compression_checker
+
+        def send_request(
+            self, session, method, url, headers=None, params=None, data=None, auth=None
+        ):
+            self.checker.check(headers)
+            return super().send_request(
+                session, method, url, headers, params, data, auth
+            )
+
+    checker = CheckCompression(should_compress=False)
+
+    # should not compress, as threshold is 0
+    client = ArangoClient(
+        hosts="http://127.0.0.1:8529",
+        http_client=MyHTTPClient(compression_checker=checker),
+        response_compression="gzip",
+    )
+    db = client.db(db.name, username, password)
+    col = db.create_collection(generate_col_name())
+    col.insert({"_key": "1"})
+
+    # should not compress, as size of payload is less than threshold
+    checker = CheckCompression(should_compress=False)
+    client = ArangoClient(
+        hosts="http://127.0.0.1:8529",
+        http_client=MyHTTPClient(compression_checker=checker),
+        request_compression=DeflateRequestCompression(250, level=7),
+        response_compression="deflate",
+    )
+    db = client.db(db.name, username, password)
+    col = db.create_collection(generate_col_name())
+    col.insert({"_key": "2"})
+
+    # should compress
+    checker.should_compress = True
+    col.insert({"_key": "3" * 250})
diff --git a/tests/test_cluster.py b/tests/test_cluster.py
index bbc31778..0a4cd19f 100644
--- a/tests/test_cluster.py
+++ b/tests/test_cluster.py
@@ -1,3 +1,4 @@
+import time
import warnings

import pytest
@@ -12,9 +13,11 @@
    ClusterServerCountError,
    ClusterServerEngineError,
    ClusterServerIDError,
+    ClusterServerModeError,
    ClusterServerRoleError,
    ClusterServerStatisticsError,
    ClusterServerVersionError,
+    ClusterVpackSortMigrationError,
)
from tests.helpers import assert_raises

@@ -43,6 +46,18 @@ def test_cluster_server_role(sys_db, bad_db, cluster):
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}


+def test_cluster_server_mode(sys_db, bad_db, cluster):
+    if not cluster:
+        pytest.skip("Only tested in a cluster setup")
+
+    result = sys_db.cluster.server_mode()
+    assert result == "default"
+
+    with assert_raises(ClusterServerModeError) as err:
+        bad_db.cluster.server_mode()
+    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}
+
+
def test_cluster_health(sys_db, bad_db, cluster):
    if not cluster:
        pytest.skip("Only tested in a cluster setup")
@@ -99,6 +114,37 @@ def test_cluster_server_statistics(sys_db, bad_db, cluster):
    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}


+def test_cluster_server_maintenance_mode(sys_db, bad_db, cluster):
+    if not cluster:
+        pytest.skip("Only tested in a cluster setup")
+
+    # Must be a DBServer
+    health = sys_db.cluster.health()
+    server_id = None
+    for sid, info in health["Health"].items():
+        if info["Role"] == "DBServer":
+            server_id = sid
+            break
+    if server_id is None:
+        pytest.skip("No DBServer found in cluster")
+
+    result = sys_db.cluster.server_maintenance_mode(server_id)
+    assert result == {}
+
+    with assert_raises(ClusterMaintenanceModeError) as err:
+        bad_db.cluster.server_maintenance_mode(server_id)
+    assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND}
+
+    sys_db.cluster.toggle_server_maintenance_mode(server_id, "maintenance", timeout=2)
+    result =
sys_db.cluster.server_maintenance_mode(server_id) + assert "Mode" in result + assert "Until" in result + + time.sleep(5) + result = sys_db.cluster.server_maintenance_mode(server_id) + assert result == {} + + def test_cluster_toggle_maintenance_mode(sys_db, bad_db, cluster): if not cluster: pytest.skip("Only tested in a cluster setup") @@ -140,13 +186,10 @@ def test_cluster_server_count(db, bad_db, cluster): assert err.value.error_code in {FORBIDDEN, DATABASE_NOT_FOUND} -def test_cluster_rebalance(sys_db, bad_db, cluster, db_version): +def test_cluster_rebalance(sys_db, bad_db, cluster): if not cluster: pytest.skip("Only tested in a cluster setup") - if db_version < version.parse("3.10.0"): - pytest.skip("Only tested on ArangoDB 3.10+") - # Test imbalance retrieval imbalance = sys_db.cluster.calculate_imbalance() assert "leader" in imbalance @@ -194,3 +237,25 @@ def test_cluster_rebalance(sys_db, bad_db, cluster, db_version): with assert_raises(ClusterRebalanceError) as err: bad_db.cluster.rebalance() assert err.value.error_code == FORBIDDEN + + +def test_vpack_sort_migration(sys_db, bad_db, db_version, cluster): + if not cluster: + pytest.skip("Only tested in a cluster setup") + if db_version < version.parse("3.12.2"): + pytest.skip("vpackSortMigration is only tested in 3.12.2+") + + sys_db.cluster.vpack_sort_migration_status() + with assert_raises(ClusterVpackSortMigrationError) as err: + bad_db.cluster.vpack_sort_migration_status() + assert err.value.error_code == FORBIDDEN + + sys_db.cluster.vpack_sort_migration_index_check() + with assert_raises(ClusterVpackSortMigrationError) as err: + bad_db.cluster.vpack_sort_migration_index_check() + assert err.value.error_code == FORBIDDEN + + sys_db.cluster.migrate_vpack_sorting() + with assert_raises(ClusterVpackSortMigrationError) as err: + bad_db.cluster.migrate_vpack_sorting() + assert err.value.error_code == FORBIDDEN diff --git a/tests/test_collection.py b/tests/test_collection.py index a2d07587..c11a6541 100644 --- a/tests/test_collection.py +++ b/tests/test_collection.py @@ -1,5 +1,4 @@ import pytest -from packaging import version from arango.client import ArangoClient from arango.collection import StandardCollection @@ -137,6 +136,8 @@ def test_collection_misc_methods(col, bad_col, cluster): # Test truncate collection assert col.truncate() is True assert len(col) == 0 + assert col.truncate(sync=True, compact=False) is True + assert len(col) == 0 # Test truncate with bad collection with assert_raises(CollectionTruncateError) as err: @@ -191,19 +192,25 @@ def test_collection_management(db, bad_db, cluster): } ] + col = db.create_collection( + name=col_name, key_generator="autoincrement", key_increment=9, key_offset=100 + ) + key_options = col.properties()["key_options"] + assert key_options["key_generator"] == "autoincrement" + assert key_options["key_increment"] == 9 + assert key_options["key_offset"] == 100 + db.delete_collection(col_name) + col = db.create_collection( name=col_name, sync=True, system=False, key_generator="traditional", user_keys=False, - key_increment=9, - key_offset=100, edge=True, shard_count=2, shard_fields=["test_attr:"], replication_factor=1, - shard_like="", sync_replication=False, enforce_replication_factor=False, sharding_strategy="community-compat", @@ -230,6 +237,14 @@ def test_collection_management(db, bad_db, cluster): assert properties["computedValues"] == computed_values col.configure(computed_values=[]) + if cluster: + # Create distribute-shards-like collection + shard_like_name = col_name + 
"_shards_like" + shard_like_col = db.create_collection(name=shard_like_name, shard_like=col_name) + assert shard_like_col.properties()["shard_like"] == col_name + assert db.has_collection(shard_like_name) is True + assert db.delete_collection(shard_like_name, system=False) is True + # Test create duplicate collection with assert_raises(CollectionCreateError) as err: db.create_collection(col_name) @@ -316,21 +331,15 @@ def special_db_names(sys_db): pass -def test_collection_utf8(db, db_version, special_collection_names): - if db_version < version.parse("3.11.0"): - pytest.skip("UTF8 collection names require ArangoDB 3.11+") - +def test_collection_utf8(db, special_collection_names): for name in special_collection_names: create_and_delete_collection(db, name) # Not sure if this belongs in here or in `test_database.py`... def test_database_and_collection_utf8( - sys_db, db_version, special_collection_names, special_db_names + sys_db, special_collection_names, special_db_names ): - if db_version < version.parse("3.11.0"): - pytest.skip("UTF8 collection names require ArangoDB 3.11+") - client = ArangoClient(hosts="http://127.0.0.1:8529") for db_name in special_db_names: username = generate_username() @@ -362,7 +371,8 @@ def create_and_delete_collection(db, name): assert col.name == name assert db.has_collection(name) is True - index_id = col.add_hash_index(fields=["foo"])["name"] + persistent_index = {"type": "persistent", "fields": ["foo"]} + index_id = col.add_index(persistent_index)["name"] assert index_id == col.indexes()[-1]["name"] assert col.delete_index(index_id) is True diff --git a/tests/test_cursor.py b/tests/test_cursor.py index e03eae32..184d7ed8 100644 --- a/tests/test_cursor.py +++ b/tests/test_cursor.py @@ -1,5 +1,4 @@ import pytest -from packaging import version from arango.exceptions import ( CursorCloseError, @@ -23,7 +22,7 @@ def test_cursor_from_execute_query(db, col, docs): batch_size=2, ttl=1000, optimizer_rules=["+all"], - profile=True, + profile=2, ) cursor_id = cursor.id assert "Cursor" in repr(cursor) @@ -42,12 +41,27 @@ def test_cursor_from_execute_query(db, col, docs): assert "http_requests" in statistics assert "scanned_full" in statistics assert "scanned_index" in statistics + assert "nodes" in statistics + assert cursor.warnings() == [] profile = cursor.profile() assert profile["initializing"] > 0 assert profile["parsing"] > 0 + plan = cursor.plan() + expected_keys = { + "nodes", + "rules", + "collections", + "variables", + "estimatedCost", + "estimatedNrItems", + "isModificationQuery", + } + for key in expected_keys: + assert key in plan + assert clean_doc(cursor.next()) == docs[0] assert cursor.id == cursor_id assert cursor.has_more() is True @@ -107,7 +121,7 @@ def test_cursor_write_query(db, col, docs): batch_size=1, ttl=1000, optimizer_rules=["+all"], - profile=True, + profile=1, max_runtime=0.0, ) cursor_id = cursor.id @@ -263,7 +277,7 @@ def test_cursor_manual_fetch_and_pop(db, col, docs): assert err.value.message == "current batch is empty" -def test_cursor_retry_disabled(db, col, docs, db_version): +def test_cursor_retry_disabled(db, col, docs): cursor = db.aql.execute( f"FOR d IN {col.name} SORT d._key RETURN d", count=True, @@ -282,8 +296,7 @@ def test_cursor_retry_disabled(db, col, docs, db_version): # The next batch ID should have no effect cursor._next_batch_id = "2" result = cursor.fetch() - if db_version >= version.parse("3.11.1"): - assert result["next_batch_id"] == "4" + assert result["next_batch_id"] == "4" doc = cursor.pop() assert 
clean_doc(doc) == docs[2] @@ -308,28 +321,25 @@ def test_cursor_retry(db, col, docs, db_version): result = cursor.fetch() assert result["id"] == cursor.id - if db_version >= version.parse("3.11.0"): - assert result["next_batch_id"] == "3" + assert result["next_batch_id"] == "3" doc = cursor.pop() assert clean_doc(doc) == docs[1] assert cursor.empty() # Decrease the next batch ID as if the previous fetch failed - if db_version >= version.parse("3.11.0"): - cursor._next_batch_id = "2" - result = cursor.fetch() - assert result["id"] == cursor.id - assert result["next_batch_id"] == "3" - doc = cursor.pop() - assert clean_doc(doc) == docs[1] - assert cursor.empty() + cursor._next_batch_id = "2" + result = cursor.fetch() + assert result["id"] == cursor.id + assert result["next_batch_id"] == "3" + doc = cursor.pop() + assert clean_doc(doc) == docs[1] + assert cursor.empty() # Fetch the next batches normally for batch in range(2, 5): result = cursor.fetch() assert result["id"] == cursor.id - if db_version >= version.parse("3.11.0"): - assert result["next_batch_id"] == str(batch + 2) + assert result["next_batch_id"] == str(batch + 2) doc = cursor.pop() assert clean_doc(doc) == docs[batch] @@ -340,17 +350,12 @@ def test_cursor_retry(db, col, docs, db_version): doc = cursor.pop() assert clean_doc(doc) == docs[-1] - if db_version >= version.parse("3.11.0"): - # We should be able to fetch the last batch again - cursor.fetch() - doc = cursor.pop() - assert clean_doc(doc) == docs[-1] + # We should be able to fetch the last batch again + cursor.fetch() + doc = cursor.pop() + assert clean_doc(doc) == docs[-1] - if db_version >= version.parse("3.11.0"): - assert cursor.close() - else: - with pytest.raises(CursorCloseError): - cursor.close() + assert cursor.close() def test_cursor_no_count(db, col): diff --git a/tests/test_database.py b/tests/test_database.py index da6307a4..0b1d9752 100644 --- a/tests/test_database.py +++ b/tests/test_database.py @@ -13,17 +13,21 @@ USE_SYSTEM_DATABASE, ) from arango.exceptions import ( + DatabaseCompactError, DatabaseCreateError, DatabaseDeleteError, DatabaseListError, DatabasePropertiesError, + DatabaseSupportInfoError, ServerDetailsError, ServerEchoError, ServerEngineError, ServerLicenseSetError, ServerLogLevelError, + ServerLogLevelResetError, ServerLogLevelSetError, ServerMetricsError, + ServerModeSetError, ServerReadLogError, ServerReloadRoutingError, ServerRequiredDBVersionError, @@ -37,7 +41,12 @@ from arango.pregel import Pregel from arango.replication import Replication from arango.wal import WAL -from tests.helpers import assert_raises, generate_db_name +from tests.helpers import ( + assert_raises, + generate_col_name, + generate_db_name, + generate_jwt, +) def test_database_attributes(db, username): @@ -57,7 +66,7 @@ def test_database_attributes(db, username): assert isinstance(db.wal, WAL) -def test_database_misc_methods(sys_db, db, bad_db, cluster): +def test_database_misc_methods(client, sys_db, db, bad_db, cluster, secret, db_version): # Test get properties properties = db.properties() assert "id" in properties @@ -90,8 +99,8 @@ def test_database_misc_methods(sys_db, db, bad_db, cluster): assert err.value.error_code in {11, 1228} # Test get server required database version - version = db.required_db_version() - assert isinstance(version, str) + required_version = db.required_db_version() + assert isinstance(required_version, str) # Test get server target version with bad database with assert_raises(ServerRequiredDBVersionError): @@ -132,6 +141,19 @@ def 
test_database_misc_methods(sys_db, db, bad_db, cluster):
         bad_db.role()
     assert err.value.error_code in {11, 1228}
 
+    # Test get/set server mode
+    assert sys_db.mode() == "default"
+    with assert_raises(ServerModeSetError) as err:
+        sys_db.set_mode("badmode")
+    assert err.value.error_code in {11, 1228}
+
+    with assert_raises(ServerModeSetError) as err:
+        db.set_mode("readonly")
+    assert err.value.error_code in {11, 1228}
+
+    result = sys_db.set_mode("default")
+    assert result == {"mode": "default"}
+
     # Test get server status
     status = db.status()
     assert "host" in status
@@ -166,6 +188,12 @@ def test_database_misc_methods(sys_db, db, bad_db, cluster):
     bad_db.echo()
     assert err.value.error_code in {11, 1228}
 
+    # Test echo (forward request)
+    body = "request goes here"
+    echo = db.echo(body)
+    assert isinstance(echo, dict)
+    assert echo["requestBody"] == body
+
     # Test read_log with default parameters
     # Deprecated in 3.8.0
     # TODO: Remove in future release
@@ -222,7 +250,11 @@ def test_database_misc_methods(sys_db, db, bad_db, cluster):
     assert err.value.error_code in {11, 1228}
 
     # Test get log levels
-    assert isinstance(sys_db.log_levels(), dict)
+    default_log_levels = sys_db.log_levels()
+    assert isinstance(default_log_levels, dict)
+    if db_version >= version.parse("3.12.2"):
+        log_levels_with_appenders = sys_db.log_levels(with_appenders=True)
+        assert isinstance(log_levels_with_appenders, dict)
 
     # Test get log levels with bad database
     with assert_raises(ServerLogLevelError) as err:
@@ -253,6 +285,33 @@ def test_database_misc_methods(sys_db, db, bad_db, cluster):
     with assert_raises(ServerLogLevelSetError):
         bad_db.set_log_levels(**new_levels)
 
+    # Test log settings
+    result_1 = sys_db.set_log_settings(database=True, url=True, username=True)
+    result_2 = sys_db.log_settings()
+    assert isinstance(result_1, dict)
+    assert "database" in result_1
+    assert "url" in result_1
+    assert "username" in result_1
+    assert result_1 == result_2
+
+    result_1 = sys_db.set_log_settings(database=True, username=False)
+    result_2 = sys_db.log_settings()
+    assert "database" in result_1
+    assert "url" in result_1
+    assert "username" not in result_1
+    assert result_1 == result_2
+
+    # Reset log settings
+    if db_version >= version.parse("3.12.1"):
+        if cluster:
+            server_id = sys_db.cluster.server_id()
+            assert isinstance(sys_db.reset_log_levels(server_id), dict)
+
+        result = sys_db.reset_log_levels()
+        assert result == default_log_levels
+        with assert_raises(ServerLogLevelResetError):
+            bad_db.reset_log_levels()
+
     # Test get storage engine
     engine = db.engine()
     assert engine["name"] in ["rocksdb"]
@@ -263,6 +322,32 @@ def test_database_misc_methods(sys_db, db, bad_db, cluster):
     bad_db.engine()
     assert err.value.error_code in {11, 1228}
 
+    with assert_raises(DatabaseSupportInfoError) as err:
+        db.support_info()
+
+    info = sys_db.support_info()
+    assert isinstance(info, dict)
+    assert "deployment" in info
+    assert "date" in info
+
+    # Test execute JavaScript code
+    assert db.execute(1) is None
+    assert db.execute(None) == {"error": False, "code": 200}
+    assert db.execute("") == {"error": False, "code": 200}
+    assert db.execute("return 1") == 1
+
+    # Test database compact
+    with assert_raises(DatabaseCompactError) as err:
+        db.compact()
+
+    collection = db.create_collection(generate_col_name())
+    collection.insert({"foo": "bar"})
+
+    token = generate_jwt(secret)
+    db_superuser = client.db(db.name, superuser_token=token)
+    result = db_superuser.compact()
+    assert result == {}
+
 
 def test_database_management(db, sys_db, bad_db):
     # Test list databases
@@ -347,10 
+432,7 @@ def special_db_names(sys_db): pass -def test_database_utf8(sys_db, db_version, special_db_names): - if db_version < version.parse("3.11.0"): - pytest.skip("UTF8 collection names require ArangoDB 3.11+") - +def test_database_utf8(sys_db, special_db_names): for name in special_db_names: assert sys_db.create_database(name) assert sys_db.has_database(name) @@ -372,4 +454,13 @@ def test_license(sys_db, enterprise): else: assert license == {"license": "none"} with pytest.raises(ServerLicenseSetError): - sys_db.set_license("abc") + sys_db.set_license('"abc"') + + +def test_options(sys_db, db_version): + # Skip if below 3.12 + if db_version < version.parse("3.12.0"): + pytest.skip("Database options require ArangoDB 3.12+") + + assert sys_db.options() + assert sys_db.options_available() diff --git a/tests/test_document.py b/tests/test_document.py index a4127a27..0dbca038 100644 --- a/tests/test_document.py +++ b/tests/test_document.py @@ -1,4 +1,5 @@ import pytest +from packaging import version from arango.exceptions import ( DocumentCountError, @@ -238,6 +239,10 @@ def test_document_insert_many(col, bad_col, docs): assert isinstance(result["old"], dict) assert isinstance(result["_old_rev"], str) + # Test insert_many with raise_on_document_error set to True + with assert_raises(DocumentInsertError) as err: + col.insert_many(docs, raise_on_document_error=True) + # Test get with bad database with assert_raises(DocumentInsertError) as err: bad_col.insert_many(docs) @@ -1091,6 +1096,10 @@ def test_document_delete_many(col, bad_col, docs): assert "[HTTP 202][ERR 1200]" in error.message assert len(col) == 6 + # Test delete_many with raise_on_document_error set to True + with assert_raises(DocumentRevisionError) as err: + col.delete_many(docs, raise_on_document_error=True) + # Test delete_many (documents) with missing documents empty_collection(col) results = col.delete_many( @@ -1108,6 +1117,10 @@ def test_document_delete_many(col, bad_col, docs): assert "[HTTP 202][ERR 1202]" in error.message assert len(col) == 0 + # Test delete_many with raise_on_document_error set to True + with assert_raises(DocumentDeleteError) as err: + col.delete_many(docs, raise_on_document_error=True) + # Test delete_many with bad database with assert_raises(DocumentDeleteError) as err: bad_col.delete_many(docs) @@ -1161,6 +1174,26 @@ def test_document_find(col, bad_col, docs): # Set up test documents col.import_bulk(docs) + # Test find with sort expression (single field) + found = list(col.find({}, sort=[{"sort_by": "text", "sort_order": "ASC"}])) + assert len(found) == 6 + assert found[0]["text"] == "bar" + assert found[-1]["text"] == "foo" + + # Test find with sort expression (multiple fields) + found = list( + col.find( + {}, + sort=[ + {"sort_by": "text", "sort_order": "ASC"}, + {"sort_by": "val", "sort_order": "DESC"}, + ], + ) + ) + assert len(found) == 6 + assert found[0]["val"] == 6 + assert found[-1]["val"] == 1 + # Test find (single match) with default options found = list(col.find({"val": 2})) assert len(found) == 1 @@ -1240,6 +1273,10 @@ def test_document_find(col, bad_col, docs): col.insert({"foo bar": "baz"}) assert len(list(col.find({"foo bar": "baz"}))) == 1 + # Test find by nested attribute + col.insert({"foo": {"bar": "baz"}}) + assert len(list(col.find({"foo.bar": "baz"}))) == 1 + def test_document_find_near(col, bad_col, docs): col.import_bulk(docs) @@ -1463,7 +1500,8 @@ def test_document_find_in_box(db, col, bad_col, geo, cluster): ) # Test find_in_box with non-geo index - non_geo = 
col.add_hash_index(fields=["loc"]) + persistent_index = {"type": "persistent", "fields": ["loc"]} + non_geo = col.add_index(persistent_index) with assert_raises(ValueError) as err: col.find_in_box( latitude1=0, @@ -1552,7 +1590,7 @@ def test_document_has(col, bad_col, docs): with assert_raises(DocumentRevisionError) as err: col.has(doc_input, rev=bad_rev, check_rev=True) - assert err.value.error_code == 1200 + assert err.value.error_code == 412 # Test existing documents with bad revision for doc_input in [ @@ -1562,15 +1600,15 @@ def test_document_has(col, bad_col, docs): ]: with assert_raises(DocumentRevisionError) as err: col.has(doc_input) - assert err.value.error_code == 1200 + assert err.value.error_code == 412 with assert_raises(DocumentRevisionError) as err: col.has(doc_input, rev=bad_rev) - assert err.value.error_code == 1200 + assert err.value.error_code == 412 with assert_raises(DocumentRevisionError) as err: col.has(doc_input, rev=bad_rev, check_rev=True) - assert err.value.error_code == 1200 + assert err.value.error_code == 412 assert doc_input in col assert col.has(doc_input, rev=rev, check_rev=True) is True @@ -1649,12 +1687,12 @@ def test_document_has(col, bad_col, docs): # Test get with bad database with assert_raises(DocumentInError) as err: bad_col.has(doc_key) - assert err.value.error_code in {11, 1228} + assert err.value.error_code == 401 # Test contains with bad database with assert_raises(DocumentInError) as err: assert doc_key in bad_col - assert err.value.error_code in {11, 1228} + assert err.value.error_code == 401 def test_document_get(col, bad_col, docs): @@ -2067,3 +2105,74 @@ def test_document_management_via_db(db, col): assert result["_id"] == doc1_id assert doc1_id not in col assert len(col) == 2 + + +def test_version_attributes_update_many(col, db_version): + if db_version < version.parse("3.12.0"): + pytest.skip("Version attributes is tested in 3.12.0+") + + col.insert_many( + [ + {"_key": "test1", "version": 0}, + {"_key": "test2", "version": 0}, + {"_key": "test3", "version": 0}, + ] + ) + + docs = [ + {"_key": "test1", "version": 2}, + {"_key": "test1", "version": 3}, + {"_key": "test1", "version": 1}, + {"_key": "test2", "version": 1}, + {"_key": "test2", "version": 9}, + {"_key": "test2", "version": 42}, + {"_key": "test2", "version": 0}, + {"_key": "test3"}, + {"_key": "test3", "version": 5}, + {"_key": "test3", "version": 4}, + {"_key": "test3", "value": 2}, + ] + + col.update_many(docs, version_attribute="version") + assert col["test1"]["version"] == 3 + assert col["test2"]["version"] == 42 + assert col["test3"]["version"] == 5 + + docs = [ + {"_key": "test1", "version": 2}, + {"_key": "test1", "version": 3}, + {"_key": "test1", "version": 5}, + {"_key": "test2", "version": 1}, + {"_key": "test2", "version": 9}, + {"_key": "test2", "version": 42}, + {"_key": "test2", "version": 0}, + {"_key": "test3", "version": 5}, + {"_key": "test3", "version": 6}, + ] + + col.replace_many(docs, version_attribute="version") + assert col["test1"]["version"] == 5 + assert col["test2"]["version"] == 42 + assert col["test3"]["version"] == 6 + + docs = [ + {"_key": "test1", "version": 0}, + {"_key": "test2", "version": 0}, + {"_key": "test3", "version": 0}, + ] + + col.insert_many(docs, overwrite_mode="update", version_attribute="version") + assert col["test1"]["version"] == 5 + assert col["test2"]["version"] == 42 + assert col["test3"]["version"] == 6 + + docs = [ + {"_key": "test1", "version": 43}, + {"_key": "test2", "version": 41}, + {"_key": "test3", "version": 
43}, + ] + + col.insert_many(docs, overwrite_mode="replace", version_attribute="version") + assert col["test1"]["version"] == 43 + assert col["test2"]["version"] == 42 + assert col["test3"]["version"] == 43 diff --git a/tests/test_graph.py b/tests/test_graph.py index 8ca4d0c2..fe63455d 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -1,3 +1,6 @@ +import pytest +from packaging import version + from arango.collection import EdgeCollection from arango.exceptions import ( DocumentDeleteError, @@ -48,7 +51,7 @@ def test_graph_properties(graph, bad_graph, db): bad_graph.properties() new_graph_name = generate_graph_name() - new_graph = db.create_graph(new_graph_name) + new_graph = db.create_graph(new_graph_name, sync=True) properties = new_graph.properties() assert properties["id"] == f"_graphs/{new_graph_name}" assert properties["name"] == new_graph_name @@ -1071,7 +1074,10 @@ def test_edge_management_via_graph(graph, ecol, fvcol, fvdocs, tvcol, tvdocs): assert len(ecol) == 1 -def test_traverse(db): +def test_traverse(db, db_version): + if db_version >= version.parse("3.12.0"): + pytest.skip("Traversal API is no longer available for ArangoDB 3.12+") + # Create test graph, vertex and edge collections school = db.create_graph(generate_graph_name()) profs = school.create_vertex_collection(generate_col_name()) diff --git a/tests/test_index.py b/tests/test_index.py index dbf235fa..a5d0f5eb 100644 --- a/tests/test_index.py +++ b/tests/test_index.py @@ -50,19 +50,22 @@ def test_add_hash_index(icol): icol = icol fields = ["attr1", "attr2"] - result = icol.add_hash_index( - fields=fields, - unique=True, - sparse=True, - deduplicate=True, - name="hash_index", - in_background=False, + result = icol.add_index( + { + "type": "hash", + "fields": fields, + "unique": True, + "sparse": True, + "deduplicate": True, + "name": "hash_index", + "inBackground": False, + } ) expected_index = { "sparse": True, "type": "hash", - "fields": ["attr1", "attr2"], + "fields": fields, "unique": True, "deduplicate": True, "name": "hash_index", @@ -78,13 +81,16 @@ def test_add_hash_index(icol): def test_add_skiplist_index(icol): fields = ["attr1", "attr2"] - result = icol.add_skiplist_index( - fields=fields, - unique=True, - sparse=True, - deduplicate=True, - name="skiplist_index", - in_background=False, + result = icol.add_index( + { + "type": "skiplist", + "fields": fields, + "unique": True, + "sparse": True, + "deduplicate": True, + "name": "skiplist_index", + "inBackground": False, + } ) expected_index = { @@ -106,8 +112,14 @@ def test_add_skiplist_index(icol): def test_add_geo_index(icol): # Test add geo index with one attribute - result = icol.add_geo_index( - fields=["attr1"], ordered=False, name="geo_index", in_background=True + result = icol.add_index( + { + "type": "geo", + "fields": ["attr1"], + "geoJson": True, + "name": "geo_index", + "inBackground": True, + } ) expected_index = { @@ -115,18 +127,21 @@ def test_add_geo_index(icol): "type": "geo", "fields": ["attr1"], "unique": False, - "geo_json": False, + "geoJson": True, "name": "geo_index", } for key, value in expected_index.items(): - assert result[key] == value + assert result[key] == value, (key, value, result[key]) assert result["id"] in extract("id", icol.indexes()) # Test add geo index with two attributes - result = icol.add_geo_index( - fields=["attr1", "attr2"], - ordered=False, + result = icol.add_index( + { + "type": "geo", + "fields": ["attr1", "attr2"], + "geoJson": False, + } ) expected_index = { "sparse": True, @@ -141,7 +156,7 @@ def 
test_add_geo_index(icol):
     # Test add geo index with more than two attributes (should fail)
     with assert_raises(IndexCreateError) as err:
-        icol.add_geo_index(fields=["attr1", "attr2", "attr3"])
+        icol.add_index({"type": "geo", "fields": ["attr1", "attr2", "attr3"]})
     assert err.value.error_code == 10
 
     # Clean up the index
@@ -150,14 +165,20 @@ def test_add_fulltext_index(icol):
     # Test add fulltext index with one attribute
-    result = icol.add_fulltext_index(
-        fields=["attr1"], min_length=10, name="fulltext_index", in_background=True
+    result = icol.add_index(
+        {
+            "type": "fulltext",
+            "fields": ["attr1"],
+            "minLength": 10,
+            "name": "fulltext_index",
+            "inBackground": True,
+        }
     )
     expected_index = {
         "sparse": True,
         "type": "fulltext",
         "fields": ["attr1"],
-        "min_length": 10,
+        "minLength": 10,
         "unique": False,
         "name": "fulltext_index",
     }
@@ -168,7 +189,7 @@ def test_add_fulltext_index(icol):
 
     # Test add fulltext index with two attributes (should fail)
     with assert_raises(IndexCreateError) as err:
-        icol.add_fulltext_index(fields=["attr1", "attr2"])
+        icol.add_index({"type": "fulltext", "fields": ["attr1", "attr2"]})
     assert err.value.error_code == 10
 
     # Clean up the index
@@ -177,12 +198,15 @@ def test_add_persistent_index(icol):
     # Test add persistent index with two attributes
-    result = icol.add_persistent_index(
-        fields=["attr1", "attr2"],
-        unique=True,
-        sparse=True,
-        name="persistent_index",
-        in_background=True,
+    result = icol.add_index(
+        {
+            "type": "persistent",
+            "fields": ["attr1", "attr2"],
+            "unique": True,
+            "sparse": True,
+            "name": "persistent_index",
+            "inBackground": True,
+        }
     )
     expected_index = {
         "sparse": True,
@@ -202,13 +226,19 @@ def test_add_ttl_index(icol):
     # Test add TTL index with one attribute
-    result = icol.add_ttl_index(
-        fields=["attr1"], expiry_time=1000, name="ttl_index", in_background=True
+    result = icol.add_index(
+        {
+            "type": "ttl",
+            "fields": ["attr1"],
+            "expireAfter": 1000,
+            "name": "ttl_index",
+            "inBackground": True,
+        }
     )
     expected_index = {
         "type": "ttl",
         "fields": ["attr1"],
-        "expiry_time": 1000,
+        "expireAfter": 1000,
         "name": "ttl_index",
     }
     for key, value in expected_index.items():
@@ -220,10 +250,7 @@ def test_add_ttl_index(icol):
     icol.delete_index(result["id"])
 
 
-def test_add_inverted_index(icol, enterprise, db_version):
-    if db_version < version.parse("3.10.0"):
-        pytest.skip("Inverted indexes are not supported before 3.10.0")
-
+def test_add_inverted_index(icol, enterprise):
     parameters = dict(
         fields=[{"name": "attr1", "cache": True}],
         name="c0_cached",
@@ -232,14 +259,14 @@ def test_add_inverted_index(icol, enterprise, db_version):
         analyzer="identity",
         primarySort={"cache": True, "fields": [{"field": "a", "direction": "asc"}]},
     )
-    expected_keys = ["primary_sort", "analyzer", "include_all_fields", "search_field"]
+    expected_keys = ["primarySort", "analyzer", "includeAllFields", "searchField"]
 
-    if enterprise and db_version >= version.parse("3.10.2"):
+    if enterprise:
         parameters["cache"] = True
        parameters["primaryKeyCache"] = True
         expected_keys.extend(["cache", "primaryKeyCache"])
 
-    result = icol.add_inverted_index(**parameters)
+    result = icol.add_index({"type": "inverted", **parameters})
     assert result["id"] in extract("id", icol.indexes())
 
     for key in expected_keys:
@@ -248,11 +275,89 @@ def test_add_inverted_index(icol, enterprise, db_version):
     icol.delete_index(result["id"])
 
 
+def test_add_zkd_index(icol):
+    result 
= icol.add_index(
+        {
+            "type": "zkd",
+            "fields": ["x", "y", "z"],
+            "fieldValueTypes": "double",
+            "name": "zkd_index",
+            "inBackground": False,
+            "unique": False,
+        }
+    )
+
+    expected_index = {
+        "name": "zkd_index",
+        "type": "zkd",
+        "fields": ["x", "y", "z"],
+        "isNewlyCreated": True,
+        "unique": False,
+    }
+
+    for key, value in expected_index.items():
+        assert result[key] == value
+
+    assert result["id"] in extract("id", icol.indexes())
+
+    with assert_raises(IndexCreateError) as err:
+        icol.add_index(
+            {"type": "zkd", "fieldValueTypes": "integer", "fields": ["x", "y", "z"]}
+        )
+    assert err.value.error_code == 10
+
+    icol.delete_index(result["id"])
+
+
+def test_add_mdi_index(icol, db_version):
+    if db_version < version.parse("3.12.0"):
+        pytest.skip("MDI indexes are usable with 3.12+ only")
+
+    result = icol.add_index(
+        {
+            "type": "mdi",
+            "fields": ["x", "y", "z"],
+            "fieldValueTypes": "double",
+            "name": "mdi_index",
+            "inBackground": False,
+            "unique": True,
+        }
+    )
+
+    expected_index = {
+        "name": "mdi_index",
+        "type": "mdi",
+        "fields": ["x", "y", "z"],
+        "isNewlyCreated": True,
+        "unique": True,
+    }
+
+    for key, value in expected_index.items():
+        assert result[key] == value
+
+    assert result["id"] in extract("id", icol.indexes())
+
+    with assert_raises(IndexCreateError) as err:
+        icol.add_index(
+            {
+                "type": "mdi",
+                "fieldValueTypes": "integer",
+                "fields": ["x", "y", "z"],
+            }
+        )
+    assert err.value.error_code == 10
+
+    icol.delete_index(result["id"])
+
+
 def test_delete_index(icol, bad_col):
     old_indexes = set(extract("id", icol.indexes()))
-    icol.add_hash_index(["attr3", "attr4"], unique=True)
-    icol.add_skiplist_index(["attr3", "attr4"], unique=True)
-    icol.add_fulltext_index(fields=["attr3"], min_length=10)
+    hash_index = {"type": "hash", "fields": ["attr1", "attr2"], "unique": True}
+    icol.add_index(hash_index)
+    skiplist_index = {"type": "skiplist", "fields": ["attr3", "attr4"], "unique": True}
+    icol.add_index(skiplist_index)
+    fulltext_index = {"type": "fulltext", "fields": ["attr5"], "minLength": 10}
+    icol.add_index(fulltext_index)
     new_indexes = set(extract("id", icol.indexes()))
     assert new_indexes.issuperset(old_indexes)
diff --git a/tests/test_pregel.py b/tests/test_pregel.py
index e17da72b..2be8d5f0 100644
--- a/tests/test_pregel.py
+++ b/tests/test_pregel.py
@@ -3,15 +3,14 @@
 import pytest
 from packaging import version
 
-from arango.exceptions import (
-    PregelJobCreateError,
-    PregelJobDeleteError,
-    PregelJobGetError,
-)
+from arango.exceptions import PregelJobCreateError, PregelJobDeleteError
 from tests.helpers import assert_raises, generate_string
 
 
-def test_pregel_attributes(db, username):
+def test_pregel_attributes(db, db_version, username):
+    if db_version >= version.parse("3.12.0"):
+        pytest.skip("Pregel is not tested in 3.12.0+")
+
     assert db.pregel.context in ["default", "async", "batch", "transaction"]
     assert db.pregel.username == username
     assert db.pregel.db_name == db.name
@@ -19,6 +18,9 @@
 
 
 def test_pregel_management(db, db_version, graph, cluster):
+    if db_version >= version.parse("3.12.0"):
+        pytest.skip("Pregel is not tested in 3.12.0+")
+
     if cluster:
         pytest.skip("Not tested in a cluster setup")
 
@@ -52,13 +54,8 @@ def test_pregel_management(db, db_version, graph, cluster):
     # Test delete existing pregel job
     assert db.pregel.delete_job(job_id) is True
     time.sleep(0.2)
-    if db_version < version.parse("3.11.0"):
-        with assert_raises(PregelJobGetError) as err:
-            db.pregel.job(job_id)
-        assert 
err.value.error_code in {4, 10, 1600} - else: - job = db.pregel.job(job_id) - assert job["state"] == "canceled" + job = db.pregel.job(job_id) + assert job["state"] == "canceled" # Test delete missing pregel job with assert_raises(PregelJobDeleteError) as err: diff --git a/tests/test_transaction.py b/tests/test_transaction.py index 59e86b7c..75ec28a2 100644 --- a/tests/test_transaction.py +++ b/tests/test_transaction.py @@ -1,10 +1,12 @@ import pytest +from packaging import version from arango.database import TransactionDatabase from arango.exceptions import ( TransactionAbortError, TransactionCommitError, TransactionExecuteError, + TransactionFetchError, TransactionInitError, TransactionStatusError, ) @@ -15,14 +17,15 @@ def test_transaction_execute_raw(db, col, docs): # Test execute raw transaction doc = docs[0] key = doc["_key"] - result = db.execute_transaction( - command=f""" + command = f""" function (params) {{ var db = require('internal').db; db.{col.name}.save({{'_key': params.key, 'val': 1}}); return true; }} - """, + """ # noqa: E702 E231 E272 E202 + result = db.execute_transaction( + command=command, params={"key": key}, write=[col.name], read=[col.name], @@ -42,7 +45,7 @@ def test_transaction_execute_raw(db, col, docs): assert err.value.error_code == 10 -def test_transaction_init(db, bad_db, col, username): +def test_transaction_init(db, db_version, bad_db, col, username): txn_db = db.begin_transaction() assert isinstance(txn_db, TransactionDatabase) @@ -67,6 +70,22 @@ def test_transaction_init(db, bad_db, col, username): bad_db.begin_transaction() assert err.value.error_code in {11, 1228} + # Test all options + kwargs = dict( + read=col.name, + write=col.name, + exclusive=[], + sync=True, + allow_implicit=False, + lock_timeout=1000, + max_size=1024 * 1024, + ) + if db_version >= version.parse("3.12.1"): + kwargs["skip_fast_lock_round"] = True + txn_db = db.begin_transaction(**kwargs) + assert isinstance(txn_db, TransactionDatabase) + assert txn_db.transaction_id is not None + def test_transaction_status(db, col, docs): txn_db = db.begin_transaction(read=col.name) @@ -96,7 +115,7 @@ def test_transaction_commit(db, col, docs): sync=True, allow_implicit=False, lock_timeout=1000, - max_size=10000, + max_size=1024 * 1024, # 1MB ) txn_col = txn_db.collection(col.name) @@ -117,6 +136,38 @@ def test_transaction_commit(db, col, docs): assert err.value.error_code in {10, 1655} +def test_transaction_fetch_existing(db, col, docs): + original_txn = db.begin_transaction( + read=col.name, + write=col.name, + exclusive=[], + sync=True, + allow_implicit=False, + lock_timeout=1000, + max_size=1024 * 1024, # 1MB + ) + txn_col = original_txn.collection(col.name) + + assert "_rev" in txn_col.insert(docs[0]) + assert "_rev" in txn_col.delete(docs[0]) + + txn_db = db.fetch_transaction(transaction_id=original_txn.transaction_id) + + txn_col = txn_db.collection(col.name) + assert "_rev" in txn_col.insert(docs[1]) + assert "_rev" in txn_col.delete(docs[1]) + + txn_db.commit_transaction() + assert txn_db.transaction_status() == "committed" + assert original_txn.transaction_status() == "committed" + assert txn_db.transaction_id == original_txn.transaction_id + + # Test fetch transaction that does not exist + with pytest.raises(TransactionFetchError) as err: + db.fetch_transaction(transaction_id="illegal") + assert err.value.error_code in {10, 1655} + + def test_transaction_abort(db, col, docs): txn_db = db.begin_transaction(write=col.name) txn_col = txn_db.collection(col.name) diff --git 
a/tests/test_view.py b/tests/test_view.py index fd8a5640..778f87e6 100644 --- a/tests/test_view.py +++ b/tests/test_view.py @@ -12,7 +12,7 @@ from tests.helpers import assert_raises, generate_view_name -def test_view_management(db, bad_db, col, cluster): +def test_view_management(db, bad_db, col, cluster, db_version, enterprise): # Test create view view_name = generate_view_name() bad_view_name = generate_view_name() @@ -124,6 +124,20 @@ def test_view_management(db, bad_db, col, cluster): # Test delete missing view with ignore_missing set to True assert db.delete_view(view_name, ignore_missing=True) is False + if enterprise and db_version >= version.parse("3.12"): + res = db.create_view( + view_name, + view_type, + properties={ + "links": {col.name: {"fields": {}}}, + "optimizeTopK": [ + "BM25(@doc) DESC", + ], + }, + ) + assert "optimizeTopK" in res + db.delete_view(view_name) + def test_arangosearch_view_management(db, bad_db, cluster): # Test create arangosearch view @@ -180,7 +194,7 @@ def test_arangosearch_view_management(db, bad_db, cluster): assert db.delete_view(view_name, ignore_missing=False) is True -def test_arangosearch_view_properties(db, col, enterprise, db_version): +def test_arangosearch_view_properties(db, col, enterprise): view_name = generate_view_name() params = {"consolidationIntervalMsec": 50000} @@ -199,10 +213,8 @@ def test_arangosearch_view_properties(db, col, enterprise, db_version): } ) - if db_version >= version.parse("3.9.6"): - params.update({"primarySortCache": True, "primaryKeyCache": True}) - if db_version >= version.parse("3.10.3"): - params.update({"storedValues": ["attr1", "attr2"]}) + params.update({"primarySortCache": True, "primaryKeyCache": True}) + params.update({"storedValues": ["attr1", "attr2"]}) result = db.create_arangosearch_view(view_name, properties=params) assert "id" in result